xref: /openbmc/linux/fs/ceph/inode.c (revision 2d332d5b)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 
4 #include <linux/module.h>
5 #include <linux/fs.h>
6 #include <linux/slab.h>
7 #include <linux/string.h>
8 #include <linux/uaccess.h>
9 #include <linux/kernel.h>
10 #include <linux/writeback.h>
11 #include <linux/vmalloc.h>
12 #include <linux/xattr.h>
13 #include <linux/posix_acl.h>
14 #include <linux/random.h>
15 #include <linux/sort.h>
16 #include <linux/iversion.h>
17 #include <linux/fscrypt.h>
18 
19 #include "super.h"
20 #include "mds_client.h"
21 #include "cache.h"
22 #include "crypto.h"
23 #include <linux/ceph/decode.h>
24 
25 /*
26  * Ceph inode operations
27  *
28  * Implement basic inode helpers (get, alloc) and inode ops (getattr,
29  * setattr, etc.), xattr helpers, and helpers for assimilating
30  * metadata returned by the MDS into our cache.
31  *
32  * Also define helpers for doing asynchronous writeback, invalidation,
33  * and truncation for the benefit of those who can't afford to block
34  * (typically because they are in the message handler path).
35  */
36 
37 static const struct inode_operations ceph_symlink_iops;
38 
39 static void ceph_inode_work(struct work_struct *work);
40 
41 /*
42  * find or create an inode, given the ceph ino number
43  */
44 static int ceph_set_ino_cb(struct inode *inode, void *data)
45 {
46 	struct ceph_inode_info *ci = ceph_inode(inode);
47 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
48 
49 	ci->i_vino = *(struct ceph_vino *)data;
50 	inode->i_ino = ceph_vino_to_ino_t(ci->i_vino);
51 	inode_set_iversion_raw(inode, 0);
52 	percpu_counter_inc(&mdsc->metric.total_inodes);
53 
54 	return 0;
55 }
56 
57 /**
58  * ceph_new_inode - allocate a new inode in advance of an expected create
59  * @dir: parent directory for new inode
60  * @dentry: dentry that may eventually point to new inode
61  * @mode: mode of new inode
62  * @as_ctx: pointer to inherited security context
63  *
64  * Allocate a new inode in advance of an operation to create a new inode.
65  * This allocates the inode and sets up the acl_sec_ctx with appropriate
66  * info for the new inode.
67  *
68  * Returns a pointer to the new inode or an ERR_PTR.
69  */
70 struct inode *ceph_new_inode(struct inode *dir, struct dentry *dentry,
71 			     umode_t *mode, struct ceph_acl_sec_ctx *as_ctx)
72 {
73 	int err;
74 	struct inode *inode;
75 
76 	inode = new_inode(dir->i_sb);
77 	if (!inode)
78 		return ERR_PTR(-ENOMEM);
79 
80 	if (!S_ISLNK(*mode)) {
81 		err = ceph_pre_init_acls(dir, mode, as_ctx);
82 		if (err < 0)
83 			goto out_err;
84 	}
85 
86 	err = ceph_security_init_secctx(dentry, *mode, as_ctx);
87 	if (err < 0)
88 		goto out_err;
89 
90 	inode->i_state = 0;
91 	inode->i_mode = *mode;
92 	return inode;
93 out_err:
94 	iput(inode);
95 	return ERR_PTR(err);
96 }
97 
98 void ceph_as_ctx_to_req(struct ceph_mds_request *req,
99 			struct ceph_acl_sec_ctx *as_ctx)
100 {
101 	if (as_ctx->pagelist) {
102 		req->r_pagelist = as_ctx->pagelist;
103 		as_ctx->pagelist = NULL;
104 	}
105 }
106 
107 /**
108  * ceph_get_inode - find or create/hash a new inode
109  * @sb: superblock to search and allocate in
110  * @vino: vino to search for
111  * @newino: optional new inode to insert if one isn't found (may be NULL)
112  *
113  * Search for or insert a new inode into the hash for the given vino, and
114  * return a reference to it. If @newino is non-NULL, its reference is consumed.
115  */
116 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
117 			     struct inode *newino)
118 {
119 	struct inode *inode;
120 
121 	if (ceph_vino_is_reserved(vino))
122 		return ERR_PTR(-EREMOTEIO);
123 
124 	if (newino) {
125 		inode = inode_insert5(newino, (unsigned long)vino.ino,
126 				      ceph_ino_compare, ceph_set_ino_cb, &vino);
127 		if (inode != newino)
128 			iput(newino);
129 	} else {
130 		inode = iget5_locked(sb, (unsigned long)vino.ino,
131 				     ceph_ino_compare, ceph_set_ino_cb, &vino);
132 	}
133 
134 	if (!inode) {
135 		dout("No inode found for %llx.%llx\n", vino.ino, vino.snap);
136 		return ERR_PTR(-ENOMEM);
137 	}
138 
139 	dout("get_inode on %llu=%llx.%llx got %p new %d\n", ceph_present_inode(inode),
140 	     ceph_vinop(inode), inode, !!(inode->i_state & I_NEW));
141 	return inode;
142 }
143 
144 /*
145  * get/construct snapdir inode for a given directory
146  */
147 struct inode *ceph_get_snapdir(struct inode *parent)
148 {
149 	struct ceph_vino vino = {
150 		.ino = ceph_ino(parent),
151 		.snap = CEPH_SNAPDIR,
152 	};
153 	struct inode *inode = ceph_get_inode(parent->i_sb, vino, NULL);
154 	struct ceph_inode_info *ci = ceph_inode(inode);
155 
156 	if (IS_ERR(inode))
157 		return inode;
158 
159 	if (!S_ISDIR(parent->i_mode)) {
160 		pr_warn_once("bad snapdir parent type (mode=0%o)\n",
161 			     parent->i_mode);
162 		goto err;
163 	}
164 
165 	if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
166 		pr_warn_once("bad snapdir inode type (mode=0%o)\n",
167 			     inode->i_mode);
168 		goto err;
169 	}
170 
171 	inode->i_mode = parent->i_mode;
172 	inode->i_uid = parent->i_uid;
173 	inode->i_gid = parent->i_gid;
174 	inode->i_mtime = parent->i_mtime;
175 	inode->i_ctime = parent->i_ctime;
176 	inode->i_atime = parent->i_atime;
177 	ci->i_rbytes = 0;
178 	ci->i_btime = ceph_inode(parent)->i_btime;
179 
180 	if (inode->i_state & I_NEW) {
181 		inode->i_op = &ceph_snapdir_iops;
182 		inode->i_fop = &ceph_snapdir_fops;
183 		ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
184 		unlock_new_inode(inode);
185 	}
186 
187 	return inode;
188 err:
189 	if ((inode->i_state & I_NEW))
190 		discard_new_inode(inode);
191 	else
192 		iput(inode);
193 	return ERR_PTR(-ENOTDIR);
194 }
195 
196 const struct inode_operations ceph_file_iops = {
197 	.permission = ceph_permission,
198 	.setattr = ceph_setattr,
199 	.getattr = ceph_getattr,
200 	.listxattr = ceph_listxattr,
201 	.get_inode_acl = ceph_get_acl,
202 	.set_acl = ceph_set_acl,
203 };
204 
205 
206 /*
207  * We use a 'frag tree' to keep track of the MDS's directory fragments
208  * for a given inode (usually there is just a single fragment).  We
209  * need to know when a child frag is delegated to a new MDS, or when
210  * it is flagged as replicated, so we can direct our requests
211  * accordingly.
212  */
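
/*
 * An illustrative sketch (assuming the frag encoding from ceph_frag.h,
 * where the top 8 bits of a frag hold the number of significant hash
 * bits and the low 24 bits hold the hash-value prefix):
 *
 *	u32 root  = ceph_frag_make(0, 0);             // covers every hash
 *	u32 left  = ceph_frag_make_child(root, 1, 0); // 1 bit, value 0x000000
 *	u32 right = ceph_frag_make_child(root, 1, 1); // 1 bit, value 0x800000
 *
 * __ceph_choose_frag() below walks such splits from the root toward a
 * leaf until it finds the fragment containing a given dentry hash.
 */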
213 
214 /*
215  * find/create a frag in the tree
216  */
217 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
218 						    u32 f)
219 {
220 	struct rb_node **p;
221 	struct rb_node *parent = NULL;
222 	struct ceph_inode_frag *frag;
223 	int c;
224 
225 	p = &ci->i_fragtree.rb_node;
226 	while (*p) {
227 		parent = *p;
228 		frag = rb_entry(parent, struct ceph_inode_frag, node);
229 		c = ceph_frag_compare(f, frag->frag);
230 		if (c < 0)
231 			p = &(*p)->rb_left;
232 		else if (c > 0)
233 			p = &(*p)->rb_right;
234 		else
235 			return frag;
236 	}
237 
238 	frag = kmalloc(sizeof(*frag), GFP_NOFS);
239 	if (!frag)
240 		return ERR_PTR(-ENOMEM);
241 
242 	frag->frag = f;
243 	frag->split_by = 0;
244 	frag->mds = -1;
245 	frag->ndist = 0;
246 
247 	rb_link_node(&frag->node, parent, p);
248 	rb_insert_color(&frag->node, &ci->i_fragtree);
249 
250 	dout("get_or_create_frag added %llx.%llx frag %x\n",
251 	     ceph_vinop(&ci->netfs.inode), f);
252 	return frag;
253 }
254 
255 /*
256  * find a specific frag @f
257  */
258 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
259 {
260 	struct rb_node *n = ci->i_fragtree.rb_node;
261 
262 	while (n) {
263 		struct ceph_inode_frag *frag =
264 			rb_entry(n, struct ceph_inode_frag, node);
265 		int c = ceph_frag_compare(f, frag->frag);
266 		if (c < 0)
267 			n = n->rb_left;
268 		else if (c > 0)
269 			n = n->rb_right;
270 		else
271 			return frag;
272 	}
273 	return NULL;
274 }
275 
276 /*
277  * Choose frag containing the given value @v.  If @pfrag is
278  * specified, copy the frag delegation info to the caller if
279  * it is present.
280  */
281 static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
282 			      struct ceph_inode_frag *pfrag, int *found)
283 {
284 	u32 t = ceph_frag_make(0, 0);
285 	struct ceph_inode_frag *frag;
286 	unsigned nway, i;
287 	u32 n;
288 
289 	if (found)
290 		*found = 0;
291 
292 	while (1) {
293 		WARN_ON(!ceph_frag_contains_value(t, v));
294 		frag = __ceph_find_frag(ci, t);
295 		if (!frag)
296 			break; /* t is a leaf */
297 		if (frag->split_by == 0) {
298 			if (pfrag)
299 				memcpy(pfrag, frag, sizeof(*pfrag));
300 			if (found)
301 				*found = 1;
302 			break;
303 		}
304 
305 		/* choose child */
306 		nway = 1 << frag->split_by;
307 		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
308 		     frag->split_by, nway);
309 		for (i = 0; i < nway; i++) {
310 			n = ceph_frag_make_child(t, frag->split_by, i);
311 			if (ceph_frag_contains_value(n, v)) {
312 				t = n;
313 				break;
314 			}
315 		}
316 		BUG_ON(i == nway);
317 	}
318 	dout("choose_frag(%x) = %x\n", v, t);
319 
320 	return t;
321 }
322 
323 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
324 		     struct ceph_inode_frag *pfrag, int *found)
325 {
326 	u32 ret;
327 	mutex_lock(&ci->i_fragtree_mutex);
328 	ret = __ceph_choose_frag(ci, v, pfrag, found);
329 	mutex_unlock(&ci->i_fragtree_mutex);
330 	return ret;
331 }
332 
333 /*
334  * Process dirfrag (delegation) info from the mds.  Include leaf
335  * fragment in tree ONLY if ndist > 0.  Otherwise, only
336  * branches/splits are included in i_fragtree)
337  */
338 static int ceph_fill_dirfrag(struct inode *inode,
339 			     struct ceph_mds_reply_dirfrag *dirinfo)
340 {
341 	struct ceph_inode_info *ci = ceph_inode(inode);
342 	struct ceph_inode_frag *frag;
343 	u32 id = le32_to_cpu(dirinfo->frag);
344 	int mds = le32_to_cpu(dirinfo->auth);
345 	int ndist = le32_to_cpu(dirinfo->ndist);
346 	int diri_auth = -1;
347 	int i;
348 	int err = 0;
349 
350 	spin_lock(&ci->i_ceph_lock);
351 	if (ci->i_auth_cap)
352 		diri_auth = ci->i_auth_cap->mds;
353 	spin_unlock(&ci->i_ceph_lock);
354 
355 	if (mds == -1) /* CDIR_AUTH_PARENT */
356 		mds = diri_auth;
357 
358 	mutex_lock(&ci->i_fragtree_mutex);
359 	if (ndist == 0 && mds == diri_auth) {
360 		/* no delegation info needed. */
361 		frag = __ceph_find_frag(ci, id);
362 		if (!frag)
363 			goto out;
364 		if (frag->split_by == 0) {
365 			/* tree leaf, remove */
366 			dout("fill_dirfrag removed %llx.%llx frag %x"
367 			     " (no ref)\n", ceph_vinop(inode), id);
368 			rb_erase(&frag->node, &ci->i_fragtree);
369 			kfree(frag);
370 		} else {
371 			/* tree branch, keep and clear */
372 			dout("fill_dirfrag cleared %llx.%llx frag %x"
373 			     " referral\n", ceph_vinop(inode), id);
374 			frag->mds = -1;
375 			frag->ndist = 0;
376 		}
377 		goto out;
378 	}
379 
380 
381 	/* find/add this frag to store mds delegation info */
382 	frag = __get_or_create_frag(ci, id);
383 	if (IS_ERR(frag)) {
384 		/* this is not the end of the world; we can continue
385 		   with bad/inaccurate delegation info */
386 		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
387 		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
388 		err = -ENOMEM;
389 		goto out;
390 	}
391 
392 	frag->mds = mds;
393 	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
394 	for (i = 0; i < frag->ndist; i++)
395 		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
396 	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
397 	     ceph_vinop(inode), frag->frag, frag->ndist);
398 
399 out:
400 	mutex_unlock(&ci->i_fragtree_mutex);
401 	return err;
402 }
403 
404 static int frag_tree_split_cmp(const void *l, const void *r)
405 {
406 	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
407 	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
408 	return ceph_frag_compare(le32_to_cpu(ls->frag),
409 				 le32_to_cpu(rs->frag));
410 }
411 
412 static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
413 {
414 	if (!frag)
415 		return f == ceph_frag_make(0, 0);
416 	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
417 		return false;
418 	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
419 }
420 
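/*
 * Merge the split list reported by the MDS into i_fragtree.  A cheap
 * check (comparing split counts and sampling one random split) decides
 * whether the cached tree already matches; if not, the splits are sorted
 * and walked in parallel with the rbtree, updating split nodes and
 * discarding stale split/leaf entries that the new tree no longer implies.
 */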
421 static int ceph_fill_fragtree(struct inode *inode,
422 			      struct ceph_frag_tree_head *fragtree,
423 			      struct ceph_mds_reply_dirfrag *dirinfo)
424 {
425 	struct ceph_inode_info *ci = ceph_inode(inode);
426 	struct ceph_inode_frag *frag, *prev_frag = NULL;
427 	struct rb_node *rb_node;
428 	unsigned i, split_by, nsplits;
429 	u32 id;
430 	bool update = false;
431 
432 	mutex_lock(&ci->i_fragtree_mutex);
433 	nsplits = le32_to_cpu(fragtree->nsplits);
434 	if (nsplits != ci->i_fragtree_nsplits) {
435 		update = true;
436 	} else if (nsplits) {
437 		i = get_random_u32_below(nsplits);
438 		id = le32_to_cpu(fragtree->splits[i].frag);
439 		if (!__ceph_find_frag(ci, id))
440 			update = true;
441 	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
442 		rb_node = rb_first(&ci->i_fragtree);
443 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
444 		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
445 			update = true;
446 	}
447 	if (!update && dirinfo) {
448 		id = le32_to_cpu(dirinfo->frag);
449 		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
450 			update = true;
451 	}
452 	if (!update)
453 		goto out_unlock;
454 
455 	if (nsplits > 1) {
456 		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
457 		     frag_tree_split_cmp, NULL);
458 	}
459 
460 	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
461 	rb_node = rb_first(&ci->i_fragtree);
462 	for (i = 0; i < nsplits; i++) {
463 		id = le32_to_cpu(fragtree->splits[i].frag);
464 		split_by = le32_to_cpu(fragtree->splits[i].by);
465 		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
466 			pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
467 			       "frag %x split by %d\n", ceph_vinop(inode),
468 			       i, nsplits, id, split_by);
469 			continue;
470 		}
471 		frag = NULL;
472 		while (rb_node) {
473 			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
474 			if (ceph_frag_compare(frag->frag, id) >= 0) {
475 				if (frag->frag != id)
476 					frag = NULL;
477 				else
478 					rb_node = rb_next(rb_node);
479 				break;
480 			}
481 			rb_node = rb_next(rb_node);
482 			/* delete stale split/leaf node */
483 			if (frag->split_by > 0 ||
484 			    !is_frag_child(frag->frag, prev_frag)) {
485 				rb_erase(&frag->node, &ci->i_fragtree);
486 				if (frag->split_by > 0)
487 					ci->i_fragtree_nsplits--;
488 				kfree(frag);
489 			}
490 			frag = NULL;
491 		}
492 		if (!frag) {
493 			frag = __get_or_create_frag(ci, id);
494 			if (IS_ERR(frag))
495 				continue;
496 		}
497 		if (frag->split_by == 0)
498 			ci->i_fragtree_nsplits++;
499 		frag->split_by = split_by;
500 		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
501 		prev_frag = frag;
502 	}
503 	while (rb_node) {
504 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
505 		rb_node = rb_next(rb_node);
506 		/* delete stale split/leaf node */
507 		if (frag->split_by > 0 ||
508 		    !is_frag_child(frag->frag, prev_frag)) {
509 			rb_erase(&frag->node, &ci->i_fragtree);
510 			if (frag->split_by > 0)
511 				ci->i_fragtree_nsplits--;
512 			kfree(frag);
513 		}
514 	}
515 out_unlock:
516 	mutex_unlock(&ci->i_fragtree_mutex);
517 	return 0;
518 }
519 
520 /*
521  * initialize a newly allocated inode.
522  */
523 struct inode *ceph_alloc_inode(struct super_block *sb)
524 {
525 	struct ceph_inode_info *ci;
526 	int i;
527 
528 	ci = alloc_inode_sb(sb, ceph_inode_cachep, GFP_NOFS);
529 	if (!ci)
530 		return NULL;
531 
532 	dout("alloc_inode %p\n", &ci->netfs.inode);
533 
534 	/* Set parameters for the netfs library */
535 	netfs_inode_init(&ci->netfs, &ceph_netfs_ops);
536 
537 	spin_lock_init(&ci->i_ceph_lock);
538 
539 	ci->i_version = 0;
540 	ci->i_inline_version = 0;
541 	ci->i_time_warp_seq = 0;
542 	ci->i_ceph_flags = 0;
543 	atomic64_set(&ci->i_ordered_count, 1);
544 	atomic64_set(&ci->i_release_count, 1);
545 	atomic64_set(&ci->i_complete_seq[0], 0);
546 	atomic64_set(&ci->i_complete_seq[1], 0);
547 	ci->i_symlink = NULL;
548 
549 	ci->i_max_bytes = 0;
550 	ci->i_max_files = 0;
551 
552 	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
553 	memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
554 	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);
555 
556 	ci->i_fragtree = RB_ROOT;
557 	mutex_init(&ci->i_fragtree_mutex);
558 
559 	ci->i_xattrs.blob = NULL;
560 	ci->i_xattrs.prealloc_blob = NULL;
561 	ci->i_xattrs.dirty = false;
562 	ci->i_xattrs.index = RB_ROOT;
563 	ci->i_xattrs.count = 0;
564 	ci->i_xattrs.names_size = 0;
565 	ci->i_xattrs.vals_size = 0;
566 	ci->i_xattrs.version = 0;
567 	ci->i_xattrs.index_version = 0;
568 
569 	ci->i_caps = RB_ROOT;
570 	ci->i_auth_cap = NULL;
571 	ci->i_dirty_caps = 0;
572 	ci->i_flushing_caps = 0;
573 	INIT_LIST_HEAD(&ci->i_dirty_item);
574 	INIT_LIST_HEAD(&ci->i_flushing_item);
575 	ci->i_prealloc_cap_flush = NULL;
576 	INIT_LIST_HEAD(&ci->i_cap_flush_list);
577 	init_waitqueue_head(&ci->i_cap_wq);
578 	ci->i_hold_caps_max = 0;
579 	INIT_LIST_HEAD(&ci->i_cap_delay_list);
580 	INIT_LIST_HEAD(&ci->i_cap_snaps);
581 	ci->i_head_snapc = NULL;
582 	ci->i_snap_caps = 0;
583 
584 	ci->i_last_rd = ci->i_last_wr = jiffies - 3600 * HZ;
585 	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
586 		ci->i_nr_by_mode[i] = 0;
587 
588 	mutex_init(&ci->i_truncate_mutex);
589 	ci->i_truncate_seq = 0;
590 	ci->i_truncate_size = 0;
591 	ci->i_truncate_pending = 0;
592 
593 	ci->i_max_size = 0;
594 	ci->i_reported_size = 0;
595 	ci->i_wanted_max_size = 0;
596 	ci->i_requested_max_size = 0;
597 
598 	ci->i_pin_ref = 0;
599 	ci->i_rd_ref = 0;
600 	ci->i_rdcache_ref = 0;
601 	ci->i_wr_ref = 0;
602 	ci->i_wb_ref = 0;
603 	ci->i_fx_ref = 0;
604 	ci->i_wrbuffer_ref = 0;
605 	ci->i_wrbuffer_ref_head = 0;
606 	atomic_set(&ci->i_filelock_ref, 0);
607 	atomic_set(&ci->i_shared_gen, 1);
608 	ci->i_rdcache_gen = 0;
609 	ci->i_rdcache_revoking = 0;
610 
611 	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
612 	INIT_LIST_HEAD(&ci->i_unsafe_iops);
613 	spin_lock_init(&ci->i_unsafe_lock);
614 
615 	ci->i_snap_realm = NULL;
616 	INIT_LIST_HEAD(&ci->i_snap_realm_item);
617 	INIT_LIST_HEAD(&ci->i_snap_flush_item);
618 
619 	INIT_WORK(&ci->i_work, ceph_inode_work);
620 	ci->i_work_mask = 0;
621 	memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
622 #ifdef CONFIG_FS_ENCRYPTION
623 	ci->fscrypt_auth = NULL;
624 	ci->fscrypt_auth_len = 0;
625 #endif
626 	return &ci->netfs.inode;
627 }
628 
629 void ceph_free_inode(struct inode *inode)
630 {
631 	struct ceph_inode_info *ci = ceph_inode(inode);
632 
633 	kfree(ci->i_symlink);
634 #ifdef CONFIG_FS_ENCRYPTION
635 	kfree(ci->fscrypt_auth);
636 #endif
637 	kmem_cache_free(ceph_inode_cachep, ci);
638 }
639 
640 void ceph_evict_inode(struct inode *inode)
641 {
642 	struct ceph_inode_info *ci = ceph_inode(inode);
643 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
644 	struct ceph_inode_frag *frag;
645 	struct rb_node *n;
646 
647 	dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
648 
649 	percpu_counter_dec(&mdsc->metric.total_inodes);
650 
651 	truncate_inode_pages_final(&inode->i_data);
652 	if (inode->i_state & I_PINNING_FSCACHE_WB)
653 		ceph_fscache_unuse_cookie(inode, true);
654 	clear_inode(inode);
655 
656 	ceph_fscache_unregister_inode_cookie(ci);
657 	fscrypt_put_encryption_info(inode);
658 
659 	__ceph_remove_caps(ci);
660 
661 	if (__ceph_has_quota(ci, QUOTA_GET_ANY))
662 		ceph_adjust_quota_realms_count(inode, false);
663 
664 	/*
665 	 * we may still have a snap_realm reference if there are stray
666 	 * caps in i_snap_caps.
667 	 */
668 	if (ci->i_snap_realm) {
669 		if (ceph_snap(inode) == CEPH_NOSNAP) {
670 			dout(" dropping residual ref to snap realm %p\n",
671 			     ci->i_snap_realm);
672 			ceph_change_snap_realm(inode, NULL);
673 		} else {
674 			ceph_put_snapid_map(mdsc, ci->i_snapid_map);
675 			ci->i_snap_realm = NULL;
676 		}
677 	}
678 
679 	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
680 		frag = rb_entry(n, struct ceph_inode_frag, node);
681 		rb_erase(n, &ci->i_fragtree);
682 		kfree(frag);
683 	}
684 	ci->i_fragtree_nsplits = 0;
685 
686 	__ceph_destroy_xattrs(ci);
687 	if (ci->i_xattrs.blob)
688 		ceph_buffer_put(ci->i_xattrs.blob);
689 	if (ci->i_xattrs.prealloc_blob)
690 		ceph_buffer_put(ci->i_xattrs.prealloc_blob);
691 
692 	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
693 	ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
694 }
695 
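/*
 * i_blocks is accounted in 512-byte units, so round the byte size up to
 * the next 512-byte boundary and shift right by 9.
 */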
696 static inline blkcnt_t calc_inode_blocks(u64 size)
697 {
698 	return (size + (1<<9) - 1) >> 9;
699 }
700 
701 /*
702  * Helpers to fill in size, ctime, mtime, and atime.  We have to be
703  * careful because either the client or MDS may have more up to date
704  * info, depending on which capabilities are held, and whether
705  * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
706  * and size are monotonically increasing, except when utimes() or
707  * truncate() increments the corresponding _seq values.)
708  */
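/*
 * A rough worked example of the rules below: with a cached size of 100
 * at truncate_seq 3, an MDS report of (size 50, truncate_seq 4) wins and
 * may queue a local truncate, while (size 50, truncate_seq 3) is ignored
 * because within a given truncate_seq the size only grows.
 */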
709 int ceph_fill_file_size(struct inode *inode, int issued,
710 			u32 truncate_seq, u64 truncate_size, u64 size)
711 {
712 	struct ceph_inode_info *ci = ceph_inode(inode);
713 	int queue_trunc = 0;
714 	loff_t isize = i_size_read(inode);
715 
716 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
717 	    (truncate_seq == ci->i_truncate_seq && size > isize)) {
718 		dout("size %lld -> %llu\n", isize, size);
719 		if (size > 0 && S_ISDIR(inode->i_mode)) {
720 			pr_err("fill_file_size non-zero size for directory\n");
721 			size = 0;
722 		}
723 		i_size_write(inode, size);
724 		inode->i_blocks = calc_inode_blocks(size);
725 		/*
726 		 * If we're expanding, then we should be able to just update
727 		 * the existing cookie.
728 		 */
729 		if (size > isize)
730 			ceph_fscache_update(inode);
731 		ci->i_reported_size = size;
732 		if (truncate_seq != ci->i_truncate_seq) {
733 			dout("truncate_seq %u -> %u\n",
734 			     ci->i_truncate_seq, truncate_seq);
735 			ci->i_truncate_seq = truncate_seq;
736 
737 			/* the MDS should have revoked these caps */
738 			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
739 					       CEPH_CAP_FILE_RD |
740 					       CEPH_CAP_FILE_WR |
741 					       CEPH_CAP_FILE_LAZYIO));
742 			/*
743 			 * If we hold relevant caps, or in the case where we're
744 			 * not the only client referencing this file and we
745 			 * don't hold those caps, then we need to check whether
746 			 * the file is either opened or mmaped
747 			 */
748 			if ((issued & (CEPH_CAP_FILE_CACHE|
749 				       CEPH_CAP_FILE_BUFFER)) ||
750 			    mapping_mapped(inode->i_mapping) ||
751 			    __ceph_is_file_opened(ci)) {
752 				ci->i_truncate_pending++;
753 				queue_trunc = 1;
754 			}
755 		}
756 	}
757 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
758 	    ci->i_truncate_size != truncate_size) {
759 		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
760 		     truncate_size);
761 		ci->i_truncate_size = truncate_size;
762 	}
763 	return queue_trunc;
764 }
765 
766 void ceph_fill_file_time(struct inode *inode, int issued,
767 			 u64 time_warp_seq, struct timespec64 *ctime,
768 			 struct timespec64 *mtime, struct timespec64 *atime)
769 {
770 	struct ceph_inode_info *ci = ceph_inode(inode);
771 	int warn = 0;
772 
773 	if (issued & (CEPH_CAP_FILE_EXCL|
774 		      CEPH_CAP_FILE_WR|
775 		      CEPH_CAP_FILE_BUFFER|
776 		      CEPH_CAP_AUTH_EXCL|
777 		      CEPH_CAP_XATTR_EXCL)) {
778 		if (ci->i_version == 0 ||
779 		    timespec64_compare(ctime, &inode->i_ctime) > 0) {
780 			dout("ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
781 			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
782 			     ctime->tv_sec, ctime->tv_nsec);
783 			inode->i_ctime = *ctime;
784 		}
785 		if (ci->i_version == 0 ||
786 		    ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
787 			/* the MDS did a utimes() */
788 			dout("mtime %lld.%09ld -> %lld.%09ld "
789 			     "tw %d -> %d\n",
790 			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
791 			     mtime->tv_sec, mtime->tv_nsec,
792 			     ci->i_time_warp_seq, (int)time_warp_seq);
793 
794 			inode->i_mtime = *mtime;
795 			inode->i_atime = *atime;
796 			ci->i_time_warp_seq = time_warp_seq;
797 		} else if (time_warp_seq == ci->i_time_warp_seq) {
798 			/* nobody did utimes(); take the max */
799 			if (timespec64_compare(mtime, &inode->i_mtime) > 0) {
800 				dout("mtime %lld.%09ld -> %lld.%09ld inc\n",
801 				     inode->i_mtime.tv_sec,
802 				     inode->i_mtime.tv_nsec,
803 				     mtime->tv_sec, mtime->tv_nsec);
804 				inode->i_mtime = *mtime;
805 			}
806 			if (timespec64_compare(atime, &inode->i_atime) > 0) {
807 				dout("atime %lld.%09ld -> %lld.%09ld inc\n",
808 				     inode->i_atime.tv_sec,
809 				     inode->i_atime.tv_nsec,
810 				     atime->tv_sec, atime->tv_nsec);
811 				inode->i_atime = *atime;
812 			}
813 		} else if (issued & CEPH_CAP_FILE_EXCL) {
814 			/* we did a utimes(); ignore mds values */
815 		} else {
816 			warn = 1;
817 		}
818 	} else {
819 		/* we have no write|excl caps; whatever the MDS says is true */
820 		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
821 			inode->i_ctime = *ctime;
822 			inode->i_mtime = *mtime;
823 			inode->i_atime = *atime;
824 			ci->i_time_warp_seq = time_warp_seq;
825 		} else {
826 			warn = 1;
827 		}
828 	}
829 	if (warn) /* time_warp_seq shouldn't go backwards */
830 		dout("%p mds time_warp_seq %llu < %u\n",
831 		     inode, time_warp_seq, ci->i_time_warp_seq);
832 }
833 
834 /*
835  * Populate an inode based on info from mds.  May be called on new or
836  * existing inodes.
837  */
838 int ceph_fill_inode(struct inode *inode, struct page *locked_page,
839 		    struct ceph_mds_reply_info_in *iinfo,
840 		    struct ceph_mds_reply_dirfrag *dirinfo,
841 		    struct ceph_mds_session *session, int cap_fmode,
842 		    struct ceph_cap_reservation *caps_reservation)
843 {
844 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
845 	struct ceph_mds_reply_inode *info = iinfo->in;
846 	struct ceph_inode_info *ci = ceph_inode(inode);
847 	int issued, new_issued, info_caps;
848 	struct timespec64 mtime, atime, ctime;
849 	struct ceph_buffer *xattr_blob = NULL;
850 	struct ceph_buffer *old_blob = NULL;
851 	struct ceph_string *pool_ns = NULL;
852 	struct ceph_cap *new_cap = NULL;
853 	int err = 0;
854 	bool wake = false;
855 	bool queue_trunc = false;
856 	bool new_version = false;
857 	bool fill_inline = false;
858 	umode_t mode = le32_to_cpu(info->mode);
859 	dev_t rdev = le32_to_cpu(info->rdev);
860 
861 	lockdep_assert_held(&mdsc->snap_rwsem);
862 
863 	dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
864 	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
865 	     ci->i_version);
866 
867 	/* Once I_NEW is cleared, we can't change type or dev numbers */
868 	if (inode->i_state & I_NEW) {
869 		inode->i_mode = mode;
870 	} else {
871 		if (inode_wrong_type(inode, mode)) {
872 			pr_warn_once("inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
873 				     ceph_vinop(inode), inode->i_mode, mode);
874 			return -ESTALE;
875 		}
876 
877 		if ((S_ISCHR(mode) || S_ISBLK(mode)) && inode->i_rdev != rdev) {
878 			pr_warn_once("dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
879 				     ceph_vinop(inode), MAJOR(inode->i_rdev),
880 				     MINOR(inode->i_rdev), MAJOR(rdev),
881 				     MINOR(rdev));
882 			return -ESTALE;
883 		}
884 	}
885 
886 	info_caps = le32_to_cpu(info->cap.caps);
887 
888 	/* prealloc new cap struct */
889 	if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
890 		new_cap = ceph_get_cap(mdsc, caps_reservation);
891 		if (!new_cap)
892 			return -ENOMEM;
893 	}
894 
895 	/*
896 	 * prealloc xattr data, if it looks like we'll need it.  only
897 	 * if len > 4 (meaning there are actually xattrs; the first 4
898 	 * bytes are the xattr count).
899 	 */
900 	if (iinfo->xattr_len > 4) {
901 		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
902 		if (!xattr_blob)
903 			pr_err("%s ENOMEM xattr blob %d bytes\n", __func__,
904 			       iinfo->xattr_len);
905 	}
906 
907 	if (iinfo->pool_ns_len > 0)
908 		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
909 						     iinfo->pool_ns_len);
910 
911 	if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map)
912 		ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode));
913 
914 	spin_lock(&ci->i_ceph_lock);
915 
916 	/*
917 	 * provided version will be odd if the inode value is projected,
918 	 * and even if it is stable.  Skip the update if we have newer stable
919 	 * info (ours>=theirs, e.g. due to racing mds replies), unless
920 	 * we are getting projected (unstable) info (in which case the
921 	 * version is odd, and we want ours>theirs).
922 	 *   us   them
923 	 *   2    2     skip
924 	 *   3    2     skip
925 	 *   3    3     update
926 	 */
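	/*
	 * (ci->i_version & ~1) rounds our version down to its last stable
	 * value, so e.g. ours=3/theirs=3 passes the '>' test below while
	 * ours=2/theirs=2 and ours=3/theirs=2 do not, matching the table
	 * above.
	 */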
927 	if (ci->i_version == 0 ||
928 	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
929 	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
930 		new_version = true;
931 
932 	/* Update change_attribute */
933 	inode_set_max_iversion_raw(inode, iinfo->change_attr);
934 
935 	__ceph_caps_issued(ci, &issued);
936 	issued |= __ceph_caps_dirty(ci);
937 	new_issued = ~issued & info_caps;
938 
939 	/* directories have fl_stripe_unit set to zero */
940 	if (le32_to_cpu(info->layout.fl_stripe_unit))
941 		inode->i_blkbits =
942 			fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
943 	else
944 		inode->i_blkbits = CEPH_BLOCK_SHIFT;
945 
946 	__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
947 
948 #ifdef CONFIG_FS_ENCRYPTION
949 	if (iinfo->fscrypt_auth_len && (inode->i_state & I_NEW)) {
950 		kfree(ci->fscrypt_auth);
951 		ci->fscrypt_auth_len = iinfo->fscrypt_auth_len;
952 		ci->fscrypt_auth = iinfo->fscrypt_auth;
953 		iinfo->fscrypt_auth = NULL;
954 		iinfo->fscrypt_auth_len = 0;
955 		inode_set_flags(inode, S_ENCRYPTED, S_ENCRYPTED);
956 	}
957 #endif
958 
959 	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
960 	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
961 		inode->i_mode = mode;
962 		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
963 		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
964 		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
965 		     from_kuid(&init_user_ns, inode->i_uid),
966 		     from_kgid(&init_user_ns, inode->i_gid));
967 		ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
968 		ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
969 	}
970 
971 	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
972 	    (issued & CEPH_CAP_LINK_EXCL) == 0)
973 		set_nlink(inode, le32_to_cpu(info->nlink));
974 
975 	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
976 		/* be careful with mtime, atime, size */
977 		ceph_decode_timespec64(&atime, &info->atime);
978 		ceph_decode_timespec64(&mtime, &info->mtime);
979 		ceph_decode_timespec64(&ctime, &info->ctime);
980 		ceph_fill_file_time(inode, issued,
981 				le32_to_cpu(info->time_warp_seq),
982 				&ctime, &mtime, &atime);
983 	}
984 
985 	if (new_version || (info_caps & CEPH_CAP_FILE_SHARED)) {
986 		ci->i_files = le64_to_cpu(info->files);
987 		ci->i_subdirs = le64_to_cpu(info->subdirs);
988 	}
989 
990 	if (new_version ||
991 	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
992 		s64 old_pool = ci->i_layout.pool_id;
993 		struct ceph_string *old_ns;
994 
995 		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
996 		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
997 					lockdep_is_held(&ci->i_ceph_lock));
998 		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);
999 
1000 		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
1001 			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
1002 
1003 		pool_ns = old_ns;
1004 
1005 		queue_trunc = ceph_fill_file_size(inode, issued,
1006 					le32_to_cpu(info->truncate_seq),
1007 					le64_to_cpu(info->truncate_size),
1008 					le64_to_cpu(info->size));
1009 		/* only update max_size on auth cap */
1010 		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
1011 		    ci->i_max_size != le64_to_cpu(info->max_size)) {
1012 			dout("max_size %lld -> %llu\n", ci->i_max_size,
1013 					le64_to_cpu(info->max_size));
1014 			ci->i_max_size = le64_to_cpu(info->max_size);
1015 		}
1016 	}
1017 
1018 	/* layout and rstat are not tracked by capability; update them if
1019 	 * the inode info is from the auth mds */
1020 	if (new_version || (info->cap.flags & CEPH_CAP_FLAG_AUTH)) {
1021 		if (S_ISDIR(inode->i_mode)) {
1022 			ci->i_dir_layout = iinfo->dir_layout;
1023 			ci->i_rbytes = le64_to_cpu(info->rbytes);
1024 			ci->i_rfiles = le64_to_cpu(info->rfiles);
1025 			ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
1026 			ci->i_dir_pin = iinfo->dir_pin;
1027 			ci->i_rsnaps = iinfo->rsnaps;
1028 			ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
1029 		}
1030 	}
1031 
1032 	/* xattrs */
1033 	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
1034 	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))  &&
1035 	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
1036 		if (ci->i_xattrs.blob)
1037 			old_blob = ci->i_xattrs.blob;
1038 		ci->i_xattrs.blob = xattr_blob;
1039 		if (xattr_blob)
1040 			memcpy(ci->i_xattrs.blob->vec.iov_base,
1041 			       iinfo->xattr_data, iinfo->xattr_len);
1042 		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
1043 		ceph_forget_all_cached_acls(inode);
1044 		ceph_security_invalidate_secctx(inode);
1045 		xattr_blob = NULL;
1046 	}
1047 
1048 	/* finally update i_version */
1049 	if (le64_to_cpu(info->version) > ci->i_version)
1050 		ci->i_version = le64_to_cpu(info->version);
1051 
1052 	inode->i_mapping->a_ops = &ceph_aops;
1053 
1054 	switch (inode->i_mode & S_IFMT) {
1055 	case S_IFIFO:
1056 	case S_IFBLK:
1057 	case S_IFCHR:
1058 	case S_IFSOCK:
1059 		inode->i_blkbits = PAGE_SHIFT;
1060 		init_special_inode(inode, inode->i_mode, rdev);
1061 		inode->i_op = &ceph_file_iops;
1062 		break;
1063 	case S_IFREG:
1064 		inode->i_op = &ceph_file_iops;
1065 		inode->i_fop = &ceph_file_fops;
1066 		break;
1067 	case S_IFLNK:
1068 		inode->i_op = &ceph_symlink_iops;
1069 		if (!ci->i_symlink) {
1070 			u32 symlen = iinfo->symlink_len;
1071 			char *sym;
1072 
1073 			spin_unlock(&ci->i_ceph_lock);
1074 
1075 			if (symlen != i_size_read(inode)) {
1076 				pr_err("%s %llx.%llx BAD symlink "
1077 					"size %lld\n", __func__,
1078 					ceph_vinop(inode),
1079 					i_size_read(inode));
1080 				i_size_write(inode, symlen);
1081 				inode->i_blocks = calc_inode_blocks(symlen);
1082 			}
1083 
1084 			err = -ENOMEM;
1085 			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
1086 			if (!sym)
1087 				goto out;
1088 
1089 			spin_lock(&ci->i_ceph_lock);
1090 			if (!ci->i_symlink)
1091 				ci->i_symlink = sym;
1092 			else
1093 				kfree(sym); /* lost a race */
1094 		}
1095 		inode->i_link = ci->i_symlink;
1096 		break;
1097 	case S_IFDIR:
1098 		inode->i_op = &ceph_dir_iops;
1099 		inode->i_fop = &ceph_dir_fops;
1100 		break;
1101 	default:
1102 		pr_err("%s %llx.%llx BAD mode 0%o\n", __func__,
1103 		       ceph_vinop(inode), inode->i_mode);
1104 	}
1105 
1106 	/* were we issued a capability? */
1107 	if (info_caps) {
1108 		if (ceph_snap(inode) == CEPH_NOSNAP) {
1109 			ceph_add_cap(inode, session,
1110 				     le64_to_cpu(info->cap.cap_id),
1111 				     info_caps,
1112 				     le32_to_cpu(info->cap.wanted),
1113 				     le32_to_cpu(info->cap.seq),
1114 				     le32_to_cpu(info->cap.mseq),
1115 				     le64_to_cpu(info->cap.realm),
1116 				     info->cap.flags, &new_cap);
1117 
1118 			/* set dir completion flag? */
1119 			if (S_ISDIR(inode->i_mode) &&
1120 			    ci->i_files == 0 && ci->i_subdirs == 0 &&
1121 			    (info_caps & CEPH_CAP_FILE_SHARED) &&
1122 			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
1123 			    !__ceph_dir_is_complete(ci)) {
1124 				dout(" marking %p complete (empty)\n", inode);
1125 				i_size_write(inode, 0);
1126 				__ceph_dir_set_complete(ci,
1127 					atomic64_read(&ci->i_release_count),
1128 					atomic64_read(&ci->i_ordered_count));
1129 			}
1130 
1131 			wake = true;
1132 		} else {
1133 			dout(" %p got snap_caps %s\n", inode,
1134 			     ceph_cap_string(info_caps));
1135 			ci->i_snap_caps |= info_caps;
1136 		}
1137 	}
1138 
1139 	if (iinfo->inline_version > 0 &&
1140 	    iinfo->inline_version >= ci->i_inline_version) {
1141 		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1142 		ci->i_inline_version = iinfo->inline_version;
1143 		if (ceph_has_inline_data(ci) &&
1144 		    (locked_page || (info_caps & cache_caps)))
1145 			fill_inline = true;
1146 	}
1147 
1148 	if (cap_fmode >= 0) {
1149 		if (!info_caps)
1150 			pr_warn("mds issued no caps on %llx.%llx\n",
1151 				ceph_vinop(inode));
1152 		__ceph_touch_fmode(ci, mdsc, cap_fmode);
1153 	}
1154 
1155 	spin_unlock(&ci->i_ceph_lock);
1156 
1157 	ceph_fscache_register_inode_cookie(inode);
1158 
1159 	if (fill_inline)
1160 		ceph_fill_inline_data(inode, locked_page,
1161 				      iinfo->inline_data, iinfo->inline_len);
1162 
1163 	if (wake)
1164 		wake_up_all(&ci->i_cap_wq);
1165 
1166 	/* queue truncate if we saw i_size decrease */
1167 	if (queue_trunc)
1168 		ceph_queue_vmtruncate(inode);
1169 
1170 	/* populate frag tree */
1171 	if (S_ISDIR(inode->i_mode))
1172 		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);
1173 
1174 	/* update delegation info? */
1175 	if (dirinfo)
1176 		ceph_fill_dirfrag(inode, dirinfo);
1177 
1178 	err = 0;
1179 out:
1180 	if (new_cap)
1181 		ceph_put_cap(mdsc, new_cap);
1182 	ceph_buffer_put(old_blob);
1183 	ceph_buffer_put(xattr_blob);
1184 	ceph_put_string(pool_ns);
1185 	return err;
1186 }
1187 
1188 /*
1189  * caller should hold session s_mutex and dentry->d_lock.
1190  */
1191 static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
1192 				  struct ceph_mds_reply_lease *lease,
1193 				  struct ceph_mds_session *session,
1194 				  unsigned long from_time,
1195 				  struct ceph_mds_session **old_lease_session)
1196 {
1197 	struct ceph_dentry_info *di = ceph_dentry(dentry);
1198 	unsigned mask = le16_to_cpu(lease->mask);
1199 	long unsigned duration = le32_to_cpu(lease->duration_ms);
1200 	long unsigned ttl = from_time + (duration * HZ) / 1000;
1201 	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
1202 
1203 	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
1204 	     dentry, duration, ttl);
1205 
1206 	/* only track leases on regular dentries */
1207 	if (ceph_snap(dir) != CEPH_NOSNAP)
1208 		return;
1209 
1210 	if (mask & CEPH_LEASE_PRIMARY_LINK)
1211 		di->flags |= CEPH_DENTRY_PRIMARY_LINK;
1212 	else
1213 		di->flags &= ~CEPH_DENTRY_PRIMARY_LINK;
1214 
1215 	di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
1216 	if (!(mask & CEPH_LEASE_VALID)) {
1217 		__ceph_dentry_dir_lease_touch(di);
1218 		return;
1219 	}
1220 
1221 	if (di->lease_gen == atomic_read(&session->s_cap_gen) &&
1222 	    time_before(ttl, di->time))
1223 		return;  /* we already have a newer lease. */
1224 
1225 	if (di->lease_session && di->lease_session != session) {
1226 		*old_lease_session = di->lease_session;
1227 		di->lease_session = NULL;
1228 	}
1229 
1230 	if (!di->lease_session)
1231 		di->lease_session = ceph_get_mds_session(session);
1232 	di->lease_gen = atomic_read(&session->s_cap_gen);
1233 	di->lease_seq = le32_to_cpu(lease->seq);
1234 	di->lease_renew_after = half_ttl;
1235 	di->lease_renew_from = 0;
1236 	di->time = ttl;
1237 
1238 	__ceph_dentry_lease_touch(di);
1239 }
1240 
1241 static inline void update_dentry_lease(struct inode *dir, struct dentry *dentry,
1242 					struct ceph_mds_reply_lease *lease,
1243 					struct ceph_mds_session *session,
1244 					unsigned long from_time)
1245 {
1246 	struct ceph_mds_session *old_lease_session = NULL;
1247 	spin_lock(&dentry->d_lock);
1248 	__update_dentry_lease(dir, dentry, lease, session, from_time,
1249 			      &old_lease_session);
1250 	spin_unlock(&dentry->d_lock);
1251 	ceph_put_mds_session(old_lease_session);
1252 }
1253 
1254 /*
1255  * update dentry lease without having parent inode locked
1256  */
1257 static void update_dentry_lease_careful(struct dentry *dentry,
1258 					struct ceph_mds_reply_lease *lease,
1259 					struct ceph_mds_session *session,
1260 					unsigned long from_time,
1261 					char *dname, u32 dname_len,
1262 					struct ceph_vino *pdvino,
1263 					struct ceph_vino *ptvino)
1264 
1265 {
1266 	struct inode *dir;
1267 	struct ceph_mds_session *old_lease_session = NULL;
1268 
1269 	spin_lock(&dentry->d_lock);
1270 	/* make sure dentry's name matches target */
1271 	if (dentry->d_name.len != dname_len ||
1272 	    memcmp(dentry->d_name.name, dname, dname_len))
1273 		goto out_unlock;
1274 
1275 	dir = d_inode(dentry->d_parent);
1276 	/* make sure parent matches dvino */
1277 	if (!ceph_ino_compare(dir, pdvino))
1278 		goto out_unlock;
1279 
1280 	/* make sure dentry's inode matches target. NULL ptvino means that
1281 	 * we expect a negative dentry */
1282 	if (ptvino) {
1283 		if (d_really_is_negative(dentry))
1284 			goto out_unlock;
1285 		if (!ceph_ino_compare(d_inode(dentry), ptvino))
1286 			goto out_unlock;
1287 	} else {
1288 		if (d_really_is_positive(dentry))
1289 			goto out_unlock;
1290 	}
1291 
1292 	__update_dentry_lease(dir, dentry, lease, session,
1293 			      from_time, &old_lease_session);
1294 out_unlock:
1295 	spin_unlock(&dentry->d_lock);
1296 	ceph_put_mds_session(old_lease_session);
1297 }
1298 
1299 /*
1300  * splice a dentry to an inode.
1301  * caller must hold directory i_rwsem for this to be safe.
1302  */
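/*
 * d_splice_alias() below either attaches @in to *pdn and returns NULL, or
 * (for a directory that already has a connected alias) returns that
 * existing dentry, in which case *pdn is switched to it and the original
 * dentry is dropped.
 */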
1303 static int splice_dentry(struct dentry **pdn, struct inode *in)
1304 {
1305 	struct dentry *dn = *pdn;
1306 	struct dentry *realdn;
1307 
1308 	BUG_ON(d_inode(dn));
1309 
1310 	if (S_ISDIR(in->i_mode)) {
1311 		/* If inode is directory, d_splice_alias() below will remove
1312 		 * 'realdn' from its origin parent. We need to ensure that
1313 		 * origin parent's readdir cache will not reference 'realdn'
1314 		 */
1315 		realdn = d_find_any_alias(in);
1316 		if (realdn) {
1317 			struct ceph_dentry_info *di = ceph_dentry(realdn);
1318 			spin_lock(&realdn->d_lock);
1319 
1320 			realdn->d_op->d_prune(realdn);
1321 
1322 			di->time = jiffies;
1323 			di->lease_shared_gen = 0;
1324 			di->offset = 0;
1325 
1326 			spin_unlock(&realdn->d_lock);
1327 			dput(realdn);
1328 		}
1329 	}
1330 
1331 	/* dn must be unhashed */
1332 	if (!d_unhashed(dn))
1333 		d_drop(dn);
1334 	realdn = d_splice_alias(in, dn);
1335 	if (IS_ERR(realdn)) {
1336 		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
1337 		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
1338 		return PTR_ERR(realdn);
1339 	}
1340 
1341 	if (realdn) {
1342 		dout("dn %p (%d) spliced with %p (%d) "
1343 		     "inode %p ino %llx.%llx\n",
1344 		     dn, d_count(dn),
1345 		     realdn, d_count(realdn),
1346 		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
1347 		dput(dn);
1348 		*pdn = realdn;
1349 	} else {
1350 		BUG_ON(!ceph_dentry(dn));
1351 		dout("dn %p attached to %p ino %llx.%llx\n",
1352 		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
1353 	}
1354 	return 0;
1355 }
1356 
1357 /*
1358  * Incorporate results into the local cache.  This is either just
1359  * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
1360  * after a lookup).
1361  *
1362  * A reply may contain
1363  *         a directory inode along with a dentry.
1364  *  and/or a target inode
1365  *
1366  * Called with snap_rwsem (read).
1367  */
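/*
 * Roughly, for a successful lookup-style reply this means: fill the
 * parent directory inode, find or allocate the dentry for the replied
 * name, splice it to the target inode if it is currently negative, and
 * finally record the dentry lease.
 */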
1368 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
1369 {
1370 	struct ceph_mds_session *session = req->r_session;
1371 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1372 	struct inode *in = NULL;
1373 	struct ceph_vino tvino, dvino;
1374 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
1375 	int err = 0;
1376 
1377 	dout("fill_trace %p is_dentry %d is_target %d\n", req,
1378 	     rinfo->head->is_dentry, rinfo->head->is_target);
1379 
1380 	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
1381 		dout("fill_trace reply is empty!\n");
1382 		if (rinfo->head->result == 0 && req->r_parent)
1383 			ceph_invalidate_dir_request(req);
1384 		return 0;
1385 	}
1386 
1387 	if (rinfo->head->is_dentry) {
1388 		struct inode *dir = req->r_parent;
1389 
1390 		if (dir) {
1391 			err = ceph_fill_inode(dir, NULL, &rinfo->diri,
1392 					      rinfo->dirfrag, session, -1,
1393 					      &req->r_caps_reservation);
1394 			if (err < 0)
1395 				goto done;
1396 		} else {
1397 			WARN_ON_ONCE(1);
1398 		}
1399 
1400 		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
1401 		    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1402 		    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1403 			struct qstr dname;
1404 			struct dentry *dn, *parent;
1405 
1406 			BUG_ON(!rinfo->head->is_target);
1407 			BUG_ON(req->r_dentry);
1408 
1409 			parent = d_find_any_alias(dir);
1410 			BUG_ON(!parent);
1411 
1412 			dname.name = rinfo->dname;
1413 			dname.len = rinfo->dname_len;
1414 			dname.hash = full_name_hash(parent, dname.name, dname.len);
1415 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1416 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1417 retry_lookup:
1418 			dn = d_lookup(parent, &dname);
1419 			dout("d_lookup on parent=%p name=%.*s got %p\n",
1420 			     parent, dname.len, dname.name, dn);
1421 
1422 			if (!dn) {
1423 				dn = d_alloc(parent, &dname);
1424 				dout("d_alloc %p '%.*s' = %p\n", parent,
1425 				     dname.len, dname.name, dn);
1426 				if (!dn) {
1427 					dput(parent);
1428 					err = -ENOMEM;
1429 					goto done;
1430 				}
1431 				err = 0;
1432 			} else if (d_really_is_positive(dn) &&
1433 				   (ceph_ino(d_inode(dn)) != tvino.ino ||
1434 				    ceph_snap(d_inode(dn)) != tvino.snap)) {
1435 				dout(" dn %p points to wrong inode %p\n",
1436 				     dn, d_inode(dn));
1437 				ceph_dir_clear_ordered(dir);
1438 				d_delete(dn);
1439 				dput(dn);
1440 				goto retry_lookup;
1441 			}
1442 
1443 			req->r_dentry = dn;
1444 			dput(parent);
1445 		}
1446 	}
1447 
1448 	if (rinfo->head->is_target) {
1449 		/* Should be filled in by handle_reply */
1450 		BUG_ON(!req->r_target_inode);
1451 
1452 		in = req->r_target_inode;
1453 		err = ceph_fill_inode(in, req->r_locked_page, &rinfo->targeti,
1454 				NULL, session,
1455 				(!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1456 				 !test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) &&
1457 				 rinfo->head->result == 0) ?  req->r_fmode : -1,
1458 				&req->r_caps_reservation);
1459 		if (err < 0) {
1460 			pr_err("ceph_fill_inode badness %p %llx.%llx\n",
1461 				in, ceph_vinop(in));
1462 			req->r_target_inode = NULL;
1463 			if (in->i_state & I_NEW)
1464 				discard_new_inode(in);
1465 			else
1466 				iput(in);
1467 			goto done;
1468 		}
1469 		if (in->i_state & I_NEW)
1470 			unlock_new_inode(in);
1471 	}
1472 
1473 	/*
1474 	 * ignore null lease/binding on snapdir ENOENT, or else we
1475 	 * will have trouble splicing in the virtual snapdir later
1476 	 */
1477 	if (rinfo->head->is_dentry &&
1478             !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1479 	    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1480 	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
1481 					       fsc->mount_options->snapdir_name,
1482 					       req->r_dentry->d_name.len))) {
1483 		/*
1484 		 * lookup link rename   : null -> possibly existing inode
1485 		 * mknod symlink mkdir  : null -> new inode
1486 		 * unlink               : linked -> null
1487 		 */
1488 		struct inode *dir = req->r_parent;
1489 		struct dentry *dn = req->r_dentry;
1490 		bool have_dir_cap, have_lease;
1491 
1492 		BUG_ON(!dn);
1493 		BUG_ON(!dir);
1494 		BUG_ON(d_inode(dn->d_parent) != dir);
1495 
1496 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1497 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1498 
1499 		BUG_ON(ceph_ino(dir) != dvino.ino);
1500 		BUG_ON(ceph_snap(dir) != dvino.snap);
1501 
1502 		/* do we have a lease on the whole dir? */
1503 		have_dir_cap =
1504 			(le32_to_cpu(rinfo->diri.in->cap.caps) &
1505 			 CEPH_CAP_FILE_SHARED);
1506 
1507 		/* do we have a dn lease? */
1508 		have_lease = have_dir_cap ||
1509 			le32_to_cpu(rinfo->dlease->duration_ms);
1510 		if (!have_lease)
1511 			dout("fill_trace  no dentry lease or dir cap\n");
1512 
1513 		/* rename? */
1514 		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
1515 			struct inode *olddir = req->r_old_dentry_dir;
1516 			BUG_ON(!olddir);
1517 
1518 			dout(" src %p '%pd' dst %p '%pd'\n",
1519 			     req->r_old_dentry,
1520 			     req->r_old_dentry,
1521 			     dn, dn);
1522 			dout("fill_trace doing d_move %p -> %p\n",
1523 			     req->r_old_dentry, dn);
1524 
1525 			/* d_move screws up sibling dentries' offsets */
1526 			ceph_dir_clear_ordered(dir);
1527 			ceph_dir_clear_ordered(olddir);
1528 
1529 			d_move(req->r_old_dentry, dn);
1530 			dout(" src %p '%pd' dst %p '%pd'\n",
1531 			     req->r_old_dentry,
1532 			     req->r_old_dentry,
1533 			     dn, dn);
1534 
1535 			/* ensure target dentry is invalidated, despite
1536 			   rehashing bug in vfs_rename_dir */
1537 			ceph_invalidate_dentry_lease(dn);
1538 
1539 			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
1540 			     ceph_dentry(req->r_old_dentry)->offset);
1541 
1542 			/* swap r_dentry and r_old_dentry in case
1543 			 * splice_dentry() gets called later. This is safe
1544 			 * because no other place will use them */
1545 			req->r_dentry = req->r_old_dentry;
1546 			req->r_old_dentry = dn;
1547 			dn = req->r_dentry;
1548 		}
1549 
1550 		/* null dentry? */
1551 		if (!rinfo->head->is_target) {
1552 			dout("fill_trace null dentry\n");
1553 			if (d_really_is_positive(dn)) {
1554 				dout("d_delete %p\n", dn);
1555 				ceph_dir_clear_ordered(dir);
1556 				d_delete(dn);
1557 			} else if (have_lease) {
1558 				if (d_unhashed(dn))
1559 					d_add(dn, NULL);
1560 			}
1561 
1562 			if (!d_unhashed(dn) && have_lease)
1563 				update_dentry_lease(dir, dn,
1564 						    rinfo->dlease, session,
1565 						    req->r_request_started);
1566 			goto done;
1567 		}
1568 
1569 		/* attach proper inode */
1570 		if (d_really_is_negative(dn)) {
1571 			ceph_dir_clear_ordered(dir);
1572 			ihold(in);
1573 			err = splice_dentry(&req->r_dentry, in);
1574 			if (err < 0)
1575 				goto done;
1576 			dn = req->r_dentry;  /* may have spliced */
1577 		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
1578 			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
1579 			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
1580 			     ceph_vinop(in));
1581 			d_invalidate(dn);
1582 			have_lease = false;
1583 		}
1584 
1585 		if (have_lease) {
1586 			update_dentry_lease(dir, dn,
1587 					    rinfo->dlease, session,
1588 					    req->r_request_started);
1589 		}
1590 		dout(" final dn %p\n", dn);
1591 	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1592 		    req->r_op == CEPH_MDS_OP_MKSNAP) &&
1593 	           test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1594 		   !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1595 		struct inode *dir = req->r_parent;
1596 
1597 		/* fill out a snapdir LOOKUPSNAP dentry */
1598 		BUG_ON(!dir);
1599 		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
1600 		BUG_ON(!req->r_dentry);
1601 		dout(" linking snapped dir %p to dn %p\n", in, req->r_dentry);
1602 		ceph_dir_clear_ordered(dir);
1603 		ihold(in);
1604 		err = splice_dentry(&req->r_dentry, in);
1605 		if (err < 0)
1606 			goto done;
1607 	} else if (rinfo->head->is_dentry && req->r_dentry) {
1608 		/* parent inode is not locked, be careful */
1609 		struct ceph_vino *ptvino = NULL;
1610 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1611 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1612 		if (rinfo->head->is_target) {
1613 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1614 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1615 			ptvino = &tvino;
1616 		}
1617 		update_dentry_lease_careful(req->r_dentry, rinfo->dlease,
1618 					    session, req->r_request_started,
1619 					    rinfo->dname, rinfo->dname_len,
1620 					    &dvino, ptvino);
1621 	}
1622 done:
1623 	dout("fill_trace done err=%d\n", err);
1624 	return err;
1625 }
1626 
1627 /*
1628  * Prepopulate our cache with readdir results, leases, etc.
1629  */
1630 static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
1631 					   struct ceph_mds_session *session)
1632 {
1633 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1634 	int i, err = 0;
1635 
1636 	for (i = 0; i < rinfo->dir_nr; i++) {
1637 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1638 		struct ceph_vino vino;
1639 		struct inode *in;
1640 		int rc;
1641 
1642 		vino.ino = le64_to_cpu(rde->inode.in->ino);
1643 		vino.snap = le64_to_cpu(rde->inode.in->snapid);
1644 
1645 		in = ceph_get_inode(req->r_dentry->d_sb, vino, NULL);
1646 		if (IS_ERR(in)) {
1647 			err = PTR_ERR(in);
1648 			dout("new_inode badness got %d\n", err);
1649 			continue;
1650 		}
1651 		rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1652 				     -1, &req->r_caps_reservation);
1653 		if (rc < 0) {
1654 			pr_err("ceph_fill_inode badness on %p got %d\n",
1655 			       in, rc);
1656 			err = rc;
1657 			if (in->i_state & I_NEW) {
1658 				ihold(in);
1659 				discard_new_inode(in);
1660 			}
1661 		} else if (in->i_state & I_NEW) {
1662 			unlock_new_inode(in);
1663 		}
1664 
1665 		iput(in);
1666 	}
1667 
1668 	return err;
1669 }
1670 
1671 void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
1672 {
1673 	if (ctl->page) {
1674 		kunmap(ctl->page);
1675 		put_page(ctl->page);
1676 		ctl->page = NULL;
1677 	}
1678 }
1679 
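/*
 * The readdir cache stores raw dentry pointers in the directory's page
 * cache: each page holds PAGE_SIZE / sizeof(struct dentry *) slots and
 * ctl->index is the linear slot number (page = index / nsize, slot =
 * index % nsize).  Setting ctl->index to -1 disables the cache for the
 * rest of this readdir.
 */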
1680 static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1681 			      struct ceph_readdir_cache_control *ctl,
1682 			      struct ceph_mds_request *req)
1683 {
1684 	struct ceph_inode_info *ci = ceph_inode(dir);
1685 	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
1686 	unsigned idx = ctl->index % nsize;
1687 	pgoff_t pgoff = ctl->index / nsize;
1688 
1689 	if (!ctl->page || pgoff != page_index(ctl->page)) {
1690 		ceph_readdir_cache_release(ctl);
1691 		if (idx == 0)
1692 			ctl->page = grab_cache_page(&dir->i_data, pgoff);
1693 		else
1694 			ctl->page = find_lock_page(&dir->i_data, pgoff);
1695 		if (!ctl->page) {
1696 			ctl->index = -1;
1697 			return idx == 0 ? -ENOMEM : 0;
1698 		}
1699 		/* reading/filling the cache is serialized by
1700 		 * i_rwsem; no need to use the page lock */
1701 		unlock_page(ctl->page);
1702 		ctl->dentries = kmap(ctl->page);
1703 		if (idx == 0)
1704 			memset(ctl->dentries, 0, PAGE_SIZE);
1705 	}
1706 
1707 	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
1708 	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
1709 		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
1710 		ctl->dentries[idx] = dn;
1711 		ctl->index++;
1712 	} else {
1713 		dout("disable readdir cache\n");
1714 		ctl->index = -1;
1715 	}
1716 	return 0;
1717 }
1718 
1719 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1720 			     struct ceph_mds_session *session)
1721 {
1722 	struct dentry *parent = req->r_dentry;
1723 	struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
1724 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1725 	struct qstr dname;
1726 	struct dentry *dn;
1727 	struct inode *in;
1728 	int err = 0, skipped = 0, ret, i;
1729 	u32 frag = le32_to_cpu(req->r_args.readdir.frag);
1730 	u32 last_hash = 0;
1731 	u32 fpos_offset;
1732 	struct ceph_readdir_cache_control cache_ctl = {};
1733 
1734 	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
1735 		return readdir_prepopulate_inodes_only(req, session);
1736 
1737 	if (rinfo->hash_order) {
1738 		if (req->r_path2) {
1739 			last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1740 						  req->r_path2,
1741 						  strlen(req->r_path2));
1742 			last_hash = ceph_frag_value(last_hash);
1743 		} else if (rinfo->offset_hash) {
1744 			/* mds understands offset_hash */
1745 			WARN_ON_ONCE(req->r_readdir_offset != 2);
1746 			last_hash = le32_to_cpu(req->r_args.readdir.offset_hash);
1747 		}
1748 	}
1749 
1750 	if (rinfo->dir_dir &&
1751 	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1752 		dout("readdir_prepopulate got new frag %x -> %x\n",
1753 		     frag, le32_to_cpu(rinfo->dir_dir->frag));
1754 		frag = le32_to_cpu(rinfo->dir_dir->frag);
1755 		if (!rinfo->hash_order)
1756 			req->r_readdir_offset = 2;
1757 	}
1758 
1759 	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1760 		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1761 		     rinfo->dir_nr, parent);
1762 	} else {
1763 		dout("readdir_prepopulate %d items under dn %p\n",
1764 		     rinfo->dir_nr, parent);
1765 		if (rinfo->dir_dir)
1766 			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
1767 
1768 		if (ceph_frag_is_leftmost(frag) &&
1769 		    req->r_readdir_offset == 2 &&
1770 		    !(rinfo->hash_order && last_hash)) {
1771 			/* note dir version at start of readdir so we can
1772 			 * tell if any dentries get dropped */
1773 			req->r_dir_release_cnt =
1774 				atomic64_read(&ci->i_release_count);
1775 			req->r_dir_ordered_cnt =
1776 				atomic64_read(&ci->i_ordered_count);
1777 			req->r_readdir_cache_idx = 0;
1778 		}
1779 	}
1780 
1781 	cache_ctl.index = req->r_readdir_cache_idx;
1782 	fpos_offset = req->r_readdir_offset;
1783 
1784 	/* FIXME: release caps/leases if error occurs */
1785 	for (i = 0; i < rinfo->dir_nr; i++) {
1786 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1787 		struct ceph_vino tvino;
1788 
1789 		dname.name = rde->name;
1790 		dname.len = rde->name_len;
1791 		dname.hash = full_name_hash(parent, dname.name, dname.len);
1792 
1793 		tvino.ino = le64_to_cpu(rde->inode.in->ino);
1794 		tvino.snap = le64_to_cpu(rde->inode.in->snapid);
1795 
1796 		if (rinfo->hash_order) {
1797 			u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1798 						 rde->name, rde->name_len);
1799 			hash = ceph_frag_value(hash);
1800 			if (hash != last_hash)
1801 				fpos_offset = 2;
1802 			last_hash = hash;
1803 			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
1804 		} else {
1805 			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
1806 		}
1807 
1808 retry_lookup:
1809 		dn = d_lookup(parent, &dname);
1810 		dout("d_lookup on parent=%p name=%.*s got %p\n",
1811 		     parent, dname.len, dname.name, dn);
1812 
1813 		if (!dn) {
1814 			dn = d_alloc(parent, &dname);
1815 			dout("d_alloc %p '%.*s' = %p\n", parent,
1816 			     dname.len, dname.name, dn);
1817 			if (!dn) {
1818 				dout("d_alloc badness\n");
1819 				err = -ENOMEM;
1820 				goto out;
1821 			}
1822 		} else if (d_really_is_positive(dn) &&
1823 			   (ceph_ino(d_inode(dn)) != tvino.ino ||
1824 			    ceph_snap(d_inode(dn)) != tvino.snap)) {
1825 			struct ceph_dentry_info *di = ceph_dentry(dn);
1826 			dout(" dn %p points to wrong inode %p\n",
1827 			     dn, d_inode(dn));
1828 
1829 			spin_lock(&dn->d_lock);
1830 			if (di->offset > 0 &&
1831 			    di->lease_shared_gen ==
1832 			    atomic_read(&ci->i_shared_gen)) {
1833 				__ceph_dir_clear_ordered(ci);
1834 				di->offset = 0;
1835 			}
1836 			spin_unlock(&dn->d_lock);
1837 
1838 			d_delete(dn);
1839 			dput(dn);
1840 			goto retry_lookup;
1841 		}
1842 
1843 		/* inode */
1844 		if (d_really_is_positive(dn)) {
1845 			in = d_inode(dn);
1846 		} else {
1847 			in = ceph_get_inode(parent->d_sb, tvino, NULL);
1848 			if (IS_ERR(in)) {
1849 				dout("new_inode badness\n");
1850 				d_drop(dn);
1851 				dput(dn);
1852 				err = PTR_ERR(in);
1853 				goto out;
1854 			}
1855 		}
1856 
1857 		ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1858 				      -1, &req->r_caps_reservation);
1859 		if (ret < 0) {
1860 			pr_err("ceph_fill_inode badness on %p\n", in);
1861 			if (d_really_is_negative(dn)) {
1862 				if (in->i_state & I_NEW) {
1863 					ihold(in);
1864 					discard_new_inode(in);
1865 				}
1866 				iput(in);
1867 			}
1868 			d_drop(dn);
1869 			err = ret;
1870 			goto next_item;
1871 		}
1872 		if (in->i_state & I_NEW)
1873 			unlock_new_inode(in);
1874 
1875 		if (d_really_is_negative(dn)) {
1876 			if (ceph_security_xattr_deadlock(in)) {
1877 				dout(" skip splicing dn %p to inode %p"
1878 				     " (security xattr deadlock)\n", dn, in);
1879 				iput(in);
1880 				skipped++;
1881 				goto next_item;
1882 			}
1883 
1884 			err = splice_dentry(&dn, in);
1885 			if (err < 0)
1886 				goto next_item;
1887 		}
1888 
1889 		ceph_dentry(dn)->offset = rde->offset;
1890 
1891 		update_dentry_lease(d_inode(parent), dn,
1892 				    rde->lease, req->r_session,
1893 				    req->r_request_started);
1894 
1895 		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
1896 			ret = fill_readdir_cache(d_inode(parent), dn,
1897 						 &cache_ctl, req);
1898 			if (ret < 0)
1899 				err = ret;
1900 		}
1901 next_item:
1902 		dput(dn);
1903 	}
1904 out:
1905 	if (err == 0 && skipped == 0) {
1906 		set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
1907 		req->r_readdir_cache_idx = cache_ctl.index;
1908 	}
1909 	ceph_readdir_cache_release(&cache_ctl);
1910 	dout("readdir_prepopulate done\n");
1911 	return err;
1912 }
1913 
1914 bool ceph_inode_set_size(struct inode *inode, loff_t size)
1915 {
1916 	struct ceph_inode_info *ci = ceph_inode(inode);
1917 	bool ret;
1918 
1919 	spin_lock(&ci->i_ceph_lock);
1920 	dout("set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
1921 	i_size_write(inode, size);
1922 	ceph_fscache_update(inode);
1923 	inode->i_blocks = calc_inode_blocks(size);
1924 
1925 	ret = __ceph_should_report_size(ci);
1926 
1927 	spin_unlock(&ci->i_ceph_lock);
1928 
1929 	return ret;
1930 }
1931 
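/*
 * Editor's illustration (hedged sketch, not part of the original code): a
 * write path that extends the file might consume ceph_inode_set_size()'s
 * return value like this, kicking off a cap check when the new size should
 * be reported to the MDS.  The function name is hypothetical and
 * CHECK_CAPS_AUTHONLY is assumed to be the appropriate flag here.
 */
static void __maybe_unused example_extend_file(struct inode *inode, loff_t new_size)
{
	if (ceph_inode_set_size(inode, new_size))
		ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY);
}
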
1932 void ceph_queue_inode_work(struct inode *inode, int work_bit)
1933 {
1934 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1935 	struct ceph_inode_info *ci = ceph_inode(inode);
1936 	set_bit(work_bit, &ci->i_work_mask);
1937 
1938 	ihold(inode);
1939 	if (queue_work(fsc->inode_wq, &ci->i_work)) {
1940 		dout("queue_inode_work %p, mask=%lx\n", inode, ci->i_work_mask);
1941 	} else {
1942 		dout("queue_inode_work %p already queued, mask=%lx\n",
1943 		     inode, ci->i_work_mask);
1944 		iput(inode);
1945 	}
1946 }
1947 
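/*
 * Editor's illustration (a minimal sketch, not part of the original code):
 * a message handler that must not block would request asynchronous
 * writeback through the deferred-work mechanism above.  The wrapper name is
 * hypothetical; CEPH_I_WORK_WRITEBACK is the same bit that
 * ceph_inode_work() below tests.
 */
static void __maybe_unused example_queue_writeback(struct inode *inode)
{
	/* Sets the bit, grabs an inode reference and schedules ci->i_work. */
	ceph_queue_inode_work(inode, CEPH_I_WORK_WRITEBACK);
}
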
1948 static void ceph_do_invalidate_pages(struct inode *inode)
1949 {
1950 	struct ceph_inode_info *ci = ceph_inode(inode);
1951 	u32 orig_gen;
1952 	int check = 0;
1953 
1954 	ceph_fscache_invalidate(inode, false);
1955 
1956 	mutex_lock(&ci->i_truncate_mutex);
1957 
1958 	if (ceph_inode_is_shutdown(inode)) {
1959 		pr_warn_ratelimited("%s: inode %llx.%llx is shut down\n",
1960 				    __func__, ceph_vinop(inode));
1961 		mapping_set_error(inode->i_mapping, -EIO);
1962 		truncate_pagecache(inode, 0);
1963 		mutex_unlock(&ci->i_truncate_mutex);
1964 		goto out;
1965 	}
1966 
1967 	spin_lock(&ci->i_ceph_lock);
1968 	dout("invalidate_pages %p gen %d revoking %d\n", inode,
1969 	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
1970 	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1971 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1972 			check = 1;
1973 		spin_unlock(&ci->i_ceph_lock);
1974 		mutex_unlock(&ci->i_truncate_mutex);
1975 		goto out;
1976 	}
1977 	orig_gen = ci->i_rdcache_gen;
1978 	spin_unlock(&ci->i_ceph_lock);
1979 
1980 	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
1981 		pr_err("invalidate_inode_pages2 %llx.%llx failed\n",
1982 		       ceph_vinop(inode));
1983 	}
1984 
1985 	spin_lock(&ci->i_ceph_lock);
1986 	if (orig_gen == ci->i_rdcache_gen &&
1987 	    orig_gen == ci->i_rdcache_revoking) {
1988 		dout("invalidate_pages %p gen %d successful\n", inode,
1989 		     ci->i_rdcache_gen);
1990 		ci->i_rdcache_revoking--;
1991 		check = 1;
1992 	} else {
1993 		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1994 		     inode, orig_gen, ci->i_rdcache_gen,
1995 		     ci->i_rdcache_revoking);
1996 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1997 			check = 1;
1998 	}
1999 	spin_unlock(&ci->i_ceph_lock);
2000 	mutex_unlock(&ci->i_truncate_mutex);
2001 out:
2002 	if (check)
2003 		ceph_check_caps(ci, 0);
2004 }
2005 
2006 /*
2007  * Make sure any pending truncation is applied before doing anything
2008  * that may depend on it.
2009  */
2010 void __ceph_do_pending_vmtruncate(struct inode *inode)
2011 {
2012 	struct ceph_inode_info *ci = ceph_inode(inode);
2013 	u64 to;
2014 	int wrbuffer_refs, finish = 0;
2015 
2016 	mutex_lock(&ci->i_truncate_mutex);
2017 retry:
2018 	spin_lock(&ci->i_ceph_lock);
2019 	if (ci->i_truncate_pending == 0) {
2020 		dout("__do_pending_vmtruncate %p none pending\n", inode);
2021 		spin_unlock(&ci->i_ceph_lock);
2022 		mutex_unlock(&ci->i_truncate_mutex);
2023 		return;
2024 	}
2025 
2026 	/*
2027 	 * Make sure any dirty snapped pages are flushed before we
2028 	 * possibly truncate them, so write AND block!
2029 	 */
2030 	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
2031 		spin_unlock(&ci->i_ceph_lock);
2032 		dout("__do_pending_vmtruncate %p flushing snaps first\n",
2033 		     inode);
2034 		filemap_write_and_wait_range(&inode->i_data, 0,
2035 					     inode->i_sb->s_maxbytes);
2036 		goto retry;
2037 	}
2038 
2039 	/* there should be no reader or writer */
2040 	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
2041 
2042 	to = ci->i_truncate_size;
2043 	wrbuffer_refs = ci->i_wrbuffer_ref;
2044 	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
2045 	     ci->i_truncate_pending, to);
2046 	spin_unlock(&ci->i_ceph_lock);
2047 
2048 	ceph_fscache_resize(inode, to);
2049 	truncate_pagecache(inode, to);
2050 
2051 	spin_lock(&ci->i_ceph_lock);
2052 	if (to == ci->i_truncate_size) {
2053 		ci->i_truncate_pending = 0;
2054 		finish = 1;
2055 	}
2056 	spin_unlock(&ci->i_ceph_lock);
2057 	if (!finish)
2058 		goto retry;
2059 
2060 	mutex_unlock(&ci->i_truncate_mutex);
2061 
2062 	if (wrbuffer_refs == 0)
2063 		ceph_check_caps(ci, 0);
2064 
2065 	wake_up_all(&ci->i_cap_wq);
2066 }
2067 
2068 static void ceph_inode_work(struct work_struct *work)
2069 {
2070 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
2071 						 i_work);
2072 	struct inode *inode = &ci->netfs.inode;
2073 
2074 	if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
2075 		dout("writeback %p\n", inode);
2076 		filemap_fdatawrite(&inode->i_data);
2077 	}
2078 	if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
2079 		ceph_do_invalidate_pages(inode);
2080 
2081 	if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
2082 		__ceph_do_pending_vmtruncate(inode);
2083 
2084 	if (test_and_clear_bit(CEPH_I_WORK_CHECK_CAPS, &ci->i_work_mask))
2085 		ceph_check_caps(ci, 0);
2086 
2087 	if (test_and_clear_bit(CEPH_I_WORK_FLUSH_SNAPS, &ci->i_work_mask))
2088 		ceph_flush_snaps(ci, NULL);
2089 
2090 	iput(inode);
2091 }
2092 
2093 /*
2094  * symlinks
2095  */
2096 static const struct inode_operations ceph_symlink_iops = {
2097 	.get_link = simple_get_link,
2098 	.setattr = ceph_setattr,
2099 	.getattr = ceph_getattr,
2100 	.listxattr = ceph_listxattr,
2101 };
2102 
2103 int __ceph_setattr(struct inode *inode, struct iattr *attr,
2104 		   struct ceph_iattr *cia)
2105 {
2106 	struct ceph_inode_info *ci = ceph_inode(inode);
2107 	unsigned int ia_valid = attr->ia_valid;
2108 	struct ceph_mds_request *req;
2109 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
2110 	struct ceph_cap_flush *prealloc_cf;
2111 	int issued;
2112 	int release = 0, dirtied = 0;
2113 	int mask = 0;
2114 	int err = 0;
2115 	int inode_dirty_flags = 0;
2116 	bool lock_snap_rwsem = false;
2117 
2118 	prealloc_cf = ceph_alloc_cap_flush();
2119 	if (!prealloc_cf)
2120 		return -ENOMEM;
2121 
2122 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
2123 				       USE_AUTH_MDS);
2124 	if (IS_ERR(req)) {
2125 		ceph_free_cap_flush(prealloc_cf);
2126 		return PTR_ERR(req);
2127 	}
2128 
2129 	spin_lock(&ci->i_ceph_lock);
2130 	issued = __ceph_caps_issued(ci, NULL);
2131 
2132 	if (!ci->i_head_snapc &&
2133 	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
2134 		lock_snap_rwsem = true;
2135 		if (!down_read_trylock(&mdsc->snap_rwsem)) {
2136 			spin_unlock(&ci->i_ceph_lock);
2137 			down_read(&mdsc->snap_rwsem);
2138 			spin_lock(&ci->i_ceph_lock);
2139 			issued = __ceph_caps_issued(ci, NULL);
2140 		}
2141 	}
2142 
2143 	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
2144 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
2145 	if (cia && cia->fscrypt_auth) {
2146 		u32 len = ceph_fscrypt_auth_len(cia->fscrypt_auth);
2147 
2148 		if (len > sizeof(*cia->fscrypt_auth)) {
2149 			err = -EINVAL;
2150 			spin_unlock(&ci->i_ceph_lock);
2151 			goto out;
2152 		}
2153 
2154 		dout("setattr %llx:%llx fscrypt_auth len %u to %u\n",
2155 			ceph_vinop(inode), ci->fscrypt_auth_len, len);
2156 
2157 		/* It should never be re-set once set */
2158 		WARN_ON_ONCE(ci->fscrypt_auth);
2159 
2160 		if (issued & CEPH_CAP_AUTH_EXCL) {
2161 			dirtied |= CEPH_CAP_AUTH_EXCL;
2162 			kfree(ci->fscrypt_auth);
2163 			ci->fscrypt_auth = (u8 *)cia->fscrypt_auth;
2164 			ci->fscrypt_auth_len = len;
2165 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2166 			   ci->fscrypt_auth_len != len ||
2167 			   memcmp(ci->fscrypt_auth, cia->fscrypt_auth, len)) {
2168 			req->r_fscrypt_auth = cia->fscrypt_auth;
2169 			mask |= CEPH_SETATTR_FSCRYPT_AUTH;
2170 			release |= CEPH_CAP_AUTH_SHARED;
2171 		}
2172 		cia->fscrypt_auth = NULL;
2173 	}
2174 #else
2175 	if (cia && cia->fscrypt_auth) {
2176 		err = -EINVAL;
2177 		spin_unlock(&ci->i_ceph_lock);
2178 		goto out;
2179 	}
2180 #endif /* CONFIG_FS_ENCRYPTION */
2181 
2182 	if (ia_valid & ATTR_UID) {
2183 		dout("setattr %p uid %d -> %d\n", inode,
2184 		     from_kuid(&init_user_ns, inode->i_uid),
2185 		     from_kuid(&init_user_ns, attr->ia_uid));
2186 		if (issued & CEPH_CAP_AUTH_EXCL) {
2187 			inode->i_uid = attr->ia_uid;
2188 			dirtied |= CEPH_CAP_AUTH_EXCL;
2189 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2190 			   !uid_eq(attr->ia_uid, inode->i_uid)) {
2191 			req->r_args.setattr.uid = cpu_to_le32(
2192 				from_kuid(&init_user_ns, attr->ia_uid));
2193 			mask |= CEPH_SETATTR_UID;
2194 			release |= CEPH_CAP_AUTH_SHARED;
2195 		}
2196 	}
2197 	if (ia_valid & ATTR_GID) {
2198 		dout("setattr %p gid %d -> %d\n", inode,
2199 		     from_kgid(&init_user_ns, inode->i_gid),
2200 		     from_kgid(&init_user_ns, attr->ia_gid));
2201 		if (issued & CEPH_CAP_AUTH_EXCL) {
2202 			inode->i_gid = attr->ia_gid;
2203 			dirtied |= CEPH_CAP_AUTH_EXCL;
2204 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2205 			   !gid_eq(attr->ia_gid, inode->i_gid)) {
2206 			req->r_args.setattr.gid = cpu_to_le32(
2207 				from_kgid(&init_user_ns, attr->ia_gid));
2208 			mask |= CEPH_SETATTR_GID;
2209 			release |= CEPH_CAP_AUTH_SHARED;
2210 		}
2211 	}
2212 	if (ia_valid & ATTR_MODE) {
2213 		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
2214 		     attr->ia_mode);
2215 		if (issued & CEPH_CAP_AUTH_EXCL) {
2216 			inode->i_mode = attr->ia_mode;
2217 			dirtied |= CEPH_CAP_AUTH_EXCL;
2218 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2219 			   attr->ia_mode != inode->i_mode) {
2220 			inode->i_mode = attr->ia_mode;
2221 			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
2222 			mask |= CEPH_SETATTR_MODE;
2223 			release |= CEPH_CAP_AUTH_SHARED;
2224 		}
2225 	}
2226 
2227 	if (ia_valid & ATTR_ATIME) {
2228 		dout("setattr %p atime %lld.%ld -> %lld.%ld\n", inode,
2229 		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
2230 		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
2231 		if (issued & CEPH_CAP_FILE_EXCL) {
2232 			ci->i_time_warp_seq++;
2233 			inode->i_atime = attr->ia_atime;
2234 			dirtied |= CEPH_CAP_FILE_EXCL;
2235 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2236 			   timespec64_compare(&inode->i_atime,
2237 					    &attr->ia_atime) < 0) {
2238 			inode->i_atime = attr->ia_atime;
2239 			dirtied |= CEPH_CAP_FILE_WR;
2240 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2241 			   !timespec64_equal(&inode->i_atime, &attr->ia_atime)) {
2242 			ceph_encode_timespec64(&req->r_args.setattr.atime,
2243 					       &attr->ia_atime);
2244 			mask |= CEPH_SETATTR_ATIME;
2245 			release |= CEPH_CAP_FILE_SHARED |
2246 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2247 		}
2248 	}
2249 	if (ia_valid & ATTR_SIZE) {
2250 		loff_t isize = i_size_read(inode);
2251 
2252 		dout("setattr %p size %lld -> %lld\n", inode, isize, attr->ia_size);
2253 		if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size >= isize) {
2254 			if (attr->ia_size > isize) {
2255 				i_size_write(inode, attr->ia_size);
2256 				inode->i_blocks = calc_inode_blocks(attr->ia_size);
2257 				ci->i_reported_size = attr->ia_size;
2258 				dirtied |= CEPH_CAP_FILE_EXCL;
2259 				ia_valid |= ATTR_MTIME;
2260 			}
2261 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2262 			   attr->ia_size != isize) {
2263 			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
2264 			req->r_args.setattr.old_size = cpu_to_le64(isize);
2265 			mask |= CEPH_SETATTR_SIZE;
2266 			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2267 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2268 		}
2269 	}
2270 	if (ia_valid & ATTR_MTIME) {
2271 		dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
2272 		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
2273 		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
2274 		if (issued & CEPH_CAP_FILE_EXCL) {
2275 			ci->i_time_warp_seq++;
2276 			inode->i_mtime = attr->ia_mtime;
2277 			dirtied |= CEPH_CAP_FILE_EXCL;
2278 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2279 			   timespec64_compare(&inode->i_mtime,
2280 					    &attr->ia_mtime) < 0) {
2281 			inode->i_mtime = attr->ia_mtime;
2282 			dirtied |= CEPH_CAP_FILE_WR;
2283 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2284 			   !timespec64_equal(&inode->i_mtime, &attr->ia_mtime)) {
2285 			ceph_encode_timespec64(&req->r_args.setattr.mtime,
2286 					       &attr->ia_mtime);
2287 			mask |= CEPH_SETATTR_MTIME;
2288 			release |= CEPH_CAP_FILE_SHARED |
2289 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2290 		}
2291 	}
2292 
2293 	/* these do nothing */
2294 	if (ia_valid & ATTR_CTIME) {
2295 		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
2296 					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
2297 		dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode,
2298 		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
2299 		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2300 		     only ? "ctime only" : "ignored");
2301 		if (only) {
2302 			/*
2303 			 * if the kernel wants to dirty ctime but nothing else,
2304 			 * we need to choose a cap to dirty under, or do
2305 			 * an almost-no-op setattr
2306 			 */
2307 			if (issued & CEPH_CAP_AUTH_EXCL)
2308 				dirtied |= CEPH_CAP_AUTH_EXCL;
2309 			else if (issued & CEPH_CAP_FILE_EXCL)
2310 				dirtied |= CEPH_CAP_FILE_EXCL;
2311 			else if (issued & CEPH_CAP_XATTR_EXCL)
2312 				dirtied |= CEPH_CAP_XATTR_EXCL;
2313 			else
2314 				mask |= CEPH_SETATTR_CTIME;
2315 		}
2316 	}
2317 	if (ia_valid & ATTR_FILE)
2318 		dout("setattr %p ATTR_FILE ... hrm!\n", inode);
2319 
2320 	if (dirtied) {
2321 		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2322 							   &prealloc_cf);
2323 		inode->i_ctime = attr->ia_ctime;
2324 		inode_inc_iversion_raw(inode);
2325 	}
2326 
2327 	release &= issued;
2328 	spin_unlock(&ci->i_ceph_lock);
2329 	if (lock_snap_rwsem)
2330 		up_read(&mdsc->snap_rwsem);
2331 
2332 	if (inode_dirty_flags)
2333 		__mark_inode_dirty(inode, inode_dirty_flags);
2334 
2335 	if (mask) {
2336 		req->r_inode = inode;
2337 		ihold(inode);
2338 		req->r_inode_drop = release;
2339 		req->r_args.setattr.mask = cpu_to_le32(mask);
2340 		req->r_num_caps = 1;
2341 		req->r_stamp = attr->ia_ctime;
2342 		err = ceph_mdsc_do_request(mdsc, NULL, req);
2343 	}
2344 out:
2345 	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
2346 	     ceph_cap_string(dirtied), mask);
2347 
2348 	ceph_mdsc_put_request(req);
2349 	ceph_free_cap_flush(prealloc_cf);
2350 
2351 	if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
2352 		__ceph_do_pending_vmtruncate(inode);
2353 
2354 	return err;
2355 }
2356 
2357 /*
2358  * setattr
2359  */
2360 int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
2361 		 struct iattr *attr)
2362 {
2363 	struct inode *inode = d_inode(dentry);
2364 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2365 	int err;
2366 
2367 	if (ceph_snap(inode) != CEPH_NOSNAP)
2368 		return -EROFS;
2369 
2370 	if (ceph_inode_is_shutdown(inode))
2371 		return -ESTALE;
2372 
2373 	err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
2374 	if (err != 0)
2375 		return err;
2376 
2377 	if ((attr->ia_valid & ATTR_SIZE) &&
2378 	    attr->ia_size > max(i_size_read(inode), fsc->max_file_size))
2379 		return -EFBIG;
2380 
2381 	if ((attr->ia_valid & ATTR_SIZE) &&
2382 	    ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
2383 		return -EDQUOT;
2384 
2385 	err = __ceph_setattr(inode, attr, NULL);
2386 
2387 	if (err >= 0 && (attr->ia_valid & ATTR_MODE))
2388 		err = posix_acl_chmod(&nop_mnt_idmap, dentry, attr->ia_mode);
2389 
2390 	return err;
2391 }
2392 
2393 int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
2394 {
2395 	int issued = ceph_caps_issued(ceph_inode(inode));
2396 
2397 	/*
2398 	 * If any 'x' caps are issued we can just choose the auth MDS
2399 	 * instead of a random replica MDS. Only when the Locker is in
2400 	 * the LOCK_EXEC state can the loner client get the 'x' caps,
2401 	 * and if we send a getattr request to any replica MDS it must
2402 	 * auth-pin and try to rdlock from the auth MDS, which then has
2403 	 * to transition its Locker state to LOCK_SYNC; afterwards the
2404 	 * lock state changes back again.
2405 	 *
2406 	 * This Locker state transition is expensive and usually
2407 	 * requires revoking caps from clients.
2408 	 *
2409 	 * For the 'Xs' caps used by getxattr we also choose the auth
2410 	 * MDS, because the MDS side is buggy: setxattr does not notify
2411 	 * the replica MDSes when values change, so a replica MDS may
2412 	 * return stale values. Though this will be fixed in the MDS
2413 	 * code, choosing the auth MDS still makes sense for older
2414 	 * ceph versions.
2415 	 */
2416 	if (((mask & CEPH_CAP_ANY_SHARED) && (issued & CEPH_CAP_ANY_EXCL))
2417 	    || (mask & (CEPH_STAT_RSTAT | CEPH_STAT_CAP_XATTR)))
2418 		return USE_AUTH_MDS;
2419 	else
2420 		return USE_ANY_MDS;
2421 }
2422 
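/*
 * Editor's illustration (hedged sketch, not part of the original code):
 * given the policy above, an xattr-related stat is steered to the auth MDS,
 * while plain shared-state lookups may go to any replica.  The helper name
 * is hypothetical.
 */
static int __maybe_unused example_mds_choice(struct inode *inode)
{
	/* CEPH_STAT_CAP_XATTR forces USE_AUTH_MDS per the comment above. */
	return ceph_try_to_choose_auth_mds(inode, CEPH_STAT_CAP_XATTR);
}
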
2423 /*
2424  * Verify that we have a lease on the given mask.  If not,
2425  * do a getattr against an mds.
2426  */
2427 int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
2428 		      int mask, bool force)
2429 {
2430 	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
2431 	struct ceph_mds_client *mdsc = fsc->mdsc;
2432 	struct ceph_mds_request *req;
2433 	int mode;
2434 	int err;
2435 
2436 	if (ceph_snap(inode) == CEPH_SNAPDIR) {
2437 		dout("do_getattr inode %p SNAPDIR\n", inode);
2438 		return 0;
2439 	}
2440 
2441 	dout("do_getattr inode %p mask %s mode 0%o\n",
2442 	     inode, ceph_cap_string(mask), inode->i_mode);
2443 	if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
2444 		return 0;
2445 
2446 	mode = ceph_try_to_choose_auth_mds(inode, mask);
2447 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
2448 	if (IS_ERR(req))
2449 		return PTR_ERR(req);
2450 	req->r_inode = inode;
2451 	ihold(inode);
2452 	req->r_num_caps = 1;
2453 	req->r_args.getattr.mask = cpu_to_le32(mask);
2454 	req->r_locked_page = locked_page;
2455 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2456 	if (locked_page && err == 0) {
2457 		u64 inline_version = req->r_reply_info.targeti.inline_version;
2458 		if (inline_version == 0) {
2459 			/* the reply is supposed to contain inline data */
2460 			err = -EINVAL;
2461 		} else if (inline_version == CEPH_INLINE_NONE ||
2462 			   inline_version == 1) {
2463 			err = -ENODATA;
2464 		} else {
2465 			err = req->r_reply_info.targeti.inline_len;
2466 		}
2467 	}
2468 	ceph_mdsc_put_request(req);
2469 	dout("do_getattr result=%d\n", err);
2470 	return err;
2471 }
2472 
2473 int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
2474 		      size_t size)
2475 {
2476 	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
2477 	struct ceph_mds_client *mdsc = fsc->mdsc;
2478 	struct ceph_mds_request *req;
2479 	int mode = USE_AUTH_MDS;
2480 	int err;
2481 	char *xattr_value;
2482 	size_t xattr_value_len;
2483 
2484 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETVXATTR, mode);
2485 	if (IS_ERR(req)) {
2486 		err = -ENOMEM;
2487 		goto out;
2488 	}
2489 
2490 	req->r_feature_needed = CEPHFS_FEATURE_OP_GETVXATTR;
2491 	req->r_path2 = kstrdup(name, GFP_NOFS);
2492 	if (!req->r_path2) {
2493 		err = -ENOMEM;
2494 		goto put;
2495 	}
2496 
2497 	ihold(inode);
2498 	req->r_inode = inode;
2499 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2500 	if (err < 0)
2501 		goto put;
2502 
2503 	xattr_value = req->r_reply_info.xattr_info.xattr_value;
2504 	xattr_value_len = req->r_reply_info.xattr_info.xattr_value_len;
2505 
2506 	dout("do_getvxattr xattr_value_len:%zu, size:%zu\n", xattr_value_len, size);
2507 
2508 	err = (int)xattr_value_len;
2509 	if (size == 0)
2510 		goto put;
2511 
2512 	if (xattr_value_len > size) {
2513 		err = -ERANGE;
2514 		goto put;
2515 	}
2516 
2517 	memcpy(value, xattr_value, xattr_value_len);
2518 put:
2519 	ceph_mdsc_put_request(req);
2520 out:
2521 	dout("do_getvxattr result=%d\n", err);
2522 	return err;
2523 }
2524 
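/*
 * Editor's illustration (hedged sketch, not part of the original code): the
 * usual two-step pattern for the helper above -- probe with size 0 to learn
 * the vxattr length, then fetch into a buffer of that size.  Function and
 * variable names are hypothetical.
 */
static int __maybe_unused example_fetch_vxattr(struct inode *inode,
					       const char *name, void **bufp)
{
	int len = ceph_do_getvxattr(inode, name, NULL, 0);
	void *buf;

	if (len <= 0)
		return len;
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* may still return -ERANGE if the value grew in between */
	len = ceph_do_getvxattr(inode, name, buf, len);
	if (len < 0) {
		kfree(buf);
		return len;
	}
	*bufp = buf;
	return len;
}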
2525 
2526 /*
2527  * Check inode permissions.  We verify we have a valid value for
2528  * the AUTH cap, then call the generic handler.
2529  */
2530 int ceph_permission(struct mnt_idmap *idmap, struct inode *inode,
2531 		    int mask)
2532 {
2533 	int err;
2534 
2535 	if (mask & MAY_NOT_BLOCK)
2536 		return -ECHILD;
2537 
2538 	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
2539 
2540 	if (!err)
2541 		err = generic_permission(&nop_mnt_idmap, inode, mask);
2542 	return err;
2543 }
2544 
2545 /* Craft a mask of needed caps given a set of requested statx attrs. */
2546 static int statx_to_caps(u32 want, umode_t mode)
2547 {
2548 	int mask = 0;
2549 
2550 	if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME|STATX_CHANGE_COOKIE))
2551 		mask |= CEPH_CAP_AUTH_SHARED;
2552 
2553 	if (want & (STATX_NLINK|STATX_CTIME|STATX_CHANGE_COOKIE)) {
2554 		/*
2555 		 * The link count for directories depends on inode->i_subdirs,
2556 		 * and that is only updated when Fs caps are held.
2557 		 */
2558 		if (S_ISDIR(mode))
2559 			mask |= CEPH_CAP_FILE_SHARED;
2560 		else
2561 			mask |= CEPH_CAP_LINK_SHARED;
2562 	}
2563 
2564 	if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|STATX_BLOCKS|STATX_CHANGE_COOKIE))
2565 		mask |= CEPH_CAP_FILE_SHARED;
2566 
2567 	if (want & (STATX_CTIME|STATX_CHANGE_COOKIE))
2568 		mask |= CEPH_CAP_XATTR_SHARED;
2569 
2570 	return mask;
2571 }
2572 
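/*
 * Editor's illustration (a minimal sketch, not part of the original code):
 * for a statx() of just size and mtime on a regular file the mapping above
 * yields only CEPH_CAP_FILE_SHARED, so the getattr can be skipped entirely
 * whenever Fs is already held.  The helper name is hypothetical.
 */
static int __maybe_unused example_statx_size_mtime_caps(void)
{
	return statx_to_caps(STATX_SIZE | STATX_MTIME, S_IFREG);
}
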
2573 /*
2574  * Get all the attributes. If we have sufficient caps for the requested attrs,
2575  * then we can avoid talking to the MDS at all.
2576  */
2577 int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
2578 		 struct kstat *stat, u32 request_mask, unsigned int flags)
2579 {
2580 	struct inode *inode = d_inode(path->dentry);
2581 	struct super_block *sb = inode->i_sb;
2582 	struct ceph_inode_info *ci = ceph_inode(inode);
2583 	u32 valid_mask = STATX_BASIC_STATS;
2584 	int err = 0;
2585 
2586 	if (ceph_inode_is_shutdown(inode))
2587 		return -ESTALE;
2588 
2589 	/* Skip the getattr altogether if we're asked not to sync */
2590 	if ((flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC) {
2591 		err = ceph_do_getattr(inode,
2592 				statx_to_caps(request_mask, inode->i_mode),
2593 				flags & AT_STATX_FORCE_SYNC);
2594 		if (err)
2595 			return err;
2596 	}
2597 
2598 	generic_fillattr(&nop_mnt_idmap, inode, stat);
2599 	stat->ino = ceph_present_inode(inode);
2600 
2601 	/*
2602 	 * btime on newly-allocated inodes is 0, so if this is still set to
2603 	 * that, then assume that it's not valid.
2604 	 */
2605 	if (ci->i_btime.tv_sec || ci->i_btime.tv_nsec) {
2606 		stat->btime = ci->i_btime;
2607 		valid_mask |= STATX_BTIME;
2608 	}
2609 
2610 	if (request_mask & STATX_CHANGE_COOKIE) {
2611 		stat->change_cookie = inode_peek_iversion_raw(inode);
2612 		valid_mask |= STATX_CHANGE_COOKIE;
2613 	}
2614 
2615 	if (ceph_snap(inode) == CEPH_NOSNAP)
2616 		stat->dev = sb->s_dev;
2617 	else
2618 		stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
2619 
2620 	if (S_ISDIR(inode->i_mode)) {
2621 		if (ceph_test_mount_opt(ceph_sb_to_client(sb), RBYTES)) {
2622 			stat->size = ci->i_rbytes;
2623 		} else if (ceph_snap(inode) == CEPH_SNAPDIR) {
2624 			struct ceph_inode_info *pci;
2625 			struct ceph_snap_realm *realm;
2626 			struct inode *parent;
2627 
2628 			parent = ceph_lookup_inode(sb, ceph_ino(inode));
2629 			if (IS_ERR(parent))
2630 				return PTR_ERR(parent);
2631 
2632 			pci = ceph_inode(parent);
2633 			spin_lock(&pci->i_ceph_lock);
2634 			realm = pci->i_snap_realm;
2635 			if (realm)
2636 				stat->size = realm->num_snaps;
2637 			else
2638 				stat->size = 0;
2639 			spin_unlock(&pci->i_ceph_lock);
2640 			iput(parent);
2641 		} else {
2642 			stat->size = ci->i_files + ci->i_subdirs;
2643 		}
2644 		stat->blocks = 0;
2645 		stat->blksize = 65536;
2646 		/*
2647 		 * Some applications rely on the st_nlink value of
2648 		 * directories being either 0 (if unlinked) or
2649 		 * 2 + the number of subdirectories.
2650 		 */
2651 		if (stat->nlink == 1)
2652 			/* '.' + '..' + subdirs */
2653 			stat->nlink = 1 + 1 + ci->i_subdirs;
2654 	}
2655 
2656 	stat->attributes_mask |= STATX_ATTR_CHANGE_MONOTONIC;
2657 	stat->attributes |= STATX_ATTR_CHANGE_MONOTONIC;
2658 	stat->result_mask = request_mask & valid_mask;
2659 	return err;
2660 }
2661 
2662 void ceph_inode_shutdown(struct inode *inode)
2663 {
2664 	struct ceph_inode_info *ci = ceph_inode(inode);
2665 	struct rb_node *p;
2666 	int iputs = 0;
2667 	bool invalidate = false;
2668 
2669 	spin_lock(&ci->i_ceph_lock);
2670 	ci->i_ceph_flags |= CEPH_I_SHUTDOWN;
2671 	p = rb_first(&ci->i_caps);
2672 	while (p) {
2673 		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
2674 
2675 		p = rb_next(p);
2676 		iputs += ceph_purge_inode_cap(inode, cap, &invalidate);
2677 	}
2678 	spin_unlock(&ci->i_ceph_lock);
2679 
2680 	if (invalidate)
2681 		ceph_queue_invalidate(inode);
2682 	while (iputs--)
2683 		iput(inode);
2684 }
2685