xref: /openbmc/linux/fs/ceph/inode.c (revision 16be62fc8a53482529201b4be6bbcd0de3a058cb)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 
4 #include <linux/module.h>
5 #include <linux/fs.h>
6 #include <linux/slab.h>
7 #include <linux/string.h>
8 #include <linux/uaccess.h>
9 #include <linux/kernel.h>
10 #include <linux/writeback.h>
11 #include <linux/vmalloc.h>
12 #include <linux/xattr.h>
13 #include <linux/posix_acl.h>
14 #include <linux/random.h>
15 #include <linux/sort.h>
16 #include <linux/iversion.h>
17 #include <linux/fscrypt.h>
18 
19 #include "super.h"
20 #include "mds_client.h"
21 #include "cache.h"
22 #include "crypto.h"
23 #include <linux/ceph/decode.h>
24 
25 /*
26  * Ceph inode operations
27  *
28  * Implement basic inode helpers (get, alloc) and inode ops (getattr,
29  * setattr, etc.), xattr helpers, and helpers for assimilating
30  * metadata returned by the MDS into our cache.
31  *
32  * Also define helpers for doing asynchronous writeback, invalidation,
33  * and truncation for the benefit of those who can't afford to block
34  * (typically because they are in the message handler path).
35  */
36 
37 static const struct inode_operations ceph_symlink_iops;
38 static const struct inode_operations ceph_encrypted_symlink_iops;
39 
40 static void ceph_inode_work(struct work_struct *work);
41 
42 /*
43  * find or create an inode, given the ceph ino number
44  */
45 static int ceph_set_ino_cb(struct inode *inode, void *data)
46 {
47 	struct ceph_inode_info *ci = ceph_inode(inode);
48 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
49 
50 	ci->i_vino = *(struct ceph_vino *)data;
51 	inode->i_ino = ceph_vino_to_ino_t(ci->i_vino);
52 	inode_set_iversion_raw(inode, 0);
53 	percpu_counter_inc(&mdsc->metric.total_inodes);
54 
55 	return 0;
56 }
57 
58 /**
59  * ceph_new_inode - allocate a new inode in advance of an expected create
60  * @dir: parent directory for new inode
61  * @dentry: dentry that may eventually point to new inode
62  * @mode: mode of new inode
63  * @as_ctx: pointer to inherited security context
64  *
65  * Allocate a new inode in advance of an operation to create a new inode.
66  * This allocates the inode and sets up the acl_sec_ctx with appropriate
67  * info for the new inode.
68  *
69  * Returns a pointer to the new inode or an ERR_PTR.
70  */
71 struct inode *ceph_new_inode(struct inode *dir, struct dentry *dentry,
72 			     umode_t *mode, struct ceph_acl_sec_ctx *as_ctx)
73 {
74 	int err;
75 	struct inode *inode;
76 
77 	inode = new_inode(dir->i_sb);
78 	if (!inode)
79 		return ERR_PTR(-ENOMEM);
80 
81 	if (!S_ISLNK(*mode)) {
82 		err = ceph_pre_init_acls(dir, mode, as_ctx);
83 		if (err < 0)
84 			goto out_err;
85 	}
86 
87 	inode->i_state = 0;
88 	inode->i_mode = *mode;
89 
90 	err = ceph_security_init_secctx(dentry, *mode, as_ctx);
91 	if (err < 0)
92 		goto out_err;
93 
94 	err = ceph_fscrypt_prepare_context(dir, inode, as_ctx);
95 	if (err)
96 		goto out_err;
97 
98 	return inode;
99 out_err:
100 	iput(inode);
101 	return ERR_PTR(err);
102 }
103 
104 void ceph_as_ctx_to_req(struct ceph_mds_request *req,
105 			struct ceph_acl_sec_ctx *as_ctx)
106 {
107 	if (as_ctx->pagelist) {
108 		req->r_pagelist = as_ctx->pagelist;
109 		as_ctx->pagelist = NULL;
110 	}
111 	ceph_fscrypt_as_ctx_to_req(req, as_ctx);
112 }
113 
114 /**
115  * ceph_get_inode - find or create/hash a new inode
116  * @sb: superblock to search and allocate in
117  * @vino: vino to search for
118  * @newino: optional new inode to insert if one isn't found (may be NULL)
119  *
120  * Search for or insert a new inode into the hash for the given vino, and
121  * return a reference to it. If newino is non-NULL, its reference is consumed.
122  */
123 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
124 			     struct inode *newino)
125 {
126 	struct inode *inode;
127 
128 	if (ceph_vino_is_reserved(vino))
129 		return ERR_PTR(-EREMOTEIO);
130 
131 	if (newino) {
132 		inode = inode_insert5(newino, (unsigned long)vino.ino,
133 				      ceph_ino_compare, ceph_set_ino_cb, &vino);
134 		if (inode != newino)
135 			iput(newino);
136 	} else {
137 		inode = iget5_locked(sb, (unsigned long)vino.ino,
138 				     ceph_ino_compare, ceph_set_ino_cb, &vino);
139 	}
140 
141 	if (!inode) {
142 		dout("No inode found for %llx.%llx\n", vino.ino, vino.snap);
143 		return ERR_PTR(-ENOMEM);
144 	}
145 
146 	dout("get_inode on %llu=%llx.%llx got %p new %d\n", ceph_present_inode(inode),
147 	     ceph_vinop(inode), inode, !!(inode->i_state & I_NEW));
148 	return inode;
149 }
150 
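/*
 * Illustrative usage sketch (not part of the original source): a caller
 * looking up an inode by vino.  Only helpers defined or used in this file
 * are assumed; error handling is abbreviated.
 *
 *	struct ceph_vino vino = { .ino = ino, .snap = CEPH_NOSNAP };
 *	struct inode *inode = ceph_get_inode(sb, vino, NULL);
 *
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *
 * If the returned inode still has I_NEW set it must be filled and then
 * unlocked with unlock_new_inode() (or dropped with discard_new_inode())
 * before other lookups may use it; otherwise just iput() it when done.
 */
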
151 /*
152  * get/construct snapdir inode for a given directory
153  */
154 struct inode *ceph_get_snapdir(struct inode *parent)
155 {
156 	struct ceph_vino vino = {
157 		.ino = ceph_ino(parent),
158 		.snap = CEPH_SNAPDIR,
159 	};
160 	struct inode *inode = ceph_get_inode(parent->i_sb, vino, NULL);
161 	struct ceph_inode_info *ci = ceph_inode(inode);
162 
163 	if (IS_ERR(inode))
164 		return inode;
165 
166 	if (!S_ISDIR(parent->i_mode)) {
167 		pr_warn_once("bad snapdir parent type (mode=0%o)\n",
168 			     parent->i_mode);
169 		goto err;
170 	}
171 
172 	if (!(inode->i_state & I_NEW) && !S_ISDIR(inode->i_mode)) {
173 		pr_warn_once("bad snapdir inode type (mode=0%o)\n",
174 			     inode->i_mode);
175 		goto err;
176 	}
177 
178 	inode->i_mode = parent->i_mode;
179 	inode->i_uid = parent->i_uid;
180 	inode->i_gid = parent->i_gid;
181 	inode->i_mtime = parent->i_mtime;
182 	inode->i_ctime = parent->i_ctime;
183 	inode->i_atime = parent->i_atime;
184 	ci->i_rbytes = 0;
185 	ci->i_btime = ceph_inode(parent)->i_btime;
186 
187 	if (inode->i_state & I_NEW) {
188 		inode->i_op = &ceph_snapdir_iops;
189 		inode->i_fop = &ceph_snapdir_fops;
190 		ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
191 		unlock_new_inode(inode);
192 	}
193 
194 	return inode;
195 err:
196 	if ((inode->i_state & I_NEW))
197 		discard_new_inode(inode);
198 	else
199 		iput(inode);
200 	return ERR_PTR(-ENOTDIR);
201 }
202 
203 const struct inode_operations ceph_file_iops = {
204 	.permission = ceph_permission,
205 	.setattr = ceph_setattr,
206 	.getattr = ceph_getattr,
207 	.listxattr = ceph_listxattr,
208 	.get_inode_acl = ceph_get_acl,
209 	.set_acl = ceph_set_acl,
210 };
211 
212 
213 /*
214  * We use a 'frag tree' to keep track of the MDS's directory fragments
215  * for a given inode (usually there is just a single fragment).  We
216  * need to know when a child frag is delegated to a new MDS, or when
217  * it is flagged as replicated, so we can direct our requests
218  * accordingly.
219  */
220 
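/*
 * Illustrative example (not part of the original source): a frag names a
 * slice of the 24-bit dentry hash space as a (value, bits) pair.
 * ceph_frag_make(0, 0) is the root frag covering the whole space;
 * splitting it by 1 yields the two children
 *
 *	ceph_frag_make_child(ceph_frag_make(0, 0), 1, 0)
 *	ceph_frag_make_child(ceph_frag_make(0, 0), 1, 1)
 *
 * each covering half of the hash values, and deeper splits subdivide
 * those halves recursively.
 */
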
221 /*
222  * find/create a frag in the tree
223  */
224 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
225 						    u32 f)
226 {
227 	struct rb_node **p;
228 	struct rb_node *parent = NULL;
229 	struct ceph_inode_frag *frag;
230 	int c;
231 
232 	p = &ci->i_fragtree.rb_node;
233 	while (*p) {
234 		parent = *p;
235 		frag = rb_entry(parent, struct ceph_inode_frag, node);
236 		c = ceph_frag_compare(f, frag->frag);
237 		if (c < 0)
238 			p = &(*p)->rb_left;
239 		else if (c > 0)
240 			p = &(*p)->rb_right;
241 		else
242 			return frag;
243 	}
244 
245 	frag = kmalloc(sizeof(*frag), GFP_NOFS);
246 	if (!frag)
247 		return ERR_PTR(-ENOMEM);
248 
249 	frag->frag = f;
250 	frag->split_by = 0;
251 	frag->mds = -1;
252 	frag->ndist = 0;
253 
254 	rb_link_node(&frag->node, parent, p);
255 	rb_insert_color(&frag->node, &ci->i_fragtree);
256 
257 	dout("get_or_create_frag added %llx.%llx frag %x\n",
258 	     ceph_vinop(&ci->netfs.inode), f);
259 	return frag;
260 }
261 
262 /*
263  * find a specific frag @f
264  */
265 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
266 {
267 	struct rb_node *n = ci->i_fragtree.rb_node;
268 
269 	while (n) {
270 		struct ceph_inode_frag *frag =
271 			rb_entry(n, struct ceph_inode_frag, node);
272 		int c = ceph_frag_compare(f, frag->frag);
273 		if (c < 0)
274 			n = n->rb_left;
275 		else if (c > 0)
276 			n = n->rb_right;
277 		else
278 			return frag;
279 	}
280 	return NULL;
281 }
282 
283 /*
284  * Choose frag containing the given value @v.  If @pfrag is
285  * specified, copy the frag delegation info to the caller if
286  * it is present.
287  */
288 static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
289 			      struct ceph_inode_frag *pfrag, int *found)
290 {
291 	u32 t = ceph_frag_make(0, 0);
292 	struct ceph_inode_frag *frag;
293 	unsigned nway, i;
294 	u32 n;
295 
296 	if (found)
297 		*found = 0;
298 
299 	while (1) {
300 		WARN_ON(!ceph_frag_contains_value(t, v));
301 		frag = __ceph_find_frag(ci, t);
302 		if (!frag)
303 			break; /* t is a leaf */
304 		if (frag->split_by == 0) {
305 			if (pfrag)
306 				memcpy(pfrag, frag, sizeof(*pfrag));
307 			if (found)
308 				*found = 1;
309 			break;
310 		}
311 
312 		/* choose child */
313 		nway = 1 << frag->split_by;
314 		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
315 		     frag->split_by, nway);
316 		for (i = 0; i < nway; i++) {
317 			n = ceph_frag_make_child(t, frag->split_by, i);
318 			if (ceph_frag_contains_value(n, v)) {
319 				t = n;
320 				break;
321 			}
322 		}
323 		BUG_ON(i == nway);
324 	}
325 	dout("choose_frag(%x) = %x\n", v, t);
326 
327 	return t;
328 }
329 
330 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
331 		     struct ceph_inode_frag *pfrag, int *found)
332 {
333 	u32 ret;
334 	mutex_lock(&ci->i_fragtree_mutex);
335 	ret = __ceph_choose_frag(ci, v, pfrag, found);
336 	mutex_unlock(&ci->i_fragtree_mutex);
337 	return ret;
338 }
339 
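/*
 * Illustrative sketch (not part of the original source): mapping a dentry
 * name to the containing dirfrag, roughly as the readdir and request
 * routing code does.  The hash helpers are the ones used elsewhere in
 * this file; the surrounding locking is omitted.
 *
 *	u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
 *				 name, name_len);
 *	u32 fg = ceph_choose_frag(ci, ceph_frag_value(hash), NULL, NULL);
 *
 * The returned frag can then be looked up with __ceph_find_frag() (under
 * i_fragtree_mutex) to see which MDS, if any, the fragment is delegated
 * to.
 */
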
340 /*
341  * Process dirfrag (delegation) info from the mds.  Include leaf
342  * fragment in tree ONLY if ndist > 0.  Otherwise, only
343  * branches/splits are included in i_fragtree.
344  */
345 static int ceph_fill_dirfrag(struct inode *inode,
346 			     struct ceph_mds_reply_dirfrag *dirinfo)
347 {
348 	struct ceph_inode_info *ci = ceph_inode(inode);
349 	struct ceph_inode_frag *frag;
350 	u32 id = le32_to_cpu(dirinfo->frag);
351 	int mds = le32_to_cpu(dirinfo->auth);
352 	int ndist = le32_to_cpu(dirinfo->ndist);
353 	int diri_auth = -1;
354 	int i;
355 	int err = 0;
356 
357 	spin_lock(&ci->i_ceph_lock);
358 	if (ci->i_auth_cap)
359 		diri_auth = ci->i_auth_cap->mds;
360 	spin_unlock(&ci->i_ceph_lock);
361 
362 	if (mds == -1) /* CDIR_AUTH_PARENT */
363 		mds = diri_auth;
364 
365 	mutex_lock(&ci->i_fragtree_mutex);
366 	if (ndist == 0 && mds == diri_auth) {
367 		/* no delegation info needed. */
368 		frag = __ceph_find_frag(ci, id);
369 		if (!frag)
370 			goto out;
371 		if (frag->split_by == 0) {
372 			/* tree leaf, remove */
373 			dout("fill_dirfrag removed %llx.%llx frag %x"
374 			     " (no ref)\n", ceph_vinop(inode), id);
375 			rb_erase(&frag->node, &ci->i_fragtree);
376 			kfree(frag);
377 		} else {
378 			/* tree branch, keep and clear */
379 			dout("fill_dirfrag cleared %llx.%llx frag %x"
380 			     " referral\n", ceph_vinop(inode), id);
381 			frag->mds = -1;
382 			frag->ndist = 0;
383 		}
384 		goto out;
385 	}
386 
387 
388 	/* find/add this frag to store mds delegation info */
389 	frag = __get_or_create_frag(ci, id);
390 	if (IS_ERR(frag)) {
391 		/* this is not the end of the world; we can continue
392 		   with bad/inaccurate delegation info */
393 		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
394 		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
395 		err = -ENOMEM;
396 		goto out;
397 	}
398 
399 	frag->mds = mds;
400 	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
401 	for (i = 0; i < frag->ndist; i++)
402 		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
403 	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
404 	     ceph_vinop(inode), frag->frag, frag->ndist);
405 
406 out:
407 	mutex_unlock(&ci->i_fragtree_mutex);
408 	return err;
409 }
410 
411 static int frag_tree_split_cmp(const void *l, const void *r)
412 {
413 	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
414 	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
415 	return ceph_frag_compare(le32_to_cpu(ls->frag),
416 				 le32_to_cpu(rs->frag));
417 }
418 
419 static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
420 {
421 	if (!frag)
422 		return f == ceph_frag_make(0, 0);
423 	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
424 		return false;
425 	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
426 }
427 
428 static int ceph_fill_fragtree(struct inode *inode,
429 			      struct ceph_frag_tree_head *fragtree,
430 			      struct ceph_mds_reply_dirfrag *dirinfo)
431 {
432 	struct ceph_inode_info *ci = ceph_inode(inode);
433 	struct ceph_inode_frag *frag, *prev_frag = NULL;
434 	struct rb_node *rb_node;
435 	unsigned i, split_by, nsplits;
436 	u32 id;
437 	bool update = false;
438 
439 	mutex_lock(&ci->i_fragtree_mutex);
440 	nsplits = le32_to_cpu(fragtree->nsplits);
441 	if (nsplits != ci->i_fragtree_nsplits) {
442 		update = true;
443 	} else if (nsplits) {
444 		i = get_random_u32_below(nsplits);
445 		id = le32_to_cpu(fragtree->splits[i].frag);
446 		if (!__ceph_find_frag(ci, id))
447 			update = true;
448 	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
449 		rb_node = rb_first(&ci->i_fragtree);
450 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
451 		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
452 			update = true;
453 	}
454 	if (!update && dirinfo) {
455 		id = le32_to_cpu(dirinfo->frag);
456 		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
457 			update = true;
458 	}
459 	if (!update)
460 		goto out_unlock;
461 
462 	if (nsplits > 1) {
463 		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
464 		     frag_tree_split_cmp, NULL);
465 	}
466 
467 	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
468 	rb_node = rb_first(&ci->i_fragtree);
469 	for (i = 0; i < nsplits; i++) {
470 		id = le32_to_cpu(fragtree->splits[i].frag);
471 		split_by = le32_to_cpu(fragtree->splits[i].by);
472 		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
473 			pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
474 			       "frag %x split by %d\n", ceph_vinop(inode),
475 			       i, nsplits, id, split_by);
476 			continue;
477 		}
478 		frag = NULL;
479 		while (rb_node) {
480 			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
481 			if (ceph_frag_compare(frag->frag, id) >= 0) {
482 				if (frag->frag != id)
483 					frag = NULL;
484 				else
485 					rb_node = rb_next(rb_node);
486 				break;
487 			}
488 			rb_node = rb_next(rb_node);
489 			/* delete stale split/leaf node */
490 			if (frag->split_by > 0 ||
491 			    !is_frag_child(frag->frag, prev_frag)) {
492 				rb_erase(&frag->node, &ci->i_fragtree);
493 				if (frag->split_by > 0)
494 					ci->i_fragtree_nsplits--;
495 				kfree(frag);
496 			}
497 			frag = NULL;
498 		}
499 		if (!frag) {
500 			frag = __get_or_create_frag(ci, id);
501 			if (IS_ERR(frag))
502 				continue;
503 		}
504 		if (frag->split_by == 0)
505 			ci->i_fragtree_nsplits++;
506 		frag->split_by = split_by;
507 		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
508 		prev_frag = frag;
509 	}
510 	while (rb_node) {
511 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
512 		rb_node = rb_next(rb_node);
513 		/* delete stale split/leaf node */
514 		if (frag->split_by > 0 ||
515 		    !is_frag_child(frag->frag, prev_frag)) {
516 			rb_erase(&frag->node, &ci->i_fragtree);
517 			if (frag->split_by > 0)
518 				ci->i_fragtree_nsplits--;
519 			kfree(frag);
520 		}
521 	}
522 out_unlock:
523 	mutex_unlock(&ci->i_fragtree_mutex);
524 	return 0;
525 }
526 
527 /*
528  * initialize a newly allocated inode.
529  */
530 struct inode *ceph_alloc_inode(struct super_block *sb)
531 {
532 	struct ceph_inode_info *ci;
533 	int i;
534 
535 	ci = alloc_inode_sb(sb, ceph_inode_cachep, GFP_NOFS);
536 	if (!ci)
537 		return NULL;
538 
539 	dout("alloc_inode %p\n", &ci->netfs.inode);
540 
541 	/* Set parameters for the netfs library */
542 	netfs_inode_init(&ci->netfs, &ceph_netfs_ops);
543 
544 	spin_lock_init(&ci->i_ceph_lock);
545 
546 	ci->i_version = 0;
547 	ci->i_inline_version = 0;
548 	ci->i_time_warp_seq = 0;
549 	ci->i_ceph_flags = 0;
550 	atomic64_set(&ci->i_ordered_count, 1);
551 	atomic64_set(&ci->i_release_count, 1);
552 	atomic64_set(&ci->i_complete_seq[0], 0);
553 	atomic64_set(&ci->i_complete_seq[1], 0);
554 	ci->i_symlink = NULL;
555 
556 	ci->i_max_bytes = 0;
557 	ci->i_max_files = 0;
558 
559 	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
560 	memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
561 	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);
562 
563 	ci->i_fragtree = RB_ROOT;
564 	mutex_init(&ci->i_fragtree_mutex);
565 
566 	ci->i_xattrs.blob = NULL;
567 	ci->i_xattrs.prealloc_blob = NULL;
568 	ci->i_xattrs.dirty = false;
569 	ci->i_xattrs.index = RB_ROOT;
570 	ci->i_xattrs.count = 0;
571 	ci->i_xattrs.names_size = 0;
572 	ci->i_xattrs.vals_size = 0;
573 	ci->i_xattrs.version = 0;
574 	ci->i_xattrs.index_version = 0;
575 
576 	ci->i_caps = RB_ROOT;
577 	ci->i_auth_cap = NULL;
578 	ci->i_dirty_caps = 0;
579 	ci->i_flushing_caps = 0;
580 	INIT_LIST_HEAD(&ci->i_dirty_item);
581 	INIT_LIST_HEAD(&ci->i_flushing_item);
582 	ci->i_prealloc_cap_flush = NULL;
583 	INIT_LIST_HEAD(&ci->i_cap_flush_list);
584 	init_waitqueue_head(&ci->i_cap_wq);
585 	ci->i_hold_caps_max = 0;
586 	INIT_LIST_HEAD(&ci->i_cap_delay_list);
587 	INIT_LIST_HEAD(&ci->i_cap_snaps);
588 	ci->i_head_snapc = NULL;
589 	ci->i_snap_caps = 0;
590 
591 	ci->i_last_rd = ci->i_last_wr = jiffies - 3600 * HZ;
592 	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
593 		ci->i_nr_by_mode[i] = 0;
594 
595 	mutex_init(&ci->i_truncate_mutex);
596 	ci->i_truncate_seq = 0;
597 	ci->i_truncate_size = 0;
598 	ci->i_truncate_pending = 0;
599 
600 	ci->i_max_size = 0;
601 	ci->i_reported_size = 0;
602 	ci->i_wanted_max_size = 0;
603 	ci->i_requested_max_size = 0;
604 
605 	ci->i_pin_ref = 0;
606 	ci->i_rd_ref = 0;
607 	ci->i_rdcache_ref = 0;
608 	ci->i_wr_ref = 0;
609 	ci->i_wb_ref = 0;
610 	ci->i_fx_ref = 0;
611 	ci->i_wrbuffer_ref = 0;
612 	ci->i_wrbuffer_ref_head = 0;
613 	atomic_set(&ci->i_filelock_ref, 0);
614 	atomic_set(&ci->i_shared_gen, 1);
615 	ci->i_rdcache_gen = 0;
616 	ci->i_rdcache_revoking = 0;
617 
618 	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
619 	INIT_LIST_HEAD(&ci->i_unsafe_iops);
620 	spin_lock_init(&ci->i_unsafe_lock);
621 
622 	ci->i_snap_realm = NULL;
623 	INIT_LIST_HEAD(&ci->i_snap_realm_item);
624 	INIT_LIST_HEAD(&ci->i_snap_flush_item);
625 
626 	INIT_WORK(&ci->i_work, ceph_inode_work);
627 	ci->i_work_mask = 0;
628 	memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
629 #ifdef CONFIG_FS_ENCRYPTION
630 	ci->fscrypt_auth = NULL;
631 	ci->fscrypt_auth_len = 0;
632 #endif
633 	return &ci->netfs.inode;
634 }
635 
636 void ceph_free_inode(struct inode *inode)
637 {
638 	struct ceph_inode_info *ci = ceph_inode(inode);
639 
640 	kfree(ci->i_symlink);
641 #ifdef CONFIG_FS_ENCRYPTION
642 	kfree(ci->fscrypt_auth);
643 #endif
644 	fscrypt_free_inode(inode);
645 	kmem_cache_free(ceph_inode_cachep, ci);
646 }
647 
648 void ceph_evict_inode(struct inode *inode)
649 {
650 	struct ceph_inode_info *ci = ceph_inode(inode);
651 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
652 	struct ceph_inode_frag *frag;
653 	struct rb_node *n;
654 
655 	dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
656 
657 	percpu_counter_dec(&mdsc->metric.total_inodes);
658 
659 	truncate_inode_pages_final(&inode->i_data);
660 	if (inode->i_state & I_PINNING_FSCACHE_WB)
661 		ceph_fscache_unuse_cookie(inode, true);
662 	clear_inode(inode);
663 
664 	ceph_fscache_unregister_inode_cookie(ci);
665 	fscrypt_put_encryption_info(inode);
666 
667 	__ceph_remove_caps(ci);
668 
669 	if (__ceph_has_quota(ci, QUOTA_GET_ANY))
670 		ceph_adjust_quota_realms_count(inode, false);
671 
672 	/*
673 	 * we may still have a snap_realm reference if there are stray
674 	 * caps in i_snap_caps.
675 	 */
676 	if (ci->i_snap_realm) {
677 		if (ceph_snap(inode) == CEPH_NOSNAP) {
678 			dout(" dropping residual ref to snap realm %p\n",
679 			     ci->i_snap_realm);
680 			ceph_change_snap_realm(inode, NULL);
681 		} else {
682 			ceph_put_snapid_map(mdsc, ci->i_snapid_map);
683 			ci->i_snap_realm = NULL;
684 		}
685 	}
686 
687 	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
688 		frag = rb_entry(n, struct ceph_inode_frag, node);
689 		rb_erase(n, &ci->i_fragtree);
690 		kfree(frag);
691 	}
692 	ci->i_fragtree_nsplits = 0;
693 
694 	__ceph_destroy_xattrs(ci);
695 	if (ci->i_xattrs.blob)
696 		ceph_buffer_put(ci->i_xattrs.blob);
697 	if (ci->i_xattrs.prealloc_blob)
698 		ceph_buffer_put(ci->i_xattrs.prealloc_blob);
699 
700 	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
701 	ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
702 }
703 
704 static inline blkcnt_t calc_inode_blocks(u64 size)
705 {
706 	return (size + (1<<9) - 1) >> 9;
707 }
708 
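/*
 * Worked example (illustrative): calc_inode_blocks() rounds the byte size
 * up to 512-byte units, so calc_inode_blocks(0) == 0,
 * calc_inode_blocks(1) == 1 and calc_inode_blocks(4096) == 8.
 */
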
709 /*
710  * Helpers to fill in size, ctime, mtime, and atime.  We have to be
711  * careful because either the client or MDS may have more up to date
712  * info, depending on which capabilities are held, and whether
713  * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
714  * and size are monotonically increasing, except when utimes() or
715  * truncate() increments the corresponding _seq values.)
716  */
717 int ceph_fill_file_size(struct inode *inode, int issued,
718 			u32 truncate_seq, u64 truncate_size, u64 size)
719 {
720 	struct ceph_inode_info *ci = ceph_inode(inode);
721 	int queue_trunc = 0;
722 	loff_t isize = i_size_read(inode);
723 
724 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
725 	    (truncate_seq == ci->i_truncate_seq && size > isize)) {
726 		dout("size %lld -> %llu\n", isize, size);
727 		if (size > 0 && S_ISDIR(inode->i_mode)) {
728 			pr_err("fill_file_size non-zero size for directory\n");
729 			size = 0;
730 		}
731 		i_size_write(inode, size);
732 		inode->i_blocks = calc_inode_blocks(size);
733 		/*
734 		 * If we're expanding, then we should be able to just update
735 		 * the existing cookie.
736 		 */
737 		if (size > isize)
738 			ceph_fscache_update(inode);
739 		ci->i_reported_size = size;
740 		if (truncate_seq != ci->i_truncate_seq) {
741 			dout("truncate_seq %u -> %u\n",
742 			     ci->i_truncate_seq, truncate_seq);
743 			ci->i_truncate_seq = truncate_seq;
744 
745 			/* the MDS should have revoked these caps */
746 			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
747 					       CEPH_CAP_FILE_RD |
748 					       CEPH_CAP_FILE_WR |
749 					       CEPH_CAP_FILE_LAZYIO));
750 			/*
751 			 * If we hold relevant caps, or in the case where we're
752 			 * not the only client referencing this file and we
753 			 * don't hold those caps, then we need to check whether
754 			 * the file is either opened or mmaped
755 			 */
756 			if ((issued & (CEPH_CAP_FILE_CACHE|
757 				       CEPH_CAP_FILE_BUFFER)) ||
758 			    mapping_mapped(inode->i_mapping) ||
759 			    __ceph_is_file_opened(ci)) {
760 				ci->i_truncate_pending++;
761 				queue_trunc = 1;
762 			}
763 		}
764 	}
765 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
766 	    ci->i_truncate_size != truncate_size) {
767 		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
768 		     truncate_size);
769 		ci->i_truncate_size = truncate_size;
770 	}
771 	return queue_trunc;
772 }
773 
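/*
 * Worked example (illustrative, not from the original source): if the
 * client has locally extended a file to 8192 bytes and an MDS reply then
 * carries size 4096 with an unchanged truncate_seq, ceph_fill_file_size()
 * leaves i_size alone (equal seq and 4096 < local i_size).  Only a reply
 * with a larger truncate_seq, i.e. a real truncate on the MDS, shrinks
 * the local size, and may additionally queue an async vmtruncate if the
 * file is open, mmapped, or holds Fc/Fb caps.
 */
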
774 void ceph_fill_file_time(struct inode *inode, int issued,
775 			 u64 time_warp_seq, struct timespec64 *ctime,
776 			 struct timespec64 *mtime, struct timespec64 *atime)
777 {
778 	struct ceph_inode_info *ci = ceph_inode(inode);
779 	int warn = 0;
780 
781 	if (issued & (CEPH_CAP_FILE_EXCL|
782 		      CEPH_CAP_FILE_WR|
783 		      CEPH_CAP_FILE_BUFFER|
784 		      CEPH_CAP_AUTH_EXCL|
785 		      CEPH_CAP_XATTR_EXCL)) {
786 		if (ci->i_version == 0 ||
787 		    timespec64_compare(ctime, &inode->i_ctime) > 0) {
788 			dout("ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
789 			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
790 			     ctime->tv_sec, ctime->tv_nsec);
791 			inode->i_ctime = *ctime;
792 		}
793 		if (ci->i_version == 0 ||
794 		    ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
795 			/* the MDS did a utimes() */
796 			dout("mtime %lld.%09ld -> %lld.%09ld "
797 			     "tw %d -> %d\n",
798 			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
799 			     mtime->tv_sec, mtime->tv_nsec,
800 			     ci->i_time_warp_seq, (int)time_warp_seq);
801 
802 			inode->i_mtime = *mtime;
803 			inode->i_atime = *atime;
804 			ci->i_time_warp_seq = time_warp_seq;
805 		} else if (time_warp_seq == ci->i_time_warp_seq) {
806 			/* nobody did utimes(); take the max */
807 			if (timespec64_compare(mtime, &inode->i_mtime) > 0) {
808 				dout("mtime %lld.%09ld -> %lld.%09ld inc\n",
809 				     inode->i_mtime.tv_sec,
810 				     inode->i_mtime.tv_nsec,
811 				     mtime->tv_sec, mtime->tv_nsec);
812 				inode->i_mtime = *mtime;
813 			}
814 			if (timespec64_compare(atime, &inode->i_atime) > 0) {
815 				dout("atime %lld.%09ld -> %lld.%09ld inc\n",
816 				     inode->i_atime.tv_sec,
817 				     inode->i_atime.tv_nsec,
818 				     atime->tv_sec, atime->tv_nsec);
819 				inode->i_atime = *atime;
820 			}
821 		} else if (issued & CEPH_CAP_FILE_EXCL) {
822 			/* we did a utimes(); ignore mds values */
823 		} else {
824 			warn = 1;
825 		}
826 	} else {
827 		/* we have no write|excl caps; whatever the MDS says is true */
828 		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
829 			inode->i_ctime = *ctime;
830 			inode->i_mtime = *mtime;
831 			inode->i_atime = *atime;
832 			ci->i_time_warp_seq = time_warp_seq;
833 		} else {
834 			warn = 1;
835 		}
836 	}
837 	if (warn) /* time_warp_seq shouldn't go backwards */
838 		dout("%p mds time_warp_seq %llu < %u\n",
839 		     inode, time_warp_seq, ci->i_time_warp_seq);
840 }
841 
842 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
843 static int decode_encrypted_symlink(const char *encsym, int enclen, u8 **decsym)
844 {
845 	int declen;
846 	u8 *sym;
847 
848 	sym = kmalloc(enclen + 1, GFP_NOFS);
849 	if (!sym)
850 		return -ENOMEM;
851 
852 	declen = ceph_base64_decode(encsym, enclen, sym);
853 	if (declen < 0) {
854 		pr_err("%s: can't decode symlink (%d). Content: %.*s\n",
855 		       __func__, declen, enclen, encsym);
856 		kfree(sym);
857 		return -EIO;
858 	}
859 	sym[declen] = '\0';	/* NUL-terminate at the end of the decoded data */
860 	*decsym = sym;
861 	return declen;
862 }
863 #else
864 static int decode_encrypted_symlink(const char *encsym, int symlen, u8 **decsym)
865 {
866 	return -EOPNOTSUPP;
867 }
868 #endif
869 
870 /*
871  * Populate an inode based on info from mds.  May be called on new or
872  * existing inodes.
873  */
874 int ceph_fill_inode(struct inode *inode, struct page *locked_page,
875 		    struct ceph_mds_reply_info_in *iinfo,
876 		    struct ceph_mds_reply_dirfrag *dirinfo,
877 		    struct ceph_mds_session *session, int cap_fmode,
878 		    struct ceph_cap_reservation *caps_reservation)
879 {
880 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
881 	struct ceph_mds_reply_inode *info = iinfo->in;
882 	struct ceph_inode_info *ci = ceph_inode(inode);
883 	int issued, new_issued, info_caps;
884 	struct timespec64 mtime, atime, ctime;
885 	struct ceph_buffer *xattr_blob = NULL;
886 	struct ceph_buffer *old_blob = NULL;
887 	struct ceph_string *pool_ns = NULL;
888 	struct ceph_cap *new_cap = NULL;
889 	int err = 0;
890 	bool wake = false;
891 	bool queue_trunc = false;
892 	bool new_version = false;
893 	bool fill_inline = false;
894 	umode_t mode = le32_to_cpu(info->mode);
895 	dev_t rdev = le32_to_cpu(info->rdev);
896 
897 	lockdep_assert_held(&mdsc->snap_rwsem);
898 
899 	dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
900 	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
901 	     ci->i_version);
902 
903 	/* Once I_NEW is cleared, we can't change type or dev numbers */
904 	if (inode->i_state & I_NEW) {
905 		inode->i_mode = mode;
906 	} else {
907 		if (inode_wrong_type(inode, mode)) {
908 			pr_warn_once("inode type changed! (ino %llx.%llx is 0%o, mds says 0%o)\n",
909 				     ceph_vinop(inode), inode->i_mode, mode);
910 			return -ESTALE;
911 		}
912 
913 		if ((S_ISCHR(mode) || S_ISBLK(mode)) && inode->i_rdev != rdev) {
914 			pr_warn_once("dev inode rdev changed! (ino %llx.%llx is %u:%u, mds says %u:%u)\n",
915 				     ceph_vinop(inode), MAJOR(inode->i_rdev),
916 				     MINOR(inode->i_rdev), MAJOR(rdev),
917 				     MINOR(rdev));
918 			return -ESTALE;
919 		}
920 	}
921 
922 	info_caps = le32_to_cpu(info->cap.caps);
923 
924 	/* prealloc new cap struct */
925 	if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
926 		new_cap = ceph_get_cap(mdsc, caps_reservation);
927 		if (!new_cap)
928 			return -ENOMEM;
929 	}
930 
931 	/*
932 	 * prealloc xattr data, if it looks like we'll need it.  only
933 	 * if len > 4 (meaning there are actually xattrs; the first 4
934 	 * bytes are the xattr count).
935 	 */
936 	if (iinfo->xattr_len > 4) {
937 		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
938 		if (!xattr_blob)
939 			pr_err("%s ENOMEM xattr blob %d bytes\n", __func__,
940 			       iinfo->xattr_len);
941 	}
942 
943 	if (iinfo->pool_ns_len > 0)
944 		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
945 						     iinfo->pool_ns_len);
946 
947 	if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map)
948 		ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode));
949 
950 	spin_lock(&ci->i_ceph_lock);
951 
952 	/*
953 	 * provided version will be odd if inode value is projected,
954 	 * even if stable.  skip the update if we have newer stable
955 	 * info (ours>=theirs, e.g. due to racing mds replies), unless
956 	 * we are getting projected (unstable) info (in which case the
957 	 * version is odd, and we want ours>theirs).
958 	 *   us   them
959 	 *   2    2     skip
960 	 *   3    2     skip
961 	 *   3    3     update
962 	 */
963 	if (ci->i_version == 0 ||
964 	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
965 	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
966 		new_version = true;
967 
968 	/* Update change_attribute */
969 	inode_set_max_iversion_raw(inode, iinfo->change_attr);
970 
971 	__ceph_caps_issued(ci, &issued);
972 	issued |= __ceph_caps_dirty(ci);
973 	new_issued = ~issued & info_caps;
974 
975 	__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
976 
977 #ifdef CONFIG_FS_ENCRYPTION
978 	if (iinfo->fscrypt_auth_len &&
979 	    ((inode->i_state & I_NEW) || (ci->fscrypt_auth_len == 0))) {
980 		kfree(ci->fscrypt_auth);
981 		ci->fscrypt_auth_len = iinfo->fscrypt_auth_len;
982 		ci->fscrypt_auth = iinfo->fscrypt_auth;
983 		iinfo->fscrypt_auth = NULL;
984 		iinfo->fscrypt_auth_len = 0;
985 		inode_set_flags(inode, S_ENCRYPTED, S_ENCRYPTED);
986 	}
987 #endif
988 
989 	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
990 	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
991 		inode->i_mode = mode;
992 		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
993 		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
994 		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
995 		     from_kuid(&init_user_ns, inode->i_uid),
996 		     from_kgid(&init_user_ns, inode->i_gid));
997 		ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
998 		ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
999 	}
1000 
1001 	/* directories have fl_stripe_unit set to zero */
1002 	if (IS_ENCRYPTED(inode))
1003 		inode->i_blkbits = CEPH_FSCRYPT_BLOCK_SHIFT;
1004 	else if (le32_to_cpu(info->layout.fl_stripe_unit))
1005 		inode->i_blkbits =
1006 			fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
1007 	else
1008 		inode->i_blkbits = CEPH_BLOCK_SHIFT;
1009 
1010 	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
1011 	    (issued & CEPH_CAP_LINK_EXCL) == 0)
1012 		set_nlink(inode, le32_to_cpu(info->nlink));
1013 
1014 	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
1015 		/* be careful with mtime, atime, size */
1016 		ceph_decode_timespec64(&atime, &info->atime);
1017 		ceph_decode_timespec64(&mtime, &info->mtime);
1018 		ceph_decode_timespec64(&ctime, &info->ctime);
1019 		ceph_fill_file_time(inode, issued,
1020 				le32_to_cpu(info->time_warp_seq),
1021 				&ctime, &mtime, &atime);
1022 	}
1023 
1024 	if (new_version || (info_caps & CEPH_CAP_FILE_SHARED)) {
1025 		ci->i_files = le64_to_cpu(info->files);
1026 		ci->i_subdirs = le64_to_cpu(info->subdirs);
1027 	}
1028 
1029 	if (new_version ||
1030 	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
1031 		u64 size = le64_to_cpu(info->size);
1032 		s64 old_pool = ci->i_layout.pool_id;
1033 		struct ceph_string *old_ns;
1034 
1035 		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
1036 		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
1037 					lockdep_is_held(&ci->i_ceph_lock));
1038 		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);
1039 
1040 		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
1041 			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
1042 
1043 		pool_ns = old_ns;
1044 
1045 		if (IS_ENCRYPTED(inode) && size &&
1046 		    iinfo->fscrypt_file_len == sizeof(__le64)) {
1047 			u64 fsize = __le64_to_cpu(*(__le64 *)iinfo->fscrypt_file);
1048 
1049 			if (size == round_up(fsize, CEPH_FSCRYPT_BLOCK_SIZE)) {
1050 				size = fsize;
1051 			} else {
1052 				pr_warn("fscrypt size mismatch: size=%llu fscrypt_file=%llu, discarding fscrypt_file size.\n",
1053 					size, fsize);
1054 			}
1055 		}
1056 
1057 		queue_trunc = ceph_fill_file_size(inode, issued,
1058 					le32_to_cpu(info->truncate_seq),
1059 					le64_to_cpu(info->truncate_size),
1060 					size);
1061 		/* only update max_size on auth cap */
1062 		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
1063 		    ci->i_max_size != le64_to_cpu(info->max_size)) {
1064 			dout("max_size %lld -> %llu\n", ci->i_max_size,
1065 					le64_to_cpu(info->max_size));
1066 			ci->i_max_size = le64_to_cpu(info->max_size);
1067 		}
1068 	}
1069 
1070 	/* layout and rstat are not tracked by capability, update them if
1071 	 * the inode info is from auth mds */
1072 	if (new_version || (info->cap.flags & CEPH_CAP_FLAG_AUTH)) {
1073 		if (S_ISDIR(inode->i_mode)) {
1074 			ci->i_dir_layout = iinfo->dir_layout;
1075 			ci->i_rbytes = le64_to_cpu(info->rbytes);
1076 			ci->i_rfiles = le64_to_cpu(info->rfiles);
1077 			ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
1078 			ci->i_dir_pin = iinfo->dir_pin;
1079 			ci->i_rsnaps = iinfo->rsnaps;
1080 			ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
1081 		}
1082 	}
1083 
1084 	/* xattrs */
1085 	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
1086 	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))  &&
1087 	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
1088 		if (ci->i_xattrs.blob)
1089 			old_blob = ci->i_xattrs.blob;
1090 		ci->i_xattrs.blob = xattr_blob;
1091 		if (xattr_blob)
1092 			memcpy(ci->i_xattrs.blob->vec.iov_base,
1093 			       iinfo->xattr_data, iinfo->xattr_len);
1094 		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
1095 		ceph_forget_all_cached_acls(inode);
1096 		ceph_security_invalidate_secctx(inode);
1097 		xattr_blob = NULL;
1098 	}
1099 
1100 	/* finally update i_version */
1101 	if (le64_to_cpu(info->version) > ci->i_version)
1102 		ci->i_version = le64_to_cpu(info->version);
1103 
1104 	inode->i_mapping->a_ops = &ceph_aops;
1105 
1106 	switch (inode->i_mode & S_IFMT) {
1107 	case S_IFIFO:
1108 	case S_IFBLK:
1109 	case S_IFCHR:
1110 	case S_IFSOCK:
1111 		inode->i_blkbits = PAGE_SHIFT;
1112 		init_special_inode(inode, inode->i_mode, rdev);
1113 		inode->i_op = &ceph_file_iops;
1114 		break;
1115 	case S_IFREG:
1116 		inode->i_op = &ceph_file_iops;
1117 		inode->i_fop = &ceph_file_fops;
1118 		break;
1119 	case S_IFLNK:
1120 		if (!ci->i_symlink) {
1121 			u32 symlen = iinfo->symlink_len;
1122 			char *sym;
1123 
1124 			spin_unlock(&ci->i_ceph_lock);
1125 
1126 			if (IS_ENCRYPTED(inode)) {
1127 				if (symlen != i_size_read(inode))
1128 					pr_err("%s %llx.%llx BAD symlink size %lld\n",
1129 						__func__, ceph_vinop(inode),
1130 						i_size_read(inode));
1131 
1132 				err = decode_encrypted_symlink(iinfo->symlink,
1133 							       symlen, (u8 **)&sym);
1134 				if (err < 0) {
1135 					pr_err("%s decoding encrypted symlink failed: %d\n",
1136 						__func__, err);
1137 					goto out;
1138 				}
1139 				symlen = err;
1140 				i_size_write(inode, symlen);
1141 				inode->i_blocks = calc_inode_blocks(symlen);
1142 			} else {
1143 				if (symlen != i_size_read(inode)) {
1144 					pr_err("%s %llx.%llx BAD symlink size %lld\n",
1145 						__func__, ceph_vinop(inode),
1146 						i_size_read(inode));
1147 					i_size_write(inode, symlen);
1148 					inode->i_blocks = calc_inode_blocks(symlen);
1149 				}
1150 
1151 				err = -ENOMEM;
1152 				sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
1153 				if (!sym)
1154 					goto out;
1155 			}
1156 
1157 			spin_lock(&ci->i_ceph_lock);
1158 			if (!ci->i_symlink)
1159 				ci->i_symlink = sym;
1160 			else
1161 				kfree(sym); /* lost a race */
1162 		}
1163 
1164 		if (IS_ENCRYPTED(inode)) {
1165 			/*
1166 			 * Encrypted symlinks need to be decrypted before we can
1167 			 * cache their targets in i_link. Don't touch it here.
1168 			 */
1169 			inode->i_op = &ceph_encrypted_symlink_iops;
1170 		} else {
1171 			inode->i_link = ci->i_symlink;
1172 			inode->i_op = &ceph_symlink_iops;
1173 		}
1174 		break;
1175 	case S_IFDIR:
1176 		inode->i_op = &ceph_dir_iops;
1177 		inode->i_fop = &ceph_dir_fops;
1178 		break;
1179 	default:
1180 		pr_err("%s %llx.%llx BAD mode 0%o\n", __func__,
1181 		       ceph_vinop(inode), inode->i_mode);
1182 	}
1183 
1184 	/* were we issued a capability? */
1185 	if (info_caps) {
1186 		if (ceph_snap(inode) == CEPH_NOSNAP) {
1187 			ceph_add_cap(inode, session,
1188 				     le64_to_cpu(info->cap.cap_id),
1189 				     info_caps,
1190 				     le32_to_cpu(info->cap.wanted),
1191 				     le32_to_cpu(info->cap.seq),
1192 				     le32_to_cpu(info->cap.mseq),
1193 				     le64_to_cpu(info->cap.realm),
1194 				     info->cap.flags, &new_cap);
1195 
1196 			/* set dir completion flag? */
1197 			if (S_ISDIR(inode->i_mode) &&
1198 			    ci->i_files == 0 && ci->i_subdirs == 0 &&
1199 			    (info_caps & CEPH_CAP_FILE_SHARED) &&
1200 			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
1201 			    !__ceph_dir_is_complete(ci)) {
1202 				dout(" marking %p complete (empty)\n", inode);
1203 				i_size_write(inode, 0);
1204 				__ceph_dir_set_complete(ci,
1205 					atomic64_read(&ci->i_release_count),
1206 					atomic64_read(&ci->i_ordered_count));
1207 			}
1208 
1209 			wake = true;
1210 		} else {
1211 			dout(" %p got snap_caps %s\n", inode,
1212 			     ceph_cap_string(info_caps));
1213 			ci->i_snap_caps |= info_caps;
1214 		}
1215 	}
1216 
1217 	if (iinfo->inline_version > 0 &&
1218 	    iinfo->inline_version >= ci->i_inline_version) {
1219 		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1220 		ci->i_inline_version = iinfo->inline_version;
1221 		if (ceph_has_inline_data(ci) &&
1222 		    (locked_page || (info_caps & cache_caps)))
1223 			fill_inline = true;
1224 	}
1225 
1226 	if (cap_fmode >= 0) {
1227 		if (!info_caps)
1228 			pr_warn("mds issued no caps on %llx.%llx\n",
1229 				ceph_vinop(inode));
1230 		__ceph_touch_fmode(ci, mdsc, cap_fmode);
1231 	}
1232 
1233 	spin_unlock(&ci->i_ceph_lock);
1234 
1235 	ceph_fscache_register_inode_cookie(inode);
1236 
1237 	if (fill_inline)
1238 		ceph_fill_inline_data(inode, locked_page,
1239 				      iinfo->inline_data, iinfo->inline_len);
1240 
1241 	if (wake)
1242 		wake_up_all(&ci->i_cap_wq);
1243 
1244 	/* queue truncate if we saw i_size decrease */
1245 	if (queue_trunc)
1246 		ceph_queue_vmtruncate(inode);
1247 
1248 	/* populate frag tree */
1249 	if (S_ISDIR(inode->i_mode))
1250 		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);
1251 
1252 	/* update delegation info? */
1253 	if (dirinfo)
1254 		ceph_fill_dirfrag(inode, dirinfo);
1255 
1256 	err = 0;
1257 out:
1258 	if (new_cap)
1259 		ceph_put_cap(mdsc, new_cap);
1260 	ceph_buffer_put(old_blob);
1261 	ceph_buffer_put(xattr_blob);
1262 	ceph_put_string(pool_ns);
1263 	return err;
1264 }
1265 
1266 /*
1267  * caller should hold session s_mutex and dentry->d_lock.
1268  */
1269 static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
1270 				  struct ceph_mds_reply_lease *lease,
1271 				  struct ceph_mds_session *session,
1272 				  unsigned long from_time,
1273 				  struct ceph_mds_session **old_lease_session)
1274 {
1275 	struct ceph_dentry_info *di = ceph_dentry(dentry);
1276 	unsigned mask = le16_to_cpu(lease->mask);
1277 	long unsigned duration = le32_to_cpu(lease->duration_ms);
1278 	long unsigned ttl = from_time + (duration * HZ) / 1000;
1279 	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
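	/*
	 * Illustrative arithmetic (not part of the original source): for a
	 * 30000 ms lease with HZ == 250, ttl is from_time + 7500 jiffies and
	 * half_ttl is from_time + 3750 jiffies, so the lease is considered
	 * due for renewal once half of its period has elapsed.
	 */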
1280 
1281 	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
1282 	     dentry, duration, ttl);
1283 
1284 	/* only track leases on regular dentries */
1285 	if (ceph_snap(dir) != CEPH_NOSNAP)
1286 		return;
1287 
1288 	if (mask & CEPH_LEASE_PRIMARY_LINK)
1289 		di->flags |= CEPH_DENTRY_PRIMARY_LINK;
1290 	else
1291 		di->flags &= ~CEPH_DENTRY_PRIMARY_LINK;
1292 
1293 	di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
1294 	if (!(mask & CEPH_LEASE_VALID)) {
1295 		__ceph_dentry_dir_lease_touch(di);
1296 		return;
1297 	}
1298 
1299 	if (di->lease_gen == atomic_read(&session->s_cap_gen) &&
1300 	    time_before(ttl, di->time))
1301 		return;  /* we already have a newer lease. */
1302 
1303 	if (di->lease_session && di->lease_session != session) {
1304 		*old_lease_session = di->lease_session;
1305 		di->lease_session = NULL;
1306 	}
1307 
1308 	if (!di->lease_session)
1309 		di->lease_session = ceph_get_mds_session(session);
1310 	di->lease_gen = atomic_read(&session->s_cap_gen);
1311 	di->lease_seq = le32_to_cpu(lease->seq);
1312 	di->lease_renew_after = half_ttl;
1313 	di->lease_renew_from = 0;
1314 	di->time = ttl;
1315 
1316 	__ceph_dentry_lease_touch(di);
1317 }
1318 
1319 static inline void update_dentry_lease(struct inode *dir, struct dentry *dentry,
1320 					struct ceph_mds_reply_lease *lease,
1321 					struct ceph_mds_session *session,
1322 					unsigned long from_time)
1323 {
1324 	struct ceph_mds_session *old_lease_session = NULL;
1325 	spin_lock(&dentry->d_lock);
1326 	__update_dentry_lease(dir, dentry, lease, session, from_time,
1327 			      &old_lease_session);
1328 	spin_unlock(&dentry->d_lock);
1329 	ceph_put_mds_session(old_lease_session);
1330 }
1331 
1332 /*
1333  * update dentry lease without having parent inode locked
1334  */
1335 static void update_dentry_lease_careful(struct dentry *dentry,
1336 					struct ceph_mds_reply_lease *lease,
1337 					struct ceph_mds_session *session,
1338 					unsigned long from_time,
1339 					char *dname, u32 dname_len,
1340 					struct ceph_vino *pdvino,
1341 					struct ceph_vino *ptvino)
1342 
1343 {
1344 	struct inode *dir;
1345 	struct ceph_mds_session *old_lease_session = NULL;
1346 
1347 	spin_lock(&dentry->d_lock);
1348 	/* make sure dentry's name matches target */
1349 	if (dentry->d_name.len != dname_len ||
1350 	    memcmp(dentry->d_name.name, dname, dname_len))
1351 		goto out_unlock;
1352 
1353 	dir = d_inode(dentry->d_parent);
1354 	/* make sure parent matches dvino */
1355 	if (!ceph_ino_compare(dir, pdvino))
1356 		goto out_unlock;
1357 
1358 	/* make sure dentry's inode matches target. NULL ptvino means that
1359 	 * we expect a negative dentry */
1360 	if (ptvino) {
1361 		if (d_really_is_negative(dentry))
1362 			goto out_unlock;
1363 		if (!ceph_ino_compare(d_inode(dentry), ptvino))
1364 			goto out_unlock;
1365 	} else {
1366 		if (d_really_is_positive(dentry))
1367 			goto out_unlock;
1368 	}
1369 
1370 	__update_dentry_lease(dir, dentry, lease, session,
1371 			      from_time, &old_lease_session);
1372 out_unlock:
1373 	spin_unlock(&dentry->d_lock);
1374 	ceph_put_mds_session(old_lease_session);
1375 }
1376 
1377 /*
1378  * splice a dentry to an inode.
1379  * caller must hold directory i_rwsem for this to be safe.
1380  */
1381 static int splice_dentry(struct dentry **pdn, struct inode *in)
1382 {
1383 	struct dentry *dn = *pdn;
1384 	struct dentry *realdn;
1385 
1386 	BUG_ON(d_inode(dn));
1387 
1388 	if (S_ISDIR(in->i_mode)) {
1389 		/* If inode is directory, d_splice_alias() below will remove
1390 		 * 'realdn' from its origin parent. We need to ensure that
1391 		 * origin parent's readdir cache will not reference 'realdn'
1392 		 */
1393 		realdn = d_find_any_alias(in);
1394 		if (realdn) {
1395 			struct ceph_dentry_info *di = ceph_dentry(realdn);
1396 			spin_lock(&realdn->d_lock);
1397 
1398 			realdn->d_op->d_prune(realdn);
1399 
1400 			di->time = jiffies;
1401 			di->lease_shared_gen = 0;
1402 			di->offset = 0;
1403 
1404 			spin_unlock(&realdn->d_lock);
1405 			dput(realdn);
1406 		}
1407 	}
1408 
1409 	/* dn must be unhashed */
1410 	if (!d_unhashed(dn))
1411 		d_drop(dn);
1412 	realdn = d_splice_alias(in, dn);
1413 	if (IS_ERR(realdn)) {
1414 		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
1415 		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
1416 		return PTR_ERR(realdn);
1417 	}
1418 
1419 	if (realdn) {
1420 		dout("dn %p (%d) spliced with %p (%d) "
1421 		     "inode %p ino %llx.%llx\n",
1422 		     dn, d_count(dn),
1423 		     realdn, d_count(realdn),
1424 		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
1425 		dput(dn);
1426 		*pdn = realdn;
1427 	} else {
1428 		BUG_ON(!ceph_dentry(dn));
1429 		dout("dn %p attached to %p ino %llx.%llx\n",
1430 		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
1431 	}
1432 	return 0;
1433 }
1434 
1435 /*
1436  * Incorporate results into the local cache.  This is either just
1437  * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
1438  * after a lookup).
1439  *
1440  * A reply may contain
1441  *         a directory inode along with a dentry.
1442  *  and/or a target inode
1443  *
1444  * Called with snap_rwsem (read).
1445  */
1446 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
1447 {
1448 	struct ceph_mds_session *session = req->r_session;
1449 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1450 	struct inode *in = NULL;
1451 	struct ceph_vino tvino, dvino;
1452 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
1453 	int err = 0;
1454 
1455 	dout("fill_trace %p is_dentry %d is_target %d\n", req,
1456 	     rinfo->head->is_dentry, rinfo->head->is_target);
1457 
1458 	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
1459 		dout("fill_trace reply is empty!\n");
1460 		if (rinfo->head->result == 0 && req->r_parent)
1461 			ceph_invalidate_dir_request(req);
1462 		return 0;
1463 	}
1464 
1465 	if (rinfo->head->is_dentry) {
1466 		struct inode *dir = req->r_parent;
1467 
1468 		if (dir) {
1469 			err = ceph_fill_inode(dir, NULL, &rinfo->diri,
1470 					      rinfo->dirfrag, session, -1,
1471 					      &req->r_caps_reservation);
1472 			if (err < 0)
1473 				goto done;
1474 		} else {
1475 			WARN_ON_ONCE(1);
1476 		}
1477 
1478 		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
1479 		    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1480 		    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1481 			bool is_nokey = false;
1482 			struct qstr dname;
1483 			struct dentry *dn, *parent;
1484 			struct fscrypt_str oname = FSTR_INIT(NULL, 0);
1485 			struct ceph_fname fname = { .dir	= dir,
1486 						    .name	= rinfo->dname,
1487 						    .ctext	= rinfo->altname,
1488 						    .name_len	= rinfo->dname_len,
1489 						    .ctext_len	= rinfo->altname_len };
1490 
1491 			BUG_ON(!rinfo->head->is_target);
1492 			BUG_ON(req->r_dentry);
1493 
1494 			parent = d_find_any_alias(dir);
1495 			BUG_ON(!parent);
1496 
1497 			err = ceph_fname_alloc_buffer(dir, &oname);
1498 			if (err < 0) {
1499 				dput(parent);
1500 				goto done;
1501 			}
1502 
1503 			err = ceph_fname_to_usr(&fname, NULL, &oname, &is_nokey);
1504 			if (err < 0) {
1505 				dput(parent);
1506 				ceph_fname_free_buffer(dir, &oname);
1507 				goto done;
1508 			}
1509 			dname.name = oname.name;
1510 			dname.len = oname.len;
1511 			dname.hash = full_name_hash(parent, dname.name, dname.len);
1512 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1513 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1514 retry_lookup:
1515 			dn = d_lookup(parent, &dname);
1516 			dout("d_lookup on parent=%p name=%.*s got %p\n",
1517 			     parent, dname.len, dname.name, dn);
1518 
1519 			if (!dn) {
1520 				dn = d_alloc(parent, &dname);
1521 				dout("d_alloc %p '%.*s' = %p\n", parent,
1522 				     dname.len, dname.name, dn);
1523 				if (!dn) {
1524 					dput(parent);
1525 					ceph_fname_free_buffer(dir, &oname);
1526 					err = -ENOMEM;
1527 					goto done;
1528 				}
1529 				if (is_nokey) {
1530 					spin_lock(&dn->d_lock);
1531 					dn->d_flags |= DCACHE_NOKEY_NAME;
1532 					spin_unlock(&dn->d_lock);
1533 				}
1534 				err = 0;
1535 			} else if (d_really_is_positive(dn) &&
1536 				   (ceph_ino(d_inode(dn)) != tvino.ino ||
1537 				    ceph_snap(d_inode(dn)) != tvino.snap)) {
1538 				dout(" dn %p points to wrong inode %p\n",
1539 				     dn, d_inode(dn));
1540 				ceph_dir_clear_ordered(dir);
1541 				d_delete(dn);
1542 				dput(dn);
1543 				goto retry_lookup;
1544 			}
1545 			ceph_fname_free_buffer(dir, &oname);
1546 
1547 			req->r_dentry = dn;
1548 			dput(parent);
1549 		}
1550 	}
1551 
1552 	if (rinfo->head->is_target) {
1553 		/* Should be filled in by handle_reply */
1554 		BUG_ON(!req->r_target_inode);
1555 
1556 		in = req->r_target_inode;
1557 		err = ceph_fill_inode(in, req->r_locked_page, &rinfo->targeti,
1558 				NULL, session,
1559 				(!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1560 				 !test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) &&
1561 				 rinfo->head->result == 0) ?  req->r_fmode : -1,
1562 				&req->r_caps_reservation);
1563 		if (err < 0) {
1564 			pr_err("ceph_fill_inode badness %p %llx.%llx\n",
1565 				in, ceph_vinop(in));
1566 			req->r_target_inode = NULL;
1567 			if (in->i_state & I_NEW)
1568 				discard_new_inode(in);
1569 			else
1570 				iput(in);
1571 			goto done;
1572 		}
1573 		if (in->i_state & I_NEW)
1574 			unlock_new_inode(in);
1575 	}
1576 
1577 	/*
1578 	 * ignore null lease/binding on snapdir ENOENT, or else we
1579 	 * will have trouble splicing in the virtual snapdir later
1580 	 */
1581 	if (rinfo->head->is_dentry &&
1582             !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1583 	    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1584 	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
1585 					       fsc->mount_options->snapdir_name,
1586 					       req->r_dentry->d_name.len))) {
1587 		/*
1588 		 * lookup link rename   : null -> possibly existing inode
1589 		 * mknod symlink mkdir  : null -> new inode
1590 		 * unlink               : linked -> null
1591 		 */
1592 		struct inode *dir = req->r_parent;
1593 		struct dentry *dn = req->r_dentry;
1594 		bool have_dir_cap, have_lease;
1595 
1596 		BUG_ON(!dn);
1597 		BUG_ON(!dir);
1598 		BUG_ON(d_inode(dn->d_parent) != dir);
1599 
1600 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1601 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1602 
1603 		BUG_ON(ceph_ino(dir) != dvino.ino);
1604 		BUG_ON(ceph_snap(dir) != dvino.snap);
1605 
1606 		/* do we have a lease on the whole dir? */
1607 		have_dir_cap =
1608 			(le32_to_cpu(rinfo->diri.in->cap.caps) &
1609 			 CEPH_CAP_FILE_SHARED);
1610 
1611 		/* do we have a dn lease? */
1612 		have_lease = have_dir_cap ||
1613 			le32_to_cpu(rinfo->dlease->duration_ms);
1614 		if (!have_lease)
1615 			dout("fill_trace  no dentry lease or dir cap\n");
1616 
1617 		/* rename? */
1618 		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
1619 			struct inode *olddir = req->r_old_dentry_dir;
1620 			BUG_ON(!olddir);
1621 
1622 			dout(" src %p '%pd' dst %p '%pd'\n",
1623 			     req->r_old_dentry,
1624 			     req->r_old_dentry,
1625 			     dn, dn);
1626 			dout("fill_trace doing d_move %p -> %p\n",
1627 			     req->r_old_dentry, dn);
1628 
1629 			/* d_move screws up sibling dentries' offsets */
1630 			ceph_dir_clear_ordered(dir);
1631 			ceph_dir_clear_ordered(olddir);
1632 
1633 			d_move(req->r_old_dentry, dn);
1634 			dout(" src %p '%pd' dst %p '%pd'\n",
1635 			     req->r_old_dentry,
1636 			     req->r_old_dentry,
1637 			     dn, dn);
1638 
1639 			/* ensure target dentry is invalidated, despite
1640 			   rehashing bug in vfs_rename_dir */
1641 			ceph_invalidate_dentry_lease(dn);
1642 
1643 			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
1644 			     ceph_dentry(req->r_old_dentry)->offset);
1645 
1646 			/* swap r_dentry and r_old_dentry in case
1647 			 * splice_dentry() gets called later. This is safe
1648 			 * because no other place will use them */
1649 			req->r_dentry = req->r_old_dentry;
1650 			req->r_old_dentry = dn;
1651 			dn = req->r_dentry;
1652 		}
1653 
1654 		/* null dentry? */
1655 		if (!rinfo->head->is_target) {
1656 			dout("fill_trace null dentry\n");
1657 			if (d_really_is_positive(dn)) {
1658 				dout("d_delete %p\n", dn);
1659 				ceph_dir_clear_ordered(dir);
1660 				d_delete(dn);
1661 			} else if (have_lease) {
1662 				if (d_unhashed(dn))
1663 					d_add(dn, NULL);
1664 			}
1665 
1666 			if (!d_unhashed(dn) && have_lease)
1667 				update_dentry_lease(dir, dn,
1668 						    rinfo->dlease, session,
1669 						    req->r_request_started);
1670 			goto done;
1671 		}
1672 
1673 		/* attach proper inode */
1674 		if (d_really_is_negative(dn)) {
1675 			ceph_dir_clear_ordered(dir);
1676 			ihold(in);
1677 			err = splice_dentry(&req->r_dentry, in);
1678 			if (err < 0)
1679 				goto done;
1680 			dn = req->r_dentry;  /* may have spliced */
1681 		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
1682 			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
1683 			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
1684 			     ceph_vinop(in));
1685 			d_invalidate(dn);
1686 			have_lease = false;
1687 		}
1688 
1689 		if (have_lease) {
1690 			update_dentry_lease(dir, dn,
1691 					    rinfo->dlease, session,
1692 					    req->r_request_started);
1693 		}
1694 		dout(" final dn %p\n", dn);
1695 	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1696 		    req->r_op == CEPH_MDS_OP_MKSNAP) &&
1697 	           test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1698 		   !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1699 		struct inode *dir = req->r_parent;
1700 
1701 		/* fill out a snapdir LOOKUPSNAP dentry */
1702 		BUG_ON(!dir);
1703 		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
1704 		BUG_ON(!req->r_dentry);
1705 		dout(" linking snapped dir %p to dn %p\n", in, req->r_dentry);
1706 		ceph_dir_clear_ordered(dir);
1707 		ihold(in);
1708 		err = splice_dentry(&req->r_dentry, in);
1709 		if (err < 0)
1710 			goto done;
1711 	} else if (rinfo->head->is_dentry && req->r_dentry) {
1712 		/* parent inode is not locked, be careful */
1713 		struct ceph_vino *ptvino = NULL;
1714 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1715 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1716 		if (rinfo->head->is_target) {
1717 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1718 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1719 			ptvino = &tvino;
1720 		}
1721 		update_dentry_lease_careful(req->r_dentry, rinfo->dlease,
1722 					    session, req->r_request_started,
1723 					    rinfo->dname, rinfo->dname_len,
1724 					    &dvino, ptvino);
1725 	}
1726 done:
1727 	dout("fill_trace done err=%d\n", err);
1728 	return err;
1729 }
1730 
1731 /*
1732  * Prepopulate our cache with readdir results, leases, etc.
1733  */
1734 static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
1735 					   struct ceph_mds_session *session)
1736 {
1737 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1738 	int i, err = 0;
1739 
1740 	for (i = 0; i < rinfo->dir_nr; i++) {
1741 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1742 		struct ceph_vino vino;
1743 		struct inode *in;
1744 		int rc;
1745 
1746 		vino.ino = le64_to_cpu(rde->inode.in->ino);
1747 		vino.snap = le64_to_cpu(rde->inode.in->snapid);
1748 
1749 		in = ceph_get_inode(req->r_dentry->d_sb, vino, NULL);
1750 		if (IS_ERR(in)) {
1751 			err = PTR_ERR(in);
1752 			dout("new_inode badness got %d\n", err);
1753 			continue;
1754 		}
1755 		rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1756 				     -1, &req->r_caps_reservation);
1757 		if (rc < 0) {
1758 			pr_err("ceph_fill_inode badness on %p got %d\n",
1759 			       in, rc);
1760 			err = rc;
1761 			if (in->i_state & I_NEW) {
1762 				ihold(in);
1763 				discard_new_inode(in);
1764 			}
1765 		} else if (in->i_state & I_NEW) {
1766 			unlock_new_inode(in);
1767 		}
1768 
1769 		iput(in);
1770 	}
1771 
1772 	return err;
1773 }
1774 
1775 void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
1776 {
1777 	if (ctl->page) {
1778 		kunmap(ctl->page);
1779 		put_page(ctl->page);
1780 		ctl->page = NULL;
1781 	}
1782 }
1783 
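/*
 * The readdir cache stores bare dentry pointers in pages hanging off the
 * directory's own page cache (dir->i_data).  ctl->index is a linear slot
 * number: each page holds PAGE_SIZE / sizeof(struct dentry *) entries, and
 * slot N lives in page N / nsize at offset N % nsize.  For example, with
 * 4 KiB pages and 8-byte pointers a page holds 512 entries, so slot 1000
 * lands in page 1, entry 488.  If the directory changed while the readdir
 * was in flight (i.e. the release/ordered counts moved), the cache is
 * disabled by setting ctl->index to -1.
 */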
1784 static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1785 			      struct ceph_readdir_cache_control *ctl,
1786 			      struct ceph_mds_request *req)
1787 {
1788 	struct ceph_inode_info *ci = ceph_inode(dir);
1789 	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
1790 	unsigned idx = ctl->index % nsize;
1791 	pgoff_t pgoff = ctl->index / nsize;
1792 
1793 	if (!ctl->page || pgoff != page_index(ctl->page)) {
1794 		ceph_readdir_cache_release(ctl);
1795 		if (idx == 0)
1796 			ctl->page = grab_cache_page(&dir->i_data, pgoff);
1797 		else
1798 			ctl->page = find_lock_page(&dir->i_data, pgoff);
1799 		if (!ctl->page) {
1800 			ctl->index = -1;
1801 			return idx == 0 ? -ENOMEM : 0;
1802 		}
1803 		/* reading/filling the cache is serialized by
1804 		 * i_rwsem; no need to hold the page lock */
1805 		unlock_page(ctl->page);
1806 		ctl->dentries = kmap(ctl->page);
1807 		if (idx == 0)
1808 			memset(ctl->dentries, 0, PAGE_SIZE);
1809 	}
1810 
1811 	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
1812 	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
1813 		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
1814 		ctl->dentries[idx] = dn;
1815 		ctl->index++;
1816 	} else {
1817 		dout("disable readdir cache\n");
1818 		ctl->index = -1;
1819 	}
1820 	return 0;
1821 }
1822 
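/*
 * Walk the entries in an MDS readdir reply: look up or allocate each child
 * dentry, fill in (or create) the corresponding inode, update dentry
 * leases, and opportunistically record the dentries in the shared readdir
 * cache so that later readdirs can be served from the dcache.
 */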
1823 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1824 			     struct ceph_mds_session *session)
1825 {
1826 	struct dentry *parent = req->r_dentry;
1827 	struct inode *inode = d_inode(parent);
1828 	struct ceph_inode_info *ci = ceph_inode(inode);
1829 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1830 	struct qstr dname;
1831 	struct dentry *dn;
1832 	struct inode *in;
1833 	int err = 0, skipped = 0, ret, i;
1834 	u32 frag = le32_to_cpu(req->r_args.readdir.frag);
1835 	u32 last_hash = 0;
1836 	u32 fpos_offset;
1837 	struct ceph_readdir_cache_control cache_ctl = {};
1838 
1839 	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
1840 		return readdir_prepopulate_inodes_only(req, session);
1841 
1842 	if (rinfo->hash_order) {
1843 		if (req->r_path2) {
1844 			last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1845 						  req->r_path2,
1846 						  strlen(req->r_path2));
1847 			last_hash = ceph_frag_value(last_hash);
1848 		} else if (rinfo->offset_hash) {
1849 			/* mds understands offset_hash */
1850 			WARN_ON_ONCE(req->r_readdir_offset != 2);
1851 			last_hash = le32_to_cpu(req->r_args.readdir.offset_hash);
1852 		}
1853 	}
1854 
1855 	if (rinfo->dir_dir &&
1856 	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1857 		dout("readdir_prepopulate got new frag %x -> %x\n",
1858 		     frag, le32_to_cpu(rinfo->dir_dir->frag));
1859 		frag = le32_to_cpu(rinfo->dir_dir->frag);
1860 		if (!rinfo->hash_order)
1861 			req->r_readdir_offset = 2;
1862 	}
1863 
1864 	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1865 		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1866 		     rinfo->dir_nr, parent);
1867 	} else {
1868 		dout("readdir_prepopulate %d items under dn %p\n",
1869 		     rinfo->dir_nr, parent);
1870 		if (rinfo->dir_dir)
1871 			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
1872 
1873 		if (ceph_frag_is_leftmost(frag) &&
1874 		    req->r_readdir_offset == 2 &&
1875 		    !(rinfo->hash_order && last_hash)) {
1876 			/* note dir version at start of readdir so we can
1877 			 * tell if any dentries get dropped */
1878 			req->r_dir_release_cnt =
1879 				atomic64_read(&ci->i_release_count);
1880 			req->r_dir_ordered_cnt =
1881 				atomic64_read(&ci->i_ordered_count);
1882 			req->r_readdir_cache_idx = 0;
1883 		}
1884 	}
1885 
1886 	cache_ctl.index = req->r_readdir_cache_idx;
1887 	fpos_offset = req->r_readdir_offset;
1888 
1889 	/* FIXME: release caps/leases if error occurs */
1890 	for (i = 0; i < rinfo->dir_nr; i++) {
1891 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1892 		struct ceph_vino tvino;
1893 
1894 		dname.name = rde->name;
1895 		dname.len = rde->name_len;
1896 		dname.hash = full_name_hash(parent, dname.name, dname.len);
1897 
1898 		tvino.ino = le64_to_cpu(rde->inode.in->ino);
1899 		tvino.snap = le64_to_cpu(rde->inode.in->snapid);
1900 
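		/*
		 * Compute the readdir position for this entry:
		 * ceph_make_fpos() packs the frag value (or the name hash,
		 * when results are in hash order) into the upper bits and a
		 * per-frag entry counter into the lower bits.  Offsets 0 and
		 * 1 are reserved for "." and "..", which is why the counter
		 * starts at (or resets to) 2.
		 */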
1901 		if (rinfo->hash_order) {
1902 			u32 hash = ceph_frag_value(rde->raw_hash);
1903 			if (hash != last_hash)
1904 				fpos_offset = 2;
1905 			last_hash = hash;
1906 			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
1907 		} else {
1908 			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
1909 		}
1910 
1911 retry_lookup:
1912 		dn = d_lookup(parent, &dname);
1913 		dout("d_lookup on parent=%p name=%.*s got %p\n",
1914 		     parent, dname.len, dname.name, dn);
1915 
1916 		if (!dn) {
1917 			dn = d_alloc(parent, &dname);
1918 			dout("d_alloc %p '%.*s' = %p\n", parent,
1919 			     dname.len, dname.name, dn);
1920 			if (!dn) {
1921 				dout("d_alloc badness\n");
1922 				err = -ENOMEM;
1923 				goto out;
1924 			}
1925 			if (rde->is_nokey) {
1926 				spin_lock(&dn->d_lock);
1927 				dn->d_flags |= DCACHE_NOKEY_NAME;
1928 				spin_unlock(&dn->d_lock);
1929 			}
1930 		} else if (d_really_is_positive(dn) &&
1931 			   (ceph_ino(d_inode(dn)) != tvino.ino ||
1932 			    ceph_snap(d_inode(dn)) != tvino.snap)) {
1933 			struct ceph_dentry_info *di = ceph_dentry(dn);
1934 			dout(" dn %p points to wrong inode %p\n",
1935 			     dn, d_inode(dn));
1936 
1937 			spin_lock(&dn->d_lock);
1938 			if (di->offset > 0 &&
1939 			    di->lease_shared_gen ==
1940 			    atomic_read(&ci->i_shared_gen)) {
1941 				__ceph_dir_clear_ordered(ci);
1942 				di->offset = 0;
1943 			}
1944 			spin_unlock(&dn->d_lock);
1945 
1946 			d_delete(dn);
1947 			dput(dn);
1948 			goto retry_lookup;
1949 		}
1950 
1951 		/* inode */
1952 		if (d_really_is_positive(dn)) {
1953 			in = d_inode(dn);
1954 		} else {
1955 			in = ceph_get_inode(parent->d_sb, tvino, NULL);
1956 			if (IS_ERR(in)) {
1957 				dout("new_inode badness\n");
1958 				d_drop(dn);
1959 				dput(dn);
1960 				err = PTR_ERR(in);
1961 				goto out;
1962 			}
1963 		}
1964 
1965 		ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1966 				      -1, &req->r_caps_reservation);
1967 		if (ret < 0) {
1968 			pr_err("ceph_fill_inode badness on %p got %d\n", in, ret);
1969 			if (d_really_is_negative(dn)) {
1970 				if (in->i_state & I_NEW) {
1971 					ihold(in);
1972 					discard_new_inode(in);
1973 				}
1974 				iput(in);
1975 			}
1976 			d_drop(dn);
1977 			err = ret;
1978 			goto next_item;
1979 		}
1980 		if (in->i_state & I_NEW)
1981 			unlock_new_inode(in);
1982 
1983 		if (d_really_is_negative(dn)) {
1984 			if (ceph_security_xattr_deadlock(in)) {
1985 				dout(" skip splicing dn %p to inode %p"
1986 				     " (security xattr deadlock)\n", dn, in);
1987 				iput(in);
1988 				skipped++;
1989 				goto next_item;
1990 			}
1991 
1992 			err = splice_dentry(&dn, in);
1993 			if (err < 0)
1994 				goto next_item;
1995 		}
1996 
1997 		ceph_dentry(dn)->offset = rde->offset;
1998 
1999 		update_dentry_lease(d_inode(parent), dn,
2000 				    rde->lease, req->r_session,
2001 				    req->r_request_started);
2002 
2003 		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
2004 			ret = fill_readdir_cache(d_inode(parent), dn,
2005 						 &cache_ctl, req);
2006 			if (ret < 0)
2007 				err = ret;
2008 		}
2009 next_item:
2010 		dput(dn);
2011 	}
2012 out:
2013 	if (err == 0 && skipped == 0) {
2014 		set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
2015 		req->r_readdir_cache_idx = cache_ctl.index;
2016 	}
2017 	ceph_readdir_cache_release(&cache_ctl);
2018 	dout("readdir_prepopulate done\n");
2019 	return err;
2020 }
2021 
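/*
 * Update the locally cached i_size and return true if the new size should
 * be reported to the MDS (the caller is then expected to trigger a cap
 * check).
 */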
2022 bool ceph_inode_set_size(struct inode *inode, loff_t size)
2023 {
2024 	struct ceph_inode_info *ci = ceph_inode(inode);
2025 	bool ret;
2026 
2027 	spin_lock(&ci->i_ceph_lock);
2028 	dout("set_size %p %llu -> %llu\n", inode, i_size_read(inode), size);
2029 	i_size_write(inode, size);
2030 	ceph_fscache_update(inode);
2031 	inode->i_blocks = calc_inode_blocks(size);
2032 
2033 	ret = __ceph_should_report_size(ci);
2034 
2035 	spin_unlock(&ci->i_ceph_lock);
2036 
2037 	return ret;
2038 }
2039 
2040 void ceph_queue_inode_work(struct inode *inode, int work_bit)
2041 {
2042 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2043 	struct ceph_inode_info *ci = ceph_inode(inode);
2044 	set_bit(work_bit, &ci->i_work_mask);
2045 
2046 	ihold(inode);
2047 	if (queue_work(fsc->inode_wq, &ci->i_work)) {
2048 		dout("queue_inode_work %p, mask=%lx\n", inode, ci->i_work_mask);
2049 	} else {
2050 		dout("queue_inode_work %p already queued, mask=%lx\n",
2051 		     inode, ci->i_work_mask);
2052 		iput(inode);
2053 	}
2054 }
2055 
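/*
 * Invalidate clean pagecache pages in response to the MDS revoking the
 * FILE_CACHE capability.  i_rdcache_gen is sampled before the locks are
 * dropped; if it still matches i_rdcache_revoking after the invalidation,
 * nothing repopulated the cache in the meantime and the revocation can be
 * acknowledged by decrementing i_rdcache_revoking.  Otherwise the attempt
 * simply raced and the counters are left alone.
 */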
2056 static void ceph_do_invalidate_pages(struct inode *inode)
2057 {
2058 	struct ceph_inode_info *ci = ceph_inode(inode);
2059 	u32 orig_gen;
2060 	int check = 0;
2061 
2062 	ceph_fscache_invalidate(inode, false);
2063 
2064 	mutex_lock(&ci->i_truncate_mutex);
2065 
2066 	if (ceph_inode_is_shutdown(inode)) {
2067 		pr_warn_ratelimited("%s: inode %llx.%llx is shut down\n",
2068 				    __func__, ceph_vinop(inode));
2069 		mapping_set_error(inode->i_mapping, -EIO);
2070 		truncate_pagecache(inode, 0);
2071 		mutex_unlock(&ci->i_truncate_mutex);
2072 		goto out;
2073 	}
2074 
2075 	spin_lock(&ci->i_ceph_lock);
2076 	dout("invalidate_pages %p gen %d revoking %d\n", inode,
2077 	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
2078 	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
2079 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
2080 			check = 1;
2081 		spin_unlock(&ci->i_ceph_lock);
2082 		mutex_unlock(&ci->i_truncate_mutex);
2083 		goto out;
2084 	}
2085 	orig_gen = ci->i_rdcache_gen;
2086 	spin_unlock(&ci->i_ceph_lock);
2087 
2088 	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
2089 		pr_err("invalidate_inode_pages2 %llx.%llx failed\n",
2090 		       ceph_vinop(inode));
2091 	}
2092 
2093 	spin_lock(&ci->i_ceph_lock);
2094 	if (orig_gen == ci->i_rdcache_gen &&
2095 	    orig_gen == ci->i_rdcache_revoking) {
2096 		dout("invalidate_pages %p gen %d successful\n", inode,
2097 		     ci->i_rdcache_gen);
2098 		ci->i_rdcache_revoking--;
2099 		check = 1;
2100 	} else {
2101 		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
2102 		     inode, orig_gen, ci->i_rdcache_gen,
2103 		     ci->i_rdcache_revoking);
2104 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
2105 			check = 1;
2106 	}
2107 	spin_unlock(&ci->i_ceph_lock);
2108 	mutex_unlock(&ci->i_truncate_mutex);
2109 out:
2110 	if (check)
2111 		ceph_check_caps(ci, 0);
2112 }
2113 
2114 /*
2115  * Make sure any pending truncation is applied before doing anything
2116  * that may depend on it.
2117  */
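/*
 * The truncation itself runs without i_ceph_lock held, so i_truncate_size
 * is re-checked afterwards and the whole operation retried if another
 * truncate arrived in the meantime.  Dirty pages belonging to older snap
 * contexts are written out first, since truncating them away would lose
 * snapshotted data.
 */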
2118 void __ceph_do_pending_vmtruncate(struct inode *inode)
2119 {
2120 	struct ceph_inode_info *ci = ceph_inode(inode);
2121 	u64 to;
2122 	int wrbuffer_refs, finish = 0;
2123 
2124 	mutex_lock(&ci->i_truncate_mutex);
2125 retry:
2126 	spin_lock(&ci->i_ceph_lock);
2127 	if (ci->i_truncate_pending == 0) {
2128 		dout("__do_pending_vmtruncate %p none pending\n", inode);
2129 		spin_unlock(&ci->i_ceph_lock);
2130 		mutex_unlock(&ci->i_truncate_mutex);
2131 		return;
2132 	}
2133 
2134 	/*
2135 	 * make sure any dirty snapped pages are flushed before we
2136 	 * possibly truncate them.. so write AND block!
2137 	 */
2138 	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
2139 		spin_unlock(&ci->i_ceph_lock);
2140 		dout("__do_pending_vmtruncate %p flushing snaps first\n",
2141 		     inode);
2142 		filemap_write_and_wait_range(&inode->i_data, 0,
2143 					     inode->i_sb->s_maxbytes);
2144 		goto retry;
2145 	}
2146 
2147 	/* there should be no reader or writer */
2148 	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
2149 
2150 	to = ci->i_truncate_size;
2151 	wrbuffer_refs = ci->i_wrbuffer_ref;
2152 	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
2153 	     ci->i_truncate_pending, to);
2154 	spin_unlock(&ci->i_ceph_lock);
2155 
2156 	ceph_fscache_resize(inode, to);
2157 	truncate_pagecache(inode, to);
2158 
2159 	spin_lock(&ci->i_ceph_lock);
2160 	if (to == ci->i_truncate_size) {
2161 		ci->i_truncate_pending = 0;
2162 		finish = 1;
2163 	}
2164 	spin_unlock(&ci->i_ceph_lock);
2165 	if (!finish)
2166 		goto retry;
2167 
2168 	mutex_unlock(&ci->i_truncate_mutex);
2169 
2170 	if (wrbuffer_refs == 0)
2171 		ceph_check_caps(ci, 0);
2172 
2173 	wake_up_all(&ci->i_cap_wq);
2174 }
2175 
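/*
 * Deferred-work handler for fsc->inode_wq.  Each bit set in i_work_mask by
 * ceph_queue_inode_work() corresponds to one of the actions below; the
 * inode reference taken when the work was queued is dropped at the end.
 */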
2176 static void ceph_inode_work(struct work_struct *work)
2177 {
2178 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
2179 						 i_work);
2180 	struct inode *inode = &ci->netfs.inode;
2181 
2182 	if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
2183 		dout("writeback %p\n", inode);
2184 		filemap_fdatawrite(&inode->i_data);
2185 	}
2186 	if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
2187 		ceph_do_invalidate_pages(inode);
2188 
2189 	if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
2190 		__ceph_do_pending_vmtruncate(inode);
2191 
2192 	if (test_and_clear_bit(CEPH_I_WORK_CHECK_CAPS, &ci->i_work_mask))
2193 		ceph_check_caps(ci, 0);
2194 
2195 	if (test_and_clear_bit(CEPH_I_WORK_FLUSH_SNAPS, &ci->i_work_mask))
2196 		ceph_flush_snaps(ci, NULL);
2197 
2198 	iput(inode);
2199 }
2200 
2201 static const char *ceph_encrypted_get_link(struct dentry *dentry,
2202 					   struct inode *inode,
2203 					   struct delayed_call *done)
2204 {
2205 	struct ceph_inode_info *ci = ceph_inode(inode);
2206 
2207 	if (!dentry)
2208 		return ERR_PTR(-ECHILD);
2209 
2210 	return fscrypt_get_symlink(inode, ci->i_symlink, i_size_read(inode),
2211 				   done);
2212 }
2213 
2214 static int ceph_encrypted_symlink_getattr(struct mnt_idmap *idmap,
2215 					  const struct path *path,
2216 					  struct kstat *stat, u32 request_mask,
2217 					  unsigned int query_flags)
2218 {
2219 	int ret;
2220 
2221 	ret = ceph_getattr(idmap, path, stat, request_mask, query_flags);
2222 	if (ret)
2223 		return ret;
2224 	return fscrypt_symlink_getattr(path, stat);
2225 }
2226 
2227 /*
2228  * symlinks
2229  */
2230 static const struct inode_operations ceph_symlink_iops = {
2231 	.get_link = simple_get_link,
2232 	.setattr = ceph_setattr,
2233 	.getattr = ceph_getattr,
2234 	.listxattr = ceph_listxattr,
2235 };
2236 
2237 static const struct inode_operations ceph_encrypted_symlink_iops = {
2238 	.get_link = ceph_encrypted_get_link,
2239 	.setattr = ceph_setattr,
2240 	.getattr = ceph_encrypted_symlink_getattr,
2241 	.listxattr = ceph_listxattr,
2242 };
2243 
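/*
 * Apply an attribute change, doing as much of it locally as our caps
 * allow.  For each attribute: if the relevant exclusive cap is issued, the
 * new value is applied to the in-core inode and that cap is marked dirty
 * (to be flushed to the MDS later); otherwise the change is added to the
 * MDS setattr request mask and the caps to drop when sending the request
 * are accumulated in 'release'.  If nothing ends up in the request mask,
 * no MDS round trip is needed at all; e.g. a chmod while holding the
 * auth-exclusive cap is purely local.
 */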
2244 int __ceph_setattr(struct inode *inode, struct iattr *attr,
2245 		   struct ceph_iattr *cia)
2246 {
2247 	struct ceph_inode_info *ci = ceph_inode(inode);
2248 	unsigned int ia_valid = attr->ia_valid;
2249 	struct ceph_mds_request *req;
2250 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
2251 	struct ceph_cap_flush *prealloc_cf;
2252 	int issued;
2253 	int release = 0, dirtied = 0;
2254 	int mask = 0;
2255 	int err = 0;
2256 	int inode_dirty_flags = 0;
2257 	bool lock_snap_rwsem = false;
2258 
2259 	prealloc_cf = ceph_alloc_cap_flush();
2260 	if (!prealloc_cf)
2261 		return -ENOMEM;
2262 
2263 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
2264 				       USE_AUTH_MDS);
2265 	if (IS_ERR(req)) {
2266 		ceph_free_cap_flush(prealloc_cf);
2267 		return PTR_ERR(req);
2268 	}
2269 
2270 	spin_lock(&ci->i_ceph_lock);
2271 	issued = __ceph_caps_issued(ci, NULL);
2272 
2273 	if (!ci->i_head_snapc &&
2274 	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
2275 		lock_snap_rwsem = true;
2276 		if (!down_read_trylock(&mdsc->snap_rwsem)) {
2277 			spin_unlock(&ci->i_ceph_lock);
2278 			down_read(&mdsc->snap_rwsem);
2279 			spin_lock(&ci->i_ceph_lock);
2280 			issued = __ceph_caps_issued(ci, NULL);
2281 		}
2282 	}
2283 
2284 	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
2285 #if IS_ENABLED(CONFIG_FS_ENCRYPTION)
2286 	if (cia && cia->fscrypt_auth) {
2287 		u32 len = ceph_fscrypt_auth_len(cia->fscrypt_auth);
2288 
2289 		if (len > sizeof(*cia->fscrypt_auth)) {
2290 			err = -EINVAL;
2291 			spin_unlock(&ci->i_ceph_lock);
2292 			goto out;
2293 		}
2294 
2295 		dout("setattr %llx.%llx fscrypt_auth len %u to %u\n",
2296 			ceph_vinop(inode), ci->fscrypt_auth_len, len);
2297 
2298 		/* It should never be re-set once set */
2299 		WARN_ON_ONCE(ci->fscrypt_auth);
2300 
2301 		if (issued & CEPH_CAP_AUTH_EXCL) {
2302 			dirtied |= CEPH_CAP_AUTH_EXCL;
2303 			kfree(ci->fscrypt_auth);
2304 			ci->fscrypt_auth = (u8 *)cia->fscrypt_auth;
2305 			ci->fscrypt_auth_len = len;
2306 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2307 			   ci->fscrypt_auth_len != len ||
2308 			   memcmp(ci->fscrypt_auth, cia->fscrypt_auth, len)) {
2309 			req->r_fscrypt_auth = cia->fscrypt_auth;
2310 			mask |= CEPH_SETATTR_FSCRYPT_AUTH;
2311 			release |= CEPH_CAP_AUTH_SHARED;
2312 		}
2313 		cia->fscrypt_auth = NULL;
2314 	}
2315 #else
2316 	if (cia && cia->fscrypt_auth) {
2317 		err = -EINVAL;
2318 		spin_unlock(&ci->i_ceph_lock);
2319 		goto out;
2320 	}
2321 #endif /* CONFIG_FS_ENCRYPTION */
2322 
2323 	if (ia_valid & ATTR_UID) {
2324 		dout("setattr %p uid %d -> %d\n", inode,
2325 		     from_kuid(&init_user_ns, inode->i_uid),
2326 		     from_kuid(&init_user_ns, attr->ia_uid));
2327 		if (issued & CEPH_CAP_AUTH_EXCL) {
2328 			inode->i_uid = attr->ia_uid;
2329 			dirtied |= CEPH_CAP_AUTH_EXCL;
2330 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2331 			   !uid_eq(attr->ia_uid, inode->i_uid)) {
2332 			req->r_args.setattr.uid = cpu_to_le32(
2333 				from_kuid(&init_user_ns, attr->ia_uid));
2334 			mask |= CEPH_SETATTR_UID;
2335 			release |= CEPH_CAP_AUTH_SHARED;
2336 		}
2337 	}
2338 	if (ia_valid & ATTR_GID) {
2339 		dout("setattr %p gid %d -> %d\n", inode,
2340 		     from_kgid(&init_user_ns, inode->i_gid),
2341 		     from_kgid(&init_user_ns, attr->ia_gid));
2342 		if (issued & CEPH_CAP_AUTH_EXCL) {
2343 			inode->i_gid = attr->ia_gid;
2344 			dirtied |= CEPH_CAP_AUTH_EXCL;
2345 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2346 			   !gid_eq(attr->ia_gid, inode->i_gid)) {
2347 			req->r_args.setattr.gid = cpu_to_le32(
2348 				from_kgid(&init_user_ns, attr->ia_gid));
2349 			mask |= CEPH_SETATTR_GID;
2350 			release |= CEPH_CAP_AUTH_SHARED;
2351 		}
2352 	}
2353 	if (ia_valid & ATTR_MODE) {
2354 		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
2355 		     attr->ia_mode);
2356 		if (issued & CEPH_CAP_AUTH_EXCL) {
2357 			inode->i_mode = attr->ia_mode;
2358 			dirtied |= CEPH_CAP_AUTH_EXCL;
2359 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2360 			   attr->ia_mode != inode->i_mode) {
2361 			inode->i_mode = attr->ia_mode;
2362 			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
2363 			mask |= CEPH_SETATTR_MODE;
2364 			release |= CEPH_CAP_AUTH_SHARED;
2365 		}
2366 	}
2367 
2368 	if (ia_valid & ATTR_ATIME) {
2369 		dout("setattr %p atime %lld.%ld -> %lld.%ld\n", inode,
2370 		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
2371 		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
2372 		if (issued & CEPH_CAP_FILE_EXCL) {
2373 			ci->i_time_warp_seq++;
2374 			inode->i_atime = attr->ia_atime;
2375 			dirtied |= CEPH_CAP_FILE_EXCL;
2376 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2377 			   timespec64_compare(&inode->i_atime,
2378 					    &attr->ia_atime) < 0) {
2379 			inode->i_atime = attr->ia_atime;
2380 			dirtied |= CEPH_CAP_FILE_WR;
2381 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2382 			   !timespec64_equal(&inode->i_atime, &attr->ia_atime)) {
2383 			ceph_encode_timespec64(&req->r_args.setattr.atime,
2384 					       &attr->ia_atime);
2385 			mask |= CEPH_SETATTR_ATIME;
2386 			release |= CEPH_CAP_FILE_SHARED |
2387 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2388 		}
2389 	}
2390 	if (ia_valid & ATTR_SIZE) {
2391 		loff_t isize = i_size_read(inode);
2392 
2393 		dout("setattr %p size %lld -> %lld\n", inode, isize, attr->ia_size);
2394 		if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size >= isize) {
2395 			if (attr->ia_size > isize) {
2396 				i_size_write(inode, attr->ia_size);
2397 				inode->i_blocks = calc_inode_blocks(attr->ia_size);
2398 				ci->i_reported_size = attr->ia_size;
2399 				dirtied |= CEPH_CAP_FILE_EXCL;
2400 				ia_valid |= ATTR_MTIME;
2401 			}
2402 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2403 			   attr->ia_size != isize) {
2404 			mask |= CEPH_SETATTR_SIZE;
2405 			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2406 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2407 			if (IS_ENCRYPTED(inode) && attr->ia_size) {
2408 				set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
2409 				mask |= CEPH_SETATTR_FSCRYPT_FILE;
2410 				req->r_args.setattr.size =
2411 					cpu_to_le64(round_up(attr->ia_size,
2412 							     CEPH_FSCRYPT_BLOCK_SIZE));
2413 				req->r_args.setattr.old_size =
2414 					cpu_to_le64(round_up(isize,
2415 							     CEPH_FSCRYPT_BLOCK_SIZE));
2416 				req->r_fscrypt_file = attr->ia_size;
2417 				/* FIXME: client must zero out any partial blocks! */
2418 			} else {
2419 				req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
2420 				req->r_args.setattr.old_size = cpu_to_le64(isize);
2421 				req->r_fscrypt_file = 0;
2422 			}
2423 		}
2424 	}
2425 	if (ia_valid & ATTR_MTIME) {
2426 		dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
2427 		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
2428 		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
2429 		if (issued & CEPH_CAP_FILE_EXCL) {
2430 			ci->i_time_warp_seq++;
2431 			inode->i_mtime = attr->ia_mtime;
2432 			dirtied |= CEPH_CAP_FILE_EXCL;
2433 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2434 			   timespec64_compare(&inode->i_mtime,
2435 					    &attr->ia_mtime) < 0) {
2436 			inode->i_mtime = attr->ia_mtime;
2437 			dirtied |= CEPH_CAP_FILE_WR;
2438 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2439 			   !timespec64_equal(&inode->i_mtime, &attr->ia_mtime)) {
2440 			ceph_encode_timespec64(&req->r_args.setattr.mtime,
2441 					       &attr->ia_mtime);
2442 			mask |= CEPH_SETATTR_MTIME;
2443 			release |= CEPH_CAP_FILE_SHARED |
2444 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2445 		}
2446 	}
2447 
2448 	/* these do nothing */
2449 	/* these do (almost) nothing */
2450 		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
2451 					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
2452 		dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode,
2453 		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
2454 		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2455 		     only ? "ctime only" : "ignored");
2456 		if (only) {
2457 			/*
2458 			 * if the kernel wants to dirty ctime but nothing else,
2459 			 * we need to choose a cap to dirty under, or do
2460 			 * an almost-no-op setattr
2461 			 */
2462 			if (issued & CEPH_CAP_AUTH_EXCL)
2463 				dirtied |= CEPH_CAP_AUTH_EXCL;
2464 			else if (issued & CEPH_CAP_FILE_EXCL)
2465 				dirtied |= CEPH_CAP_FILE_EXCL;
2466 			else if (issued & CEPH_CAP_XATTR_EXCL)
2467 				dirtied |= CEPH_CAP_XATTR_EXCL;
2468 			else
2469 				mask |= CEPH_SETATTR_CTIME;
2470 		}
2471 	}
2472 	if (ia_valid & ATTR_FILE)
2473 		dout("setattr %p ATTR_FILE ... hrm!\n", inode);
2474 
2475 	if (dirtied) {
2476 		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2477 							   &prealloc_cf);
2478 		inode->i_ctime = attr->ia_ctime;
2479 		inode_inc_iversion_raw(inode);
2480 	}
2481 
2482 	release &= issued;
2483 	spin_unlock(&ci->i_ceph_lock);
2484 	if (lock_snap_rwsem)
2485 		up_read(&mdsc->snap_rwsem);
2486 
2487 	if (inode_dirty_flags)
2488 		__mark_inode_dirty(inode, inode_dirty_flags);
2489 
2490 	if (mask) {
2491 		req->r_inode = inode;
2492 		ihold(inode);
2493 		req->r_inode_drop = release;
2494 		req->r_args.setattr.mask = cpu_to_le32(mask);
2495 		req->r_num_caps = 1;
2496 		req->r_stamp = attr->ia_ctime;
2497 		err = ceph_mdsc_do_request(mdsc, NULL, req);
2498 	}
2499 out:
2500 	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
2501 	     ceph_cap_string(dirtied), mask);
2502 
2503 	ceph_mdsc_put_request(req);
2504 	ceph_free_cap_flush(prealloc_cf);
2505 
2506 	if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
2507 		__ceph_do_pending_vmtruncate(inode);
2508 
2509 	return err;
2510 }
2511 
2512 /*
2513  * setattr
2514  */
2515 int ceph_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
2516 		 struct iattr *attr)
2517 {
2518 	struct inode *inode = d_inode(dentry);
2519 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2520 	int err;
2521 
2522 	if (ceph_snap(inode) != CEPH_NOSNAP)
2523 		return -EROFS;
2524 
2525 	if (ceph_inode_is_shutdown(inode))
2526 		return -ESTALE;
2527 
2528 	err = fscrypt_prepare_setattr(dentry, attr);
2529 	if (err)
2530 		return err;
2531 
2532 	err = setattr_prepare(&nop_mnt_idmap, dentry, attr);
2533 	if (err != 0)
2534 		return err;
2535 
2536 	if ((attr->ia_valid & ATTR_SIZE) &&
2537 	    attr->ia_size > max(i_size_read(inode), fsc->max_file_size))
2538 		return -EFBIG;
2539 
2540 	if ((attr->ia_valid & ATTR_SIZE) &&
2541 	    ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
2542 		return -EDQUOT;
2543 
2544 	err = __ceph_setattr(inode, attr, NULL);
2545 
2546 	if (err >= 0 && (attr->ia_valid & ATTR_MODE))
2547 		err = posix_acl_chmod(&nop_mnt_idmap, dentry, attr->ia_mode);
2548 
2549 	return err;
2550 }
2551 
2552 int ceph_try_to_choose_auth_mds(struct inode *inode, int mask)
2553 {
2554 	int issued = ceph_caps_issued(ceph_inode(inode));
2555 
2556 	/*
2557 	 * If any 'x' caps are issued we can just choose the auth MDS
2558 	 * instead of a random replica MDS, because a loner client can
2559 	 * only be granted 'x' caps while the Locker is in the LOCK_EXEC
2560 	 * state.  If we sent the getattr request to a replica MDS
2561 	 * instead, it would have to auth-pin and try to rdlock from the
2562 	 * auth MDS, forcing the auth MDS to transition the Locker state
2563 	 * to LOCK_SYNC, after which the lock state changes back again.
2564 	 *
2565 	 * These Locker state transitions are expensive and usually
2566 	 * require revoking caps from clients.
2567 	 *
2568 	 * For the 'Xs' caps needed by getxattr we also choose the auth
2569 	 * MDS, because the MDS-side code is buggy: setxattr does not
2570 	 * notify the replica MDSes when values change, so a replica MDS
2571 	 * may return stale values.  Though this will be fixed in the MDS
2572 	 * code, preferring the auth MDS still makes sense for old ceph
2573 	 * versions.
2574 	 */
2575 	if (((mask & CEPH_CAP_ANY_SHARED) && (issued & CEPH_CAP_ANY_EXCL))
2576 	    || (mask & (CEPH_STAT_RSTAT | CEPH_STAT_CAP_XATTR)))
2577 		return USE_AUTH_MDS;
2578 	else
2579 		return USE_ANY_MDS;
2580 }
2581 
2582 /*
2583  * Verify that we have a lease on the given mask.  If not,
2584  * do a getattr against an mds.
2585  */
2586 int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
2587 		      int mask, bool force)
2588 {
2589 	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
2590 	struct ceph_mds_client *mdsc = fsc->mdsc;
2591 	struct ceph_mds_request *req;
2592 	int mode;
2593 	int err;
2594 
2595 	if (ceph_snap(inode) == CEPH_SNAPDIR) {
2596 		dout("do_getattr inode %p SNAPDIR\n", inode);
2597 		return 0;
2598 	}
2599 
2600 	dout("do_getattr inode %p mask %s mode 0%o\n",
2601 	     inode, ceph_cap_string(mask), inode->i_mode);
2602 	if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
2603 		return 0;
2604 
2605 	mode = ceph_try_to_choose_auth_mds(inode, mask);
2606 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
2607 	if (IS_ERR(req))
2608 		return PTR_ERR(req);
2609 	req->r_inode = inode;
2610 	ihold(inode);
2611 	req->r_num_caps = 1;
2612 	req->r_args.getattr.mask = cpu_to_le32(mask);
2613 	req->r_locked_page = locked_page;
2614 	err = ceph_mdsc_do_request(mdsc, NULL, req);
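	/*
	 * When the caller passed in a locked page it wants the inode's
	 * inline data: on success return the inline data length, or
	 * -ENODATA if the inode does not (or no longer does) have inline
	 * data.
	 */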
2615 	if (locked_page && err == 0) {
2616 		u64 inline_version = req->r_reply_info.targeti.inline_version;
2617 		if (inline_version == 0) {
2618 			/* the reply is supposed to contain inline data */
2619 			err = -EINVAL;
2620 		} else if (inline_version == CEPH_INLINE_NONE ||
2621 			   inline_version == 1) {
2622 			err = -ENODATA;
2623 		} else {
2624 			err = req->r_reply_info.targeti.inline_len;
2625 		}
2626 	}
2627 	ceph_mdsc_put_request(req);
2628 	dout("do_getattr result=%d\n", err);
2629 	return err;
2630 }
2631 
2632 int ceph_do_getvxattr(struct inode *inode, const char *name, void *value,
2633 		      size_t size)
2634 {
2635 	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
2636 	struct ceph_mds_client *mdsc = fsc->mdsc;
2637 	struct ceph_mds_request *req;
2638 	int mode = USE_AUTH_MDS;
2639 	int err;
2640 	char *xattr_value;
2641 	size_t xattr_value_len;
2642 
2643 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETVXATTR, mode);
2644 	if (IS_ERR(req)) {
2645 		err = -ENOMEM;
2646 		goto out;
2647 	}
2648 
2649 	req->r_feature_needed = CEPHFS_FEATURE_OP_GETVXATTR;
2650 	req->r_path2 = kstrdup(name, GFP_NOFS);
2651 	if (!req->r_path2) {
2652 		err = -ENOMEM;
2653 		goto put;
2654 	}
2655 
2656 	ihold(inode);
2657 	req->r_inode = inode;
2658 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2659 	if (err < 0)
2660 		goto put;
2661 
2662 	xattr_value = req->r_reply_info.xattr_info.xattr_value;
2663 	xattr_value_len = req->r_reply_info.xattr_info.xattr_value_len;
2664 
2665 	dout("do_getvxattr xattr_value_len:%zu, size:%zu\n", xattr_value_len, size);
2666 
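	/*
	 * Follow the usual getxattr convention: a zero-size buffer is a
	 * size query and only the value length is returned, while a buffer
	 * too small for the value yields -ERANGE.
	 */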
2667 	err = (int)xattr_value_len;
2668 	if (size == 0)
2669 		goto put;
2670 
2671 	if (xattr_value_len > size) {
2672 		err = -ERANGE;
2673 		goto put;
2674 	}
2675 
2676 	memcpy(value, xattr_value, xattr_value_len);
2677 put:
2678 	ceph_mdsc_put_request(req);
2679 out:
2680 	dout("do_getvxattr result=%d\n", err);
2681 	return err;
2682 }
2683 
2684 
2685 /*
2686  * Check inode permissions.  We verify we have a valid value for
2687  * the AUTH cap, then call the generic handler.
2688  */
2689 int ceph_permission(struct mnt_idmap *idmap, struct inode *inode,
2690 		    int mask)
2691 {
2692 	int err;
2693 
2694 	if (mask & MAY_NOT_BLOCK)
2695 		return -ECHILD;
2696 
2697 	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
2698 
2699 	if (!err)
2700 		err = generic_permission(&nop_mnt_idmap, inode, mask);
2701 	return err;
2702 }
2703 
2704 /* Craft a mask of needed caps given a set of requested statx attrs. */
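/*
 * Only the caps that actually cover the requested attributes are needed,
 * so e.g. a STATX_SIZE-only request maps to just Fs (FILE_SHARED); if the
 * resulting caps are already issued, ceph_getattr() below can answer
 * without contacting the MDS.
 */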
2705 static int statx_to_caps(u32 want, umode_t mode)
2706 {
2707 	int mask = 0;
2708 
2709 	if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME|STATX_CHANGE_COOKIE))
2710 		mask |= CEPH_CAP_AUTH_SHARED;
2711 
2712 	if (want & (STATX_NLINK|STATX_CTIME|STATX_CHANGE_COOKIE)) {
2713 		/*
2714 		 * The link count for directories depends on inode->i_subdirs,
2715 		 * and that is only updated when Fs caps are held.
2716 		 */
2717 		if (S_ISDIR(mode))
2718 			mask |= CEPH_CAP_FILE_SHARED;
2719 		else
2720 			mask |= CEPH_CAP_LINK_SHARED;
2721 	}
2722 
2723 	if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|STATX_BLOCKS|STATX_CHANGE_COOKIE))
2724 		mask |= CEPH_CAP_FILE_SHARED;
2725 
2726 	if (want & (STATX_CTIME|STATX_CHANGE_COOKIE))
2727 		mask |= CEPH_CAP_XATTR_SHARED;
2728 
2729 	return mask;
2730 }
2731 
2732 /*
2733  * Get all the attributes. If we have sufficient caps for the requested attrs,
2734  * then we can avoid talking to the MDS at all.
2735  */
2736 int ceph_getattr(struct mnt_idmap *idmap, const struct path *path,
2737 		 struct kstat *stat, u32 request_mask, unsigned int flags)
2738 {
2739 	struct inode *inode = d_inode(path->dentry);
2740 	struct super_block *sb = inode->i_sb;
2741 	struct ceph_inode_info *ci = ceph_inode(inode);
2742 	u32 valid_mask = STATX_BASIC_STATS;
2743 	int err = 0;
2744 
2745 	if (ceph_inode_is_shutdown(inode))
2746 		return -ESTALE;
2747 
2748 	/* Skip the getattr altogether if we're asked not to sync */
2749 	if ((flags & AT_STATX_SYNC_TYPE) != AT_STATX_DONT_SYNC) {
2750 		err = ceph_do_getattr(inode,
2751 				statx_to_caps(request_mask, inode->i_mode),
2752 				flags & AT_STATX_FORCE_SYNC);
2753 		if (err)
2754 			return err;
2755 	}
2756 
2757 	generic_fillattr(&nop_mnt_idmap, inode, stat);
2758 	stat->ino = ceph_present_inode(inode);
2759 
2760 	/*
2761 	 * btime on newly-allocated inodes is 0; if it is still 0 here,
2762 	 * assume it is not valid.
2763 	 */
2764 	if (ci->i_btime.tv_sec || ci->i_btime.tv_nsec) {
2765 		stat->btime = ci->i_btime;
2766 		valid_mask |= STATX_BTIME;
2767 	}
2768 
2769 	if (request_mask & STATX_CHANGE_COOKIE) {
2770 		stat->change_cookie = inode_peek_iversion_raw(inode);
2771 		valid_mask |= STATX_CHANGE_COOKIE;
2772 	}
2773 
2774 	if (ceph_snap(inode) == CEPH_NOSNAP)
2775 		stat->dev = sb->s_dev;
2776 	else
2777 		stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
2778 
2779 	if (S_ISDIR(inode->i_mode)) {
2780 		if (ceph_test_mount_opt(ceph_sb_to_client(sb), RBYTES)) {
2781 			stat->size = ci->i_rbytes;
2782 		} else if (ceph_snap(inode) == CEPH_SNAPDIR) {
2783 			struct ceph_inode_info *pci;
2784 			struct ceph_snap_realm *realm;
2785 			struct inode *parent;
2786 
2787 			parent = ceph_lookup_inode(sb, ceph_ino(inode));
2788 			if (IS_ERR(parent))
2789 				return PTR_ERR(parent);
2790 
2791 			pci = ceph_inode(parent);
2792 			spin_lock(&pci->i_ceph_lock);
2793 			realm = pci->i_snap_realm;
2794 			if (realm)
2795 				stat->size = realm->num_snaps;
2796 			else
2797 				stat->size = 0;
2798 			spin_unlock(&pci->i_ceph_lock);
2799 			iput(parent);
2800 		} else {
2801 			stat->size = ci->i_files + ci->i_subdirs;
2802 		}
2803 		stat->blocks = 0;
2804 		stat->blksize = 65536;
2805 		/*
2806 		 * Some applications rely on the st_nlink value of
2807 		 * directories being either 0 (if unlinked) or
2808 		 * 2 + the number of subdirectories.
2809 		 */
2810 		if (stat->nlink == 1)
2811 			/* '.' + '..' + subdirs */
2812 			stat->nlink = 1 + 1 + ci->i_subdirs;
2813 	}
2814 
2815 	stat->attributes |= STATX_ATTR_CHANGE_MONOTONIC;
2816 	if (IS_ENCRYPTED(inode))
2817 		stat->attributes |= STATX_ATTR_ENCRYPTED;
2818 	stat->attributes_mask |= (STATX_ATTR_CHANGE_MONOTONIC |
2819 				  STATX_ATTR_ENCRYPTED);
2820 
2821 	stat->result_mask = request_mask & valid_mask;
2822 	return err;
2823 }
2824 
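/*
 * Mark an inode as shut down and purge all of its caps.  The iputs
 * requested by the purge are performed only after i_ceph_lock has been
 * dropped, and pagecache invalidation is queued if the purge asked for it.
 */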
2825 void ceph_inode_shutdown(struct inode *inode)
2826 {
2827 	struct ceph_inode_info *ci = ceph_inode(inode);
2828 	struct rb_node *p;
2829 	int iputs = 0;
2830 	bool invalidate = false;
2831 
2832 	spin_lock(&ci->i_ceph_lock);
2833 	ci->i_ceph_flags |= CEPH_I_SHUTDOWN;
2834 	p = rb_first(&ci->i_caps);
2835 	while (p) {
2836 		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
2837 
2838 		p = rb_next(p);
2839 		iputs += ceph_purge_inode_cap(inode, cap, &invalidate);
2840 	}
2841 	spin_unlock(&ci->i_ceph_lock);
2842 
2843 	if (invalidate)
2844 		ceph_queue_invalidate(inode);
2845 	while (iputs--)
2846 		iput(inode);
2847 }
2848