xref: /openbmc/linux/fs/ceph/inode.c (revision d3c51ae1)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 
4 #include <linux/module.h>
5 #include <linux/fs.h>
6 #include <linux/slab.h>
7 #include <linux/string.h>
8 #include <linux/uaccess.h>
9 #include <linux/kernel.h>
10 #include <linux/writeback.h>
11 #include <linux/vmalloc.h>
12 #include <linux/xattr.h>
13 #include <linux/posix_acl.h>
14 #include <linux/random.h>
15 #include <linux/sort.h>
16 #include <linux/iversion.h>
17 
18 #include "super.h"
19 #include "mds_client.h"
20 #include "cache.h"
21 #include <linux/ceph/decode.h>
22 
23 /*
24  * Ceph inode operations
25  *
26  * Implement basic inode helpers (get, alloc) and inode ops (getattr,
27  * setattr, etc.), xattr helpers, and helpers for assimilating
28  * metadata returned by the MDS into our cache.
29  *
30  * Also define helpers for doing asynchronous writeback, invalidation,
31  * and truncation for the benefit of those who can't afford to block
32  * (typically because they are in the message handler path).
33  */
34 
35 static const struct inode_operations ceph_symlink_iops;
36 
37 static void ceph_inode_work(struct work_struct *work);
38 
39 /*
40  * find or create an inode, given the ceph ino number
41  */
42 static int ceph_set_ino_cb(struct inode *inode, void *data)
43 {
44 	struct ceph_inode_info *ci = ceph_inode(inode);
45 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
46 
47 	ci->i_vino = *(struct ceph_vino *)data;
48 	inode->i_ino = ceph_vino_to_ino_t(ci->i_vino);
49 	inode_set_iversion_raw(inode, 0);
50 	percpu_counter_inc(&mdsc->metric.total_inodes);
51 
52 	return 0;
53 }
54 
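/*
 * A ceph_vino is the (ino, snapid) pair.  iget5_locked() below hashes
 * on the ino alone, while ceph_ino_compare() matches both fields, so
 * the head inode and its snapshotted versions get distinct in-core
 * inodes.
 */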
55 struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
56 {
57 	struct inode *inode;
58 
59 	inode = iget5_locked(sb, (unsigned long)vino.ino, ceph_ino_compare,
60 			     ceph_set_ino_cb, &vino);
61 	if (!inode)
62 		return ERR_PTR(-ENOMEM);
63 
64 	dout("get_inode on %llu=%llx.%llx got %p new %d\n", ceph_present_inode(inode),
65 	     ceph_vinop(inode), inode, !!(inode->i_state & I_NEW));
66 	return inode;
67 }
68 
69 /*
70  * get/construct snapdir inode for a given directory
71  */
72 struct inode *ceph_get_snapdir(struct inode *parent)
73 {
74 	struct ceph_vino vino = {
75 		.ino = ceph_ino(parent),
76 		.snap = CEPH_SNAPDIR,
77 	};
78 	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
79 	struct ceph_inode_info *ci = ceph_inode(inode);
80 
81 	BUG_ON(!S_ISDIR(parent->i_mode));
82 	if (IS_ERR(inode))
83 		return inode;
84 	inode->i_mode = parent->i_mode;
85 	inode->i_uid = parent->i_uid;
86 	inode->i_gid = parent->i_gid;
87 	inode->i_mtime = parent->i_mtime;
88 	inode->i_ctime = parent->i_ctime;
89 	inode->i_atime = parent->i_atime;
90 	ci->i_rbytes = 0;
91 	ci->i_btime = ceph_inode(parent)->i_btime;
92 
93 	if (inode->i_state & I_NEW) {
94 		inode->i_op = &ceph_snapdir_iops;
95 		inode->i_fop = &ceph_snapdir_fops;
96 		ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
97 		unlock_new_inode(inode);
98 	}
99 
100 	return inode;
101 }
102 
103 const struct inode_operations ceph_file_iops = {
104 	.permission = ceph_permission,
105 	.setattr = ceph_setattr,
106 	.getattr = ceph_getattr,
107 	.listxattr = ceph_listxattr,
108 	.get_acl = ceph_get_acl,
109 	.set_acl = ceph_set_acl,
110 };
111 
112 
113 /*
114  * We use a 'frag tree' to keep track of the MDS's directory fragments
115  * for a given inode (usually there is just a single fragment).  We
116  * need to know when a child frag is delegated to a new MDS, or when
117  * it is flagged as replicated, so we can direct our requests
118  * accordingly.
119  */
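/*
 * A frag identifier is a u32 packing the split depth ("bits") into its
 * top 8 bits and the matching hash-range prefix, left justified, into
 * the low 24 bits (see include/linux/ceph/ceph_frag.h).
 * ceph_frag_make(0, 0) is the whole, unsplit directory; after a single
 * split, ceph_frag_make(1, 0x800000) names the upper half of the range.
 */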
120 
121 /*
122  * find/create a frag in the tree
123  */
124 static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
125 						    u32 f)
126 {
127 	struct rb_node **p;
128 	struct rb_node *parent = NULL;
129 	struct ceph_inode_frag *frag;
130 	int c;
131 
132 	p = &ci->i_fragtree.rb_node;
133 	while (*p) {
134 		parent = *p;
135 		frag = rb_entry(parent, struct ceph_inode_frag, node);
136 		c = ceph_frag_compare(f, frag->frag);
137 		if (c < 0)
138 			p = &(*p)->rb_left;
139 		else if (c > 0)
140 			p = &(*p)->rb_right;
141 		else
142 			return frag;
143 	}
144 
145 	frag = kmalloc(sizeof(*frag), GFP_NOFS);
146 	if (!frag)
147 		return ERR_PTR(-ENOMEM);
148 
149 	frag->frag = f;
150 	frag->split_by = 0;
151 	frag->mds = -1;
152 	frag->ndist = 0;
153 
154 	rb_link_node(&frag->node, parent, p);
155 	rb_insert_color(&frag->node, &ci->i_fragtree);
156 
157 	dout("get_or_create_frag added %llx.%llx frag %x\n",
158 	     ceph_vinop(&ci->vfs_inode), f);
159 	return frag;
160 }
161 
162 /*
163  * find a specific frag @f
164  */
165 struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
166 {
167 	struct rb_node *n = ci->i_fragtree.rb_node;
168 
169 	while (n) {
170 		struct ceph_inode_frag *frag =
171 			rb_entry(n, struct ceph_inode_frag, node);
172 		int c = ceph_frag_compare(f, frag->frag);
173 		if (c < 0)
174 			n = n->rb_left;
175 		else if (c > 0)
176 			n = n->rb_right;
177 		else
178 			return frag;
179 	}
180 	return NULL;
181 }
182 
183 /*
184  * Choose frag containing the given value @v.  If @pfrag is
185  * specified, copy the frag delegation info to the caller if
186  * it is present.
187  */
188 static u32 __ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
189 			      struct ceph_inode_frag *pfrag, int *found)
190 {
191 	u32 t = ceph_frag_make(0, 0);
192 	struct ceph_inode_frag *frag;
193 	unsigned nway, i;
194 	u32 n;
195 
196 	if (found)
197 		*found = 0;
198 
199 	while (1) {
200 		WARN_ON(!ceph_frag_contains_value(t, v));
201 		frag = __ceph_find_frag(ci, t);
202 		if (!frag)
203 			break; /* t is a leaf */
204 		if (frag->split_by == 0) {
205 			if (pfrag)
206 				memcpy(pfrag, frag, sizeof(*pfrag));
207 			if (found)
208 				*found = 1;
209 			break;
210 		}
211 
212 		/* choose child */
213 		nway = 1 << frag->split_by;
214 		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
215 		     frag->split_by, nway);
216 		for (i = 0; i < nway; i++) {
217 			n = ceph_frag_make_child(t, frag->split_by, i);
218 			if (ceph_frag_contains_value(n, v)) {
219 				t = n;
220 				break;
221 			}
222 		}
223 		BUG_ON(i == nway);
224 	}
225 	dout("choose_frag(%x) = %x\n", v, t);
226 
227 	return t;
228 }
229 
230 u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
231 		     struct ceph_inode_frag *pfrag, int *found)
232 {
233 	u32 ret;
234 	mutex_lock(&ci->i_fragtree_mutex);
235 	ret = __ceph_choose_frag(ci, v, pfrag, found);
236 	mutex_unlock(&ci->i_fragtree_mutex);
237 	return ret;
238 }
239 
240 /*
241  * Process dirfrag (delegation) info from the mds.  Include leaf
242  * fragment in tree ONLY if ndist > 0.  Otherwise, only
243  * branches/splits are included in i_fragtree.
244  */
245 static int ceph_fill_dirfrag(struct inode *inode,
246 			     struct ceph_mds_reply_dirfrag *dirinfo)
247 {
248 	struct ceph_inode_info *ci = ceph_inode(inode);
249 	struct ceph_inode_frag *frag;
250 	u32 id = le32_to_cpu(dirinfo->frag);
251 	int mds = le32_to_cpu(dirinfo->auth);
252 	int ndist = le32_to_cpu(dirinfo->ndist);
253 	int diri_auth = -1;
254 	int i;
255 	int err = 0;
256 
257 	spin_lock(&ci->i_ceph_lock);
258 	if (ci->i_auth_cap)
259 		diri_auth = ci->i_auth_cap->mds;
260 	spin_unlock(&ci->i_ceph_lock);
261 
262 	if (mds == -1) /* CDIR_AUTH_PARENT */
263 		mds = diri_auth;
264 
265 	mutex_lock(&ci->i_fragtree_mutex);
266 	if (ndist == 0 && mds == diri_auth) {
267 		/* no delegation info needed. */
268 		frag = __ceph_find_frag(ci, id);
269 		if (!frag)
270 			goto out;
271 		if (frag->split_by == 0) {
272 			/* tree leaf, remove */
273 			dout("fill_dirfrag removed %llx.%llx frag %x"
274 			     " (no ref)\n", ceph_vinop(inode), id);
275 			rb_erase(&frag->node, &ci->i_fragtree);
276 			kfree(frag);
277 		} else {
278 			/* tree branch, keep and clear */
279 			dout("fill_dirfrag cleared %llx.%llx frag %x"
280 			     " referral\n", ceph_vinop(inode), id);
281 			frag->mds = -1;
282 			frag->ndist = 0;
283 		}
284 		goto out;
285 	}
286 
287 
288 	/* find/add this frag to store mds delegation info */
289 	frag = __get_or_create_frag(ci, id);
290 	if (IS_ERR(frag)) {
291 		/* this is not the end of the world; we can continue
292 		   with bad/inaccurate delegation info */
293 		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
294 		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
295 		err = -ENOMEM;
296 		goto out;
297 	}
298 
299 	frag->mds = mds;
300 	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
301 	for (i = 0; i < frag->ndist; i++)
302 		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
303 	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
304 	     ceph_vinop(inode), frag->frag, frag->ndist);
305 
306 out:
307 	mutex_unlock(&ci->i_fragtree_mutex);
308 	return err;
309 }
310 
311 static int frag_tree_split_cmp(const void *l, const void *r)
312 {
313 	struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
314 	struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
315 	return ceph_frag_compare(le32_to_cpu(ls->frag),
316 				 le32_to_cpu(rs->frag));
317 }
318 
319 static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
320 {
321 	if (!frag)
322 		return f == ceph_frag_make(0, 0);
323 	if (ceph_frag_bits(f) != ceph_frag_bits(frag->frag) + frag->split_by)
324 		return false;
325 	return ceph_frag_contains_value(frag->frag, ceph_frag_value(f));
326 }
327 
328 static int ceph_fill_fragtree(struct inode *inode,
329 			      struct ceph_frag_tree_head *fragtree,
330 			      struct ceph_mds_reply_dirfrag *dirinfo)
331 {
332 	struct ceph_inode_info *ci = ceph_inode(inode);
333 	struct ceph_inode_frag *frag, *prev_frag = NULL;
334 	struct rb_node *rb_node;
335 	unsigned i, split_by, nsplits;
336 	u32 id;
337 	bool update = false;
338 
339 	mutex_lock(&ci->i_fragtree_mutex);
340 	nsplits = le32_to_cpu(fragtree->nsplits);
341 	if (nsplits != ci->i_fragtree_nsplits) {
342 		update = true;
343 	} else if (nsplits) {
344 		i = prandom_u32() % nsplits;
345 		id = le32_to_cpu(fragtree->splits[i].frag);
346 		if (!__ceph_find_frag(ci, id))
347 			update = true;
348 	} else if (!RB_EMPTY_ROOT(&ci->i_fragtree)) {
349 		rb_node = rb_first(&ci->i_fragtree);
350 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
351 		if (frag->frag != ceph_frag_make(0, 0) || rb_next(rb_node))
352 			update = true;
353 	}
354 	if (!update && dirinfo) {
355 		id = le32_to_cpu(dirinfo->frag);
356 		if (id != __ceph_choose_frag(ci, id, NULL, NULL))
357 			update = true;
358 	}
359 	if (!update)
360 		goto out_unlock;
361 
362 	if (nsplits > 1) {
363 		sort(fragtree->splits, nsplits, sizeof(fragtree->splits[0]),
364 		     frag_tree_split_cmp, NULL);
365 	}
366 
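	/*
	 * Walk the (now sorted) split list from the MDS and our rbtree in
	 * lockstep: for each split, prune cached frags that sort before it
	 * and are no longer valid (old splits, or leaves that are not
	 * children of the previous split), then create or refresh the node
	 * for the split itself.  Anything left in the tree after the last
	 * split is pruned the same way below.
	 */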
367 	dout("fill_fragtree %llx.%llx\n", ceph_vinop(inode));
368 	rb_node = rb_first(&ci->i_fragtree);
369 	for (i = 0; i < nsplits; i++) {
370 		id = le32_to_cpu(fragtree->splits[i].frag);
371 		split_by = le32_to_cpu(fragtree->splits[i].by);
372 		if (split_by == 0 || ceph_frag_bits(id) + split_by > 24) {
373 			pr_err("fill_fragtree %llx.%llx invalid split %d/%u, "
374 			       "frag %x split by %d\n", ceph_vinop(inode),
375 			       i, nsplits, id, split_by);
376 			continue;
377 		}
378 		frag = NULL;
379 		while (rb_node) {
380 			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
381 			if (ceph_frag_compare(frag->frag, id) >= 0) {
382 				if (frag->frag != id)
383 					frag = NULL;
384 				else
385 					rb_node = rb_next(rb_node);
386 				break;
387 			}
388 			rb_node = rb_next(rb_node);
389 			/* delete stale split/leaf node */
390 			if (frag->split_by > 0 ||
391 			    !is_frag_child(frag->frag, prev_frag)) {
392 				rb_erase(&frag->node, &ci->i_fragtree);
393 				if (frag->split_by > 0)
394 					ci->i_fragtree_nsplits--;
395 				kfree(frag);
396 			}
397 			frag = NULL;
398 		}
399 		if (!frag) {
400 			frag = __get_or_create_frag(ci, id);
401 			if (IS_ERR(frag))
402 				continue;
403 		}
404 		if (frag->split_by == 0)
405 			ci->i_fragtree_nsplits++;
406 		frag->split_by = split_by;
407 		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
408 		prev_frag = frag;
409 	}
410 	while (rb_node) {
411 		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
412 		rb_node = rb_next(rb_node);
413 		/* delete stale split/leaf node */
414 		if (frag->split_by > 0 ||
415 		    !is_frag_child(frag->frag, prev_frag)) {
416 			rb_erase(&frag->node, &ci->i_fragtree);
417 			if (frag->split_by > 0)
418 				ci->i_fragtree_nsplits--;
419 			kfree(frag);
420 		}
421 	}
422 out_unlock:
423 	mutex_unlock(&ci->i_fragtree_mutex);
424 	return 0;
425 }
426 
427 /*
428  * initialize a newly allocated inode.
429  */
430 struct inode *ceph_alloc_inode(struct super_block *sb)
431 {
432 	struct ceph_inode_info *ci;
433 	int i;
434 
435 	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
436 	if (!ci)
437 		return NULL;
438 
439 	dout("alloc_inode %p\n", &ci->vfs_inode);
440 
441 	spin_lock_init(&ci->i_ceph_lock);
442 
443 	ci->i_version = 0;
444 	ci->i_inline_version = 0;
445 	ci->i_time_warp_seq = 0;
446 	ci->i_ceph_flags = 0;
447 	atomic64_set(&ci->i_ordered_count, 1);
448 	atomic64_set(&ci->i_release_count, 1);
449 	atomic64_set(&ci->i_complete_seq[0], 0);
450 	atomic64_set(&ci->i_complete_seq[1], 0);
451 	ci->i_symlink = NULL;
452 
453 	ci->i_max_bytes = 0;
454 	ci->i_max_files = 0;
455 
456 	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));
457 	memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
458 	RCU_INIT_POINTER(ci->i_layout.pool_ns, NULL);
459 
460 	ci->i_fragtree = RB_ROOT;
461 	mutex_init(&ci->i_fragtree_mutex);
462 
463 	ci->i_xattrs.blob = NULL;
464 	ci->i_xattrs.prealloc_blob = NULL;
465 	ci->i_xattrs.dirty = false;
466 	ci->i_xattrs.index = RB_ROOT;
467 	ci->i_xattrs.count = 0;
468 	ci->i_xattrs.names_size = 0;
469 	ci->i_xattrs.vals_size = 0;
470 	ci->i_xattrs.version = 0;
471 	ci->i_xattrs.index_version = 0;
472 
473 	ci->i_caps = RB_ROOT;
474 	ci->i_auth_cap = NULL;
475 	ci->i_dirty_caps = 0;
476 	ci->i_flushing_caps = 0;
477 	INIT_LIST_HEAD(&ci->i_dirty_item);
478 	INIT_LIST_HEAD(&ci->i_flushing_item);
479 	ci->i_prealloc_cap_flush = NULL;
480 	INIT_LIST_HEAD(&ci->i_cap_flush_list);
481 	init_waitqueue_head(&ci->i_cap_wq);
482 	ci->i_hold_caps_max = 0;
483 	INIT_LIST_HEAD(&ci->i_cap_delay_list);
484 	INIT_LIST_HEAD(&ci->i_cap_snaps);
485 	ci->i_head_snapc = NULL;
486 	ci->i_snap_caps = 0;
487 
488 	ci->i_last_rd = ci->i_last_wr = jiffies - 3600 * HZ;
489 	for (i = 0; i < CEPH_FILE_MODE_BITS; i++)
490 		ci->i_nr_by_mode[i] = 0;
491 
492 	mutex_init(&ci->i_truncate_mutex);
493 	ci->i_truncate_seq = 0;
494 	ci->i_truncate_size = 0;
495 	ci->i_truncate_pending = 0;
496 
497 	ci->i_max_size = 0;
498 	ci->i_reported_size = 0;
499 	ci->i_wanted_max_size = 0;
500 	ci->i_requested_max_size = 0;
501 
502 	ci->i_pin_ref = 0;
503 	ci->i_rd_ref = 0;
504 	ci->i_rdcache_ref = 0;
505 	ci->i_wr_ref = 0;
506 	ci->i_wb_ref = 0;
507 	ci->i_fx_ref = 0;
508 	ci->i_wrbuffer_ref = 0;
509 	ci->i_wrbuffer_ref_head = 0;
510 	atomic_set(&ci->i_filelock_ref, 0);
511 	atomic_set(&ci->i_shared_gen, 1);
512 	ci->i_rdcache_gen = 0;
513 	ci->i_rdcache_revoking = 0;
514 
515 	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
516 	INIT_LIST_HEAD(&ci->i_unsafe_iops);
517 	spin_lock_init(&ci->i_unsafe_lock);
518 
519 	ci->i_snap_realm = NULL;
520 	INIT_LIST_HEAD(&ci->i_snap_realm_item);
521 	INIT_LIST_HEAD(&ci->i_snap_flush_item);
522 
523 	INIT_WORK(&ci->i_work, ceph_inode_work);
524 	ci->i_work_mask = 0;
525 	memset(&ci->i_btime, '\0', sizeof(ci->i_btime));
526 
527 	ceph_fscache_inode_init(ci);
528 
529 	ci->i_meta_err = 0;
530 
531 	return &ci->vfs_inode;
532 }
533 
534 void ceph_free_inode(struct inode *inode)
535 {
536 	struct ceph_inode_info *ci = ceph_inode(inode);
537 
538 	kfree(ci->i_symlink);
539 	kmem_cache_free(ceph_inode_cachep, ci);
540 }
541 
542 void ceph_evict_inode(struct inode *inode)
543 {
544 	struct ceph_inode_info *ci = ceph_inode(inode);
545 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
546 	struct ceph_inode_frag *frag;
547 	struct rb_node *n;
548 
549 	dout("evict_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));
550 
551 	percpu_counter_dec(&mdsc->metric.total_inodes);
552 
553 	truncate_inode_pages_final(&inode->i_data);
554 	clear_inode(inode);
555 
556 	ceph_fscache_unregister_inode_cookie(ci);
557 
558 	__ceph_remove_caps(ci);
559 
560 	if (__ceph_has_any_quota(ci))
561 		ceph_adjust_quota_realms_count(inode, false);
562 
563 	/*
564 	 * we may still have a snap_realm reference if there are stray
565 	 * caps in i_snap_caps.
566 	 */
567 	if (ci->i_snap_realm) {
568 		if (ceph_snap(inode) == CEPH_NOSNAP) {
569 			struct ceph_snap_realm *realm = ci->i_snap_realm;
570 			dout(" dropping residual ref to snap realm %p\n",
571 			     realm);
572 			spin_lock(&realm->inodes_with_caps_lock);
573 			list_del_init(&ci->i_snap_realm_item);
574 			ci->i_snap_realm = NULL;
575 			if (realm->ino == ci->i_vino.ino)
576 				realm->inode = NULL;
577 			spin_unlock(&realm->inodes_with_caps_lock);
578 			ceph_put_snap_realm(mdsc, realm);
579 		} else {
580 			ceph_put_snapid_map(mdsc, ci->i_snapid_map);
581 			ci->i_snap_realm = NULL;
582 		}
583 	}
584 
585 	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
586 		frag = rb_entry(n, struct ceph_inode_frag, node);
587 		rb_erase(n, &ci->i_fragtree);
588 		kfree(frag);
589 	}
590 	ci->i_fragtree_nsplits = 0;
591 
592 	__ceph_destroy_xattrs(ci);
593 	if (ci->i_xattrs.blob)
594 		ceph_buffer_put(ci->i_xattrs.blob);
595 	if (ci->i_xattrs.prealloc_blob)
596 		ceph_buffer_put(ci->i_xattrs.prealloc_blob);
597 
598 	ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
599 	ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
600 }
601 
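/* i_blocks is counted in 512-byte sectors; round the byte size up. */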
602 static inline blkcnt_t calc_inode_blocks(u64 size)
603 {
604 	return (size + (1<<9) - 1) >> 9;
605 }
606 
607 /*
608  * Helpers to fill in size, ctime, mtime, and atime.  We have to be
609  * careful because either the client or MDS may have more up to date
610  * info, depending on which capabilities are held, and whether
611  * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
612  * and size are monotonically increasing, except when utimes() or
613  * truncate() increments the corresponding _seq values.)
614  */
615 int ceph_fill_file_size(struct inode *inode, int issued,
616 			u32 truncate_seq, u64 truncate_size, u64 size)
617 {
618 	struct ceph_inode_info *ci = ceph_inode(inode);
619 	int queue_trunc = 0;
620 
621 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
622 	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
623 		dout("size %lld -> %llu\n", inode->i_size, size);
624 		if (size > 0 && S_ISDIR(inode->i_mode)) {
625 			pr_err("fill_file_size non-zero size for directory\n");
626 			size = 0;
627 		}
628 		i_size_write(inode, size);
629 		inode->i_blocks = calc_inode_blocks(size);
630 		ci->i_reported_size = size;
631 		if (truncate_seq != ci->i_truncate_seq) {
632 			dout("truncate_seq %u -> %u\n",
633 			     ci->i_truncate_seq, truncate_seq);
634 			ci->i_truncate_seq = truncate_seq;
635 
636 			/* the MDS should have revoked these caps */
637 			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
638 					       CEPH_CAP_FILE_RD |
639 					       CEPH_CAP_FILE_WR |
640 					       CEPH_CAP_FILE_LAZYIO));
641 			/*
642 			 * If we hold relevant caps, or in the case where we're
643 			 * not the only client referencing this file and we
644 			 * don't hold those caps, then we need to check whether
645 			 * the file is either opened or mmaped
646 			 */
647 			if ((issued & (CEPH_CAP_FILE_CACHE|
648 				       CEPH_CAP_FILE_BUFFER)) ||
649 			    mapping_mapped(inode->i_mapping) ||
650 			    __ceph_is_file_opened(ci)) {
651 				ci->i_truncate_pending++;
652 				queue_trunc = 1;
653 			}
654 		}
655 	}
656 	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
657 	    ci->i_truncate_size != truncate_size) {
658 		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
659 		     truncate_size);
660 		ci->i_truncate_size = truncate_size;
661 	}
662 
663 	if (queue_trunc)
664 		ceph_fscache_invalidate(inode);
665 
666 	return queue_trunc;
667 }
668 
669 void ceph_fill_file_time(struct inode *inode, int issued,
670 			 u64 time_warp_seq, struct timespec64 *ctime,
671 			 struct timespec64 *mtime, struct timespec64 *atime)
672 {
673 	struct ceph_inode_info *ci = ceph_inode(inode);
674 	int warn = 0;
675 
676 	if (issued & (CEPH_CAP_FILE_EXCL|
677 		      CEPH_CAP_FILE_WR|
678 		      CEPH_CAP_FILE_BUFFER|
679 		      CEPH_CAP_AUTH_EXCL|
680 		      CEPH_CAP_XATTR_EXCL)) {
681 		if (ci->i_version == 0 ||
682 		    timespec64_compare(ctime, &inode->i_ctime) > 0) {
683 			dout("ctime %lld.%09ld -> %lld.%09ld inc w/ cap\n",
684 			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
685 			     ctime->tv_sec, ctime->tv_nsec);
686 			inode->i_ctime = *ctime;
687 		}
688 		if (ci->i_version == 0 ||
689 		    ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
690 			/* the MDS did a utimes() */
691 			dout("mtime %lld.%09ld -> %lld.%09ld "
692 			     "tw %d -> %d\n",
693 			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
694 			     mtime->tv_sec, mtime->tv_nsec,
695 			     ci->i_time_warp_seq, (int)time_warp_seq);
696 
697 			inode->i_mtime = *mtime;
698 			inode->i_atime = *atime;
699 			ci->i_time_warp_seq = time_warp_seq;
700 		} else if (time_warp_seq == ci->i_time_warp_seq) {
701 			/* nobody did utimes(); take the max */
702 			if (timespec64_compare(mtime, &inode->i_mtime) > 0) {
703 				dout("mtime %lld.%09ld -> %lld.%09ld inc\n",
704 				     inode->i_mtime.tv_sec,
705 				     inode->i_mtime.tv_nsec,
706 				     mtime->tv_sec, mtime->tv_nsec);
707 				inode->i_mtime = *mtime;
708 			}
709 			if (timespec64_compare(atime, &inode->i_atime) > 0) {
710 				dout("atime %lld.%09ld -> %lld.%09ld inc\n",
711 				     inode->i_atime.tv_sec,
712 				     inode->i_atime.tv_nsec,
713 				     atime->tv_sec, atime->tv_nsec);
714 				inode->i_atime = *atime;
715 			}
716 		} else if (issued & CEPH_CAP_FILE_EXCL) {
717 			/* we did a utimes(); ignore mds values */
718 		} else {
719 			warn = 1;
720 		}
721 	} else {
722 		/* we have no write|excl caps; whatever the MDS says is true */
723 		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
724 			inode->i_ctime = *ctime;
725 			inode->i_mtime = *mtime;
726 			inode->i_atime = *atime;
727 			ci->i_time_warp_seq = time_warp_seq;
728 		} else {
729 			warn = 1;
730 		}
731 	}
732 	if (warn) /* time_warp_seq shouldn't go backwards */
733 		dout("%p mds time_warp_seq %llu < %u\n",
734 		     inode, time_warp_seq, ci->i_time_warp_seq);
735 }
736 
737 /*
738  * Populate an inode based on info from mds.  May be called on new or
739  * existing inodes.
740  */
741 int ceph_fill_inode(struct inode *inode, struct page *locked_page,
742 		    struct ceph_mds_reply_info_in *iinfo,
743 		    struct ceph_mds_reply_dirfrag *dirinfo,
744 		    struct ceph_mds_session *session, int cap_fmode,
745 		    struct ceph_cap_reservation *caps_reservation)
746 {
747 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
748 	struct ceph_mds_reply_inode *info = iinfo->in;
749 	struct ceph_inode_info *ci = ceph_inode(inode);
750 	int issued, new_issued, info_caps;
751 	struct timespec64 mtime, atime, ctime;
752 	struct ceph_buffer *xattr_blob = NULL;
753 	struct ceph_buffer *old_blob = NULL;
754 	struct ceph_string *pool_ns = NULL;
755 	struct ceph_cap *new_cap = NULL;
756 	int err = 0;
757 	bool wake = false;
758 	bool queue_trunc = false;
759 	bool new_version = false;
760 	bool fill_inline = false;
761 
762 	dout("%s %p ino %llx.%llx v %llu had %llu\n", __func__,
763 	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
764 	     ci->i_version);
765 
766 	info_caps = le32_to_cpu(info->cap.caps);
767 
768 	/* prealloc new cap struct */
769 	if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
770 		new_cap = ceph_get_cap(mdsc, caps_reservation);
771 		if (!new_cap)
772 			return -ENOMEM;
773 	}
774 
775 	/*
776 	 * prealloc xattr data, if it looks like we'll need it.  only
777 	 * if len > 4 (meaning there are actually xattrs; the first 4
778 	 * bytes are the xattr count).
779 	 */
780 	if (iinfo->xattr_len > 4) {
781 		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
782 		if (!xattr_blob)
783 			pr_err("%s ENOMEM xattr blob %d bytes\n", __func__,
784 			       iinfo->xattr_len);
785 	}
786 
787 	if (iinfo->pool_ns_len > 0)
788 		pool_ns = ceph_find_or_create_string(iinfo->pool_ns_data,
789 						     iinfo->pool_ns_len);
790 
791 	if (ceph_snap(inode) != CEPH_NOSNAP && !ci->i_snapid_map)
792 		ci->i_snapid_map = ceph_get_snapid_map(mdsc, ceph_snap(inode));
793 
794 	spin_lock(&ci->i_ceph_lock);
795 
796 	/*
797 	 * provided version will be odd if inode value is projected,
798 	 * even if stable.  skip the update if we have newer stable
799 	 * info (ours>=theirs, e.g. due to racing mds replies), unless
800 	 * we are getting projected (unstable) info (in which case the
801 	 * version is odd, and we want ours>theirs).
802 	 *   us   them
803 	 *   2    2     skip
804 	 *   3    2     skip
805 	 *   3    3     update
806 	 */
807 	if (ci->i_version == 0 ||
808 	    ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
809 	     le64_to_cpu(info->version) > (ci->i_version & ~1)))
810 		new_version = true;
811 
812 	/* Update change_attribute */
813 	inode_set_max_iversion_raw(inode, iinfo->change_attr);
814 
815 	__ceph_caps_issued(ci, &issued);
816 	issued |= __ceph_caps_dirty(ci);
817 	new_issued = ~issued & info_caps;
818 
819 	/* update inode */
820 	inode->i_rdev = le32_to_cpu(info->rdev);
821 	/* directories have fl_stripe_unit set to zero */
822 	if (le32_to_cpu(info->layout.fl_stripe_unit))
823 		inode->i_blkbits =
824 			fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
825 	else
826 		inode->i_blkbits = CEPH_BLOCK_SHIFT;
827 
828 	__ceph_update_quota(ci, iinfo->max_bytes, iinfo->max_files);
829 
830 	if ((new_version || (new_issued & CEPH_CAP_AUTH_SHARED)) &&
831 	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
832 		inode->i_mode = le32_to_cpu(info->mode);
833 		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
834 		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
835 		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
836 		     from_kuid(&init_user_ns, inode->i_uid),
837 		     from_kgid(&init_user_ns, inode->i_gid));
838 		ceph_decode_timespec64(&ci->i_btime, &iinfo->btime);
839 		ceph_decode_timespec64(&ci->i_snap_btime, &iinfo->snap_btime);
840 	}
841 
842 	if ((new_version || (new_issued & CEPH_CAP_LINK_SHARED)) &&
843 	    (issued & CEPH_CAP_LINK_EXCL) == 0)
844 		set_nlink(inode, le32_to_cpu(info->nlink));
845 
846 	if (new_version || (new_issued & CEPH_CAP_ANY_RD)) {
847 		/* be careful with mtime, atime, size */
848 		ceph_decode_timespec64(&atime, &info->atime);
849 		ceph_decode_timespec64(&mtime, &info->mtime);
850 		ceph_decode_timespec64(&ctime, &info->ctime);
851 		ceph_fill_file_time(inode, issued,
852 				le32_to_cpu(info->time_warp_seq),
853 				&ctime, &mtime, &atime);
854 	}
855 
856 	if (new_version || (info_caps & CEPH_CAP_FILE_SHARED)) {
857 		ci->i_files = le64_to_cpu(info->files);
858 		ci->i_subdirs = le64_to_cpu(info->subdirs);
859 	}
860 
861 	if (new_version ||
862 	    (new_issued & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR))) {
863 		s64 old_pool = ci->i_layout.pool_id;
864 		struct ceph_string *old_ns;
865 
866 		ceph_file_layout_from_legacy(&ci->i_layout, &info->layout);
867 		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
868 					lockdep_is_held(&ci->i_ceph_lock));
869 		rcu_assign_pointer(ci->i_layout.pool_ns, pool_ns);
870 
871 		if (ci->i_layout.pool_id != old_pool || pool_ns != old_ns)
872 			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
873 
874 		pool_ns = old_ns;
875 
876 		queue_trunc = ceph_fill_file_size(inode, issued,
877 					le32_to_cpu(info->truncate_seq),
878 					le64_to_cpu(info->truncate_size),
879 					le64_to_cpu(info->size));
880 		/* only update max_size on auth cap */
881 		if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
882 		    ci->i_max_size != le64_to_cpu(info->max_size)) {
883 			dout("max_size %lld -> %llu\n", ci->i_max_size,
884 					le64_to_cpu(info->max_size));
885 			ci->i_max_size = le64_to_cpu(info->max_size);
886 		}
887 	}
888 
889 	/* layout and rstat are not tracked by capability, update them if
890 	 * the inode info is from auth mds */
891 	if (new_version || (info->cap.flags & CEPH_CAP_FLAG_AUTH)) {
892 		if (S_ISDIR(inode->i_mode)) {
893 			ci->i_dir_layout = iinfo->dir_layout;
894 			ci->i_rbytes = le64_to_cpu(info->rbytes);
895 			ci->i_rfiles = le64_to_cpu(info->rfiles);
896 			ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
897 			ci->i_dir_pin = iinfo->dir_pin;
898 			ceph_decode_timespec64(&ci->i_rctime, &info->rctime);
899 		}
900 	}
901 
902 	/* xattrs */
903 	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
904 	if ((ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))  &&
905 	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
906 		if (ci->i_xattrs.blob)
907 			old_blob = ci->i_xattrs.blob;
908 		ci->i_xattrs.blob = xattr_blob;
909 		if (xattr_blob)
910 			memcpy(ci->i_xattrs.blob->vec.iov_base,
911 			       iinfo->xattr_data, iinfo->xattr_len);
912 		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
913 		ceph_forget_all_cached_acls(inode);
914 		ceph_security_invalidate_secctx(inode);
915 		xattr_blob = NULL;
916 	}
917 
918 	/* finally update i_version */
919 	if (le64_to_cpu(info->version) > ci->i_version)
920 		ci->i_version = le64_to_cpu(info->version);
921 
922 	inode->i_mapping->a_ops = &ceph_aops;
923 
924 	switch (inode->i_mode & S_IFMT) {
925 	case S_IFIFO:
926 	case S_IFBLK:
927 	case S_IFCHR:
928 	case S_IFSOCK:
929 		inode->i_blkbits = PAGE_SHIFT;
930 		init_special_inode(inode, inode->i_mode, inode->i_rdev);
931 		inode->i_op = &ceph_file_iops;
932 		break;
933 	case S_IFREG:
934 		inode->i_op = &ceph_file_iops;
935 		inode->i_fop = &ceph_file_fops;
936 		break;
937 	case S_IFLNK:
938 		inode->i_op = &ceph_symlink_iops;
939 		if (!ci->i_symlink) {
940 			u32 symlen = iinfo->symlink_len;
941 			char *sym;
942 
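			/*
			 * Drop i_ceph_lock across the GFP_NOFS allocation,
			 * then retake it and recheck i_symlink in case
			 * another thread installed the symlink target first.
			 */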
943 			spin_unlock(&ci->i_ceph_lock);
944 
945 			if (symlen != i_size_read(inode)) {
946 				pr_err("%s %llx.%llx BAD symlink "
947 					"size %lld\n", __func__,
948 					ceph_vinop(inode),
949 					i_size_read(inode));
950 				i_size_write(inode, symlen);
951 				inode->i_blocks = calc_inode_blocks(symlen);
952 			}
953 
954 			err = -ENOMEM;
955 			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
956 			if (!sym)
957 				goto out;
958 
959 			spin_lock(&ci->i_ceph_lock);
960 			if (!ci->i_symlink)
961 				ci->i_symlink = sym;
962 			else
963 				kfree(sym); /* lost a race */
964 		}
965 		inode->i_link = ci->i_symlink;
966 		break;
967 	case S_IFDIR:
968 		inode->i_op = &ceph_dir_iops;
969 		inode->i_fop = &ceph_dir_fops;
970 		break;
971 	default:
972 		pr_err("%s %llx.%llx BAD mode 0%o\n", __func__,
973 		       ceph_vinop(inode), inode->i_mode);
974 	}
975 
976 	/* were we issued a capability? */
977 	if (info_caps) {
978 		if (ceph_snap(inode) == CEPH_NOSNAP) {
979 			ceph_add_cap(inode, session,
980 				     le64_to_cpu(info->cap.cap_id),
981 				     info_caps,
982 				     le32_to_cpu(info->cap.wanted),
983 				     le32_to_cpu(info->cap.seq),
984 				     le32_to_cpu(info->cap.mseq),
985 				     le64_to_cpu(info->cap.realm),
986 				     info->cap.flags, &new_cap);
987 
988 			/* set dir completion flag? */
989 			if (S_ISDIR(inode->i_mode) &&
990 			    ci->i_files == 0 && ci->i_subdirs == 0 &&
991 			    (info_caps & CEPH_CAP_FILE_SHARED) &&
992 			    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
993 			    !__ceph_dir_is_complete(ci)) {
994 				dout(" marking %p complete (empty)\n", inode);
995 				i_size_write(inode, 0);
996 				__ceph_dir_set_complete(ci,
997 					atomic64_read(&ci->i_release_count),
998 					atomic64_read(&ci->i_ordered_count));
999 			}
1000 
1001 			wake = true;
1002 		} else {
1003 			dout(" %p got snap_caps %s\n", inode,
1004 			     ceph_cap_string(info_caps));
1005 			ci->i_snap_caps |= info_caps;
1006 		}
1007 	}
1008 
1009 	if (iinfo->inline_version > 0 &&
1010 	    iinfo->inline_version >= ci->i_inline_version) {
1011 		int cache_caps = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1012 		ci->i_inline_version = iinfo->inline_version;
1013 		if (ci->i_inline_version != CEPH_INLINE_NONE &&
1014 		    (locked_page || (info_caps & cache_caps)))
1015 			fill_inline = true;
1016 	}
1017 
1018 	if (cap_fmode >= 0) {
1019 		if (!info_caps)
1020 			pr_warn("mds issued no caps on %llx.%llx\n",
1021 				ceph_vinop(inode));
1022 		__ceph_touch_fmode(ci, mdsc, cap_fmode);
1023 	}
1024 
1025 	spin_unlock(&ci->i_ceph_lock);
1026 
1027 	if (fill_inline)
1028 		ceph_fill_inline_data(inode, locked_page,
1029 				      iinfo->inline_data, iinfo->inline_len);
1030 
1031 	if (wake)
1032 		wake_up_all(&ci->i_cap_wq);
1033 
1034 	/* queue truncate if we saw i_size decrease */
1035 	if (queue_trunc)
1036 		ceph_queue_vmtruncate(inode);
1037 
1038 	/* populate frag tree */
1039 	if (S_ISDIR(inode->i_mode))
1040 		ceph_fill_fragtree(inode, &info->fragtree, dirinfo);
1041 
1042 	/* update delegation info? */
1043 	if (dirinfo)
1044 		ceph_fill_dirfrag(inode, dirinfo);
1045 
1046 	err = 0;
1047 out:
1048 	if (new_cap)
1049 		ceph_put_cap(mdsc, new_cap);
1050 	ceph_buffer_put(old_blob);
1051 	ceph_buffer_put(xattr_blob);
1052 	ceph_put_string(pool_ns);
1053 	return err;
1054 }
1055 
1056 /*
1057  * caller should hold session s_mutex and dentry->d_lock.
1058  */
1059 static void __update_dentry_lease(struct inode *dir, struct dentry *dentry,
1060 				  struct ceph_mds_reply_lease *lease,
1061 				  struct ceph_mds_session *session,
1062 				  unsigned long from_time,
1063 				  struct ceph_mds_session **old_lease_session)
1064 {
1065 	struct ceph_dentry_info *di = ceph_dentry(dentry);
1066 	unsigned mask = le16_to_cpu(lease->mask);
1067 	long unsigned duration = le32_to_cpu(lease->duration_ms);
1068 	long unsigned ttl = from_time + (duration * HZ) / 1000;
1069 	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
1070 
1071 	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
1072 	     dentry, duration, ttl);
1073 
1074 	/* only track leases on regular dentries */
1075 	if (ceph_snap(dir) != CEPH_NOSNAP)
1076 		return;
1077 
1078 	if (mask & CEPH_LEASE_PRIMARY_LINK)
1079 		di->flags |= CEPH_DENTRY_PRIMARY_LINK;
1080 	else
1081 		di->flags &= ~CEPH_DENTRY_PRIMARY_LINK;
1082 
1083 	di->lease_shared_gen = atomic_read(&ceph_inode(dir)->i_shared_gen);
1084 	if (!(mask & CEPH_LEASE_VALID)) {
1085 		__ceph_dentry_dir_lease_touch(di);
1086 		return;
1087 	}
1088 
1089 	if (di->lease_gen == session->s_cap_gen &&
1090 	    time_before(ttl, di->time))
1091 		return;  /* we already have a newer lease. */
1092 
1093 	if (di->lease_session && di->lease_session != session) {
1094 		*old_lease_session = di->lease_session;
1095 		di->lease_session = NULL;
1096 	}
1097 
1098 	if (!di->lease_session)
1099 		di->lease_session = ceph_get_mds_session(session);
1100 	di->lease_gen = session->s_cap_gen;
1101 	di->lease_seq = le32_to_cpu(lease->seq);
1102 	di->lease_renew_after = half_ttl;
1103 	di->lease_renew_from = 0;
1104 	di->time = ttl;
1105 
1106 	__ceph_dentry_lease_touch(di);
1107 }
1108 
1109 static inline void update_dentry_lease(struct inode *dir, struct dentry *dentry,
1110 					struct ceph_mds_reply_lease *lease,
1111 					struct ceph_mds_session *session,
1112 					unsigned long from_time)
1113 {
1114 	struct ceph_mds_session *old_lease_session = NULL;
1115 	spin_lock(&dentry->d_lock);
1116 	__update_dentry_lease(dir, dentry, lease, session, from_time,
1117 			      &old_lease_session);
1118 	spin_unlock(&dentry->d_lock);
1119 	if (old_lease_session)
1120 		ceph_put_mds_session(old_lease_session);
1121 }
1122 
1123 /*
1124  * update dentry lease without having parent inode locked
1125  */
1126 static void update_dentry_lease_careful(struct dentry *dentry,
1127 					struct ceph_mds_reply_lease *lease,
1128 					struct ceph_mds_session *session,
1129 					unsigned long from_time,
1130 					char *dname, u32 dname_len,
1131 					struct ceph_vino *pdvino,
1132 					struct ceph_vino *ptvino)
1133 
1134 {
1135 	struct inode *dir;
1136 	struct ceph_mds_session *old_lease_session = NULL;
1137 
1138 	spin_lock(&dentry->d_lock);
1139 	/* make sure dentry's name matches target */
1140 	if (dentry->d_name.len != dname_len ||
1141 	    memcmp(dentry->d_name.name, dname, dname_len))
1142 		goto out_unlock;
1143 
1144 	dir = d_inode(dentry->d_parent);
1145 	/* make sure parent matches dvino */
1146 	if (!ceph_ino_compare(dir, pdvino))
1147 		goto out_unlock;
1148 
1149 	/* make sure dentry's inode matches target. NULL ptvino means that
1150 	 * we expect a negative dentry */
1151 	if (ptvino) {
1152 		if (d_really_is_negative(dentry))
1153 			goto out_unlock;
1154 		if (!ceph_ino_compare(d_inode(dentry), ptvino))
1155 			goto out_unlock;
1156 	} else {
1157 		if (d_really_is_positive(dentry))
1158 			goto out_unlock;
1159 	}
1160 
1161 	__update_dentry_lease(dir, dentry, lease, session,
1162 			      from_time, &old_lease_session);
1163 out_unlock:
1164 	spin_unlock(&dentry->d_lock);
1165 	if (old_lease_session)
1166 		ceph_put_mds_session(old_lease_session);
1167 }
1168 
1169 /*
1170  * splice a dentry to an inode.
1171  * caller must hold directory i_mutex for this to be safe.
1172  */
1173 static int splice_dentry(struct dentry **pdn, struct inode *in)
1174 {
1175 	struct dentry *dn = *pdn;
1176 	struct dentry *realdn;
1177 
1178 	BUG_ON(d_inode(dn));
1179 
1180 	if (S_ISDIR(in->i_mode)) {
1181 		/* If inode is directory, d_splice_alias() below will remove
1182 		 * 'realdn' from its origin parent. We need to ensure that
1183 		 * origin parent's readdir cache will not reference 'realdn'
1184 		 */
1185 		realdn = d_find_any_alias(in);
1186 		if (realdn) {
1187 			struct ceph_dentry_info *di = ceph_dentry(realdn);
1188 			spin_lock(&realdn->d_lock);
1189 
1190 			realdn->d_op->d_prune(realdn);
1191 
1192 			di->time = jiffies;
1193 			di->lease_shared_gen = 0;
1194 			di->offset = 0;
1195 
1196 			spin_unlock(&realdn->d_lock);
1197 			dput(realdn);
1198 		}
1199 	}
1200 
1201 	/* dn must be unhashed */
1202 	if (!d_unhashed(dn))
1203 		d_drop(dn);
1204 	realdn = d_splice_alias(in, dn);
1205 	if (IS_ERR(realdn)) {
1206 		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
1207 		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
1208 		return PTR_ERR(realdn);
1209 	}
1210 
1211 	if (realdn) {
1212 		dout("dn %p (%d) spliced with %p (%d) "
1213 		     "inode %p ino %llx.%llx\n",
1214 		     dn, d_count(dn),
1215 		     realdn, d_count(realdn),
1216 		     d_inode(realdn), ceph_vinop(d_inode(realdn)));
1217 		dput(dn);
1218 		*pdn = realdn;
1219 	} else {
1220 		BUG_ON(!ceph_dentry(dn));
1221 		dout("dn %p attached to %p ino %llx.%llx\n",
1222 		     dn, d_inode(dn), ceph_vinop(d_inode(dn)));
1223 	}
1224 	return 0;
1225 }
1226 
1227 /*
1228  * Incorporate results into the local cache.  This is either just
1229  * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
1230  * after a lookup).
1231  *
1232  * A reply may contain
1233  *         a directory inode along with a dentry.
1234  *  and/or a target inode
1235  *
1236  * Called with snap_rwsem (read).
1237  */
1238 int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req)
1239 {
1240 	struct ceph_mds_session *session = req->r_session;
1241 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1242 	struct inode *in = NULL;
1243 	struct ceph_vino tvino, dvino;
1244 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
1245 	int err = 0;
1246 
1247 	dout("fill_trace %p is_dentry %d is_target %d\n", req,
1248 	     rinfo->head->is_dentry, rinfo->head->is_target);
1249 
1250 	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
1251 		dout("fill_trace reply is empty!\n");
1252 		if (rinfo->head->result == 0 && req->r_parent)
1253 			ceph_invalidate_dir_request(req);
1254 		return 0;
1255 	}
1256 
1257 	if (rinfo->head->is_dentry) {
1258 		struct inode *dir = req->r_parent;
1259 
1260 		if (dir) {
1261 			err = ceph_fill_inode(dir, NULL, &rinfo->diri,
1262 					      rinfo->dirfrag, session, -1,
1263 					      &req->r_caps_reservation);
1264 			if (err < 0)
1265 				goto done;
1266 		} else {
1267 			WARN_ON_ONCE(1);
1268 		}
1269 
1270 		if (dir && req->r_op == CEPH_MDS_OP_LOOKUPNAME &&
1271 		    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1272 		    !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1273 			struct qstr dname;
1274 			struct dentry *dn, *parent;
1275 
1276 			BUG_ON(!rinfo->head->is_target);
1277 			BUG_ON(req->r_dentry);
1278 
1279 			parent = d_find_any_alias(dir);
1280 			BUG_ON(!parent);
1281 
1282 			dname.name = rinfo->dname;
1283 			dname.len = rinfo->dname_len;
1284 			dname.hash = full_name_hash(parent, dname.name, dname.len);
1285 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1286 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1287 retry_lookup:
1288 			dn = d_lookup(parent, &dname);
1289 			dout("d_lookup on parent=%p name=%.*s got %p\n",
1290 			     parent, dname.len, dname.name, dn);
1291 
1292 			if (!dn) {
1293 				dn = d_alloc(parent, &dname);
1294 				dout("d_alloc %p '%.*s' = %p\n", parent,
1295 				     dname.len, dname.name, dn);
1296 				if (!dn) {
1297 					dput(parent);
1298 					err = -ENOMEM;
1299 					goto done;
1300 				}
1301 				err = 0;
1302 			} else if (d_really_is_positive(dn) &&
1303 				   (ceph_ino(d_inode(dn)) != tvino.ino ||
1304 				    ceph_snap(d_inode(dn)) != tvino.snap)) {
1305 				dout(" dn %p points to wrong inode %p\n",
1306 				     dn, d_inode(dn));
1307 				ceph_dir_clear_ordered(dir);
1308 				d_delete(dn);
1309 				dput(dn);
1310 				goto retry_lookup;
1311 			}
1312 
1313 			req->r_dentry = dn;
1314 			dput(parent);
1315 		}
1316 	}
1317 
1318 	if (rinfo->head->is_target) {
1319 		/* Should be filled in by handle_reply */
1320 		BUG_ON(!req->r_target_inode);
1321 
1322 		in = req->r_target_inode;
1323 		err = ceph_fill_inode(in, req->r_locked_page, &rinfo->targeti,
1324 				NULL, session,
1325 				(!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1326 				 !test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags) &&
1327 				 rinfo->head->result == 0) ?  req->r_fmode : -1,
1328 				&req->r_caps_reservation);
1329 		if (err < 0) {
1330 			pr_err("ceph_fill_inode badness %p %llx.%llx\n",
1331 				in, ceph_vinop(in));
1332 			req->r_target_inode = NULL;
1333 			if (in->i_state & I_NEW)
1334 				discard_new_inode(in);
1335 			else
1336 				iput(in);
1337 			goto done;
1338 		}
1339 		if (in->i_state & I_NEW)
1340 			unlock_new_inode(in);
1341 	}
1342 
1343 	/*
1344 	 * ignore null lease/binding on snapdir ENOENT, or else we
1345 	 * will have trouble splicing in the virtual snapdir later
1346 	 */
1347 	if (rinfo->head->is_dentry &&
1348             !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags) &&
1349 	    test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1350 	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
1351 					       fsc->mount_options->snapdir_name,
1352 					       req->r_dentry->d_name.len))) {
1353 		/*
1354 		 * lookup link rename   : null -> possibly existing inode
1355 		 * mknod symlink mkdir  : null -> new inode
1356 		 * unlink               : linked -> null
1357 		 */
1358 		struct inode *dir = req->r_parent;
1359 		struct dentry *dn = req->r_dentry;
1360 		bool have_dir_cap, have_lease;
1361 
1362 		BUG_ON(!dn);
1363 		BUG_ON(!dir);
1364 		BUG_ON(d_inode(dn->d_parent) != dir);
1365 
1366 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1367 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1368 
1369 		BUG_ON(ceph_ino(dir) != dvino.ino);
1370 		BUG_ON(ceph_snap(dir) != dvino.snap);
1371 
1372 		/* do we have a lease on the whole dir? */
1373 		have_dir_cap =
1374 			(le32_to_cpu(rinfo->diri.in->cap.caps) &
1375 			 CEPH_CAP_FILE_SHARED);
1376 
1377 		/* do we have a dn lease? */
1378 		have_lease = have_dir_cap ||
1379 			le32_to_cpu(rinfo->dlease->duration_ms);
1380 		if (!have_lease)
1381 			dout("fill_trace  no dentry lease or dir cap\n");
1382 
1383 		/* rename? */
1384 		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
1385 			struct inode *olddir = req->r_old_dentry_dir;
1386 			BUG_ON(!olddir);
1387 
1388 			dout(" src %p '%pd' dst %p '%pd'\n",
1389 			     req->r_old_dentry,
1390 			     req->r_old_dentry,
1391 			     dn, dn);
1392 			dout("fill_trace doing d_move %p -> %p\n",
1393 			     req->r_old_dentry, dn);
1394 
1395 			/* d_move screws up sibling dentries' offsets */
1396 			ceph_dir_clear_ordered(dir);
1397 			ceph_dir_clear_ordered(olddir);
1398 
1399 			d_move(req->r_old_dentry, dn);
1400 			dout(" src %p '%pd' dst %p '%pd'\n",
1401 			     req->r_old_dentry,
1402 			     req->r_old_dentry,
1403 			     dn, dn);
1404 
1405 			/* ensure target dentry is invalidated, despite
1406 			   rehashing bug in vfs_rename_dir */
1407 			ceph_invalidate_dentry_lease(dn);
1408 
1409 			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
1410 			     ceph_dentry(req->r_old_dentry)->offset);
1411 
1412 			/* swap r_dentry and r_old_dentry in case that
1413 			 * splice_dentry() gets called later. This is safe
1414 			 * because no other place will use them */
1415 			req->r_dentry = req->r_old_dentry;
1416 			req->r_old_dentry = dn;
1417 			dn = req->r_dentry;
1418 		}
1419 
1420 		/* null dentry? */
1421 		if (!rinfo->head->is_target) {
1422 			dout("fill_trace null dentry\n");
1423 			if (d_really_is_positive(dn)) {
1424 				dout("d_delete %p\n", dn);
1425 				ceph_dir_clear_ordered(dir);
1426 				d_delete(dn);
1427 			} else if (have_lease) {
1428 				if (d_unhashed(dn))
1429 					d_add(dn, NULL);
1430 				update_dentry_lease(dir, dn,
1431 						    rinfo->dlease, session,
1432 						    req->r_request_started);
1433 			}
1434 			goto done;
1435 		}
1436 
1437 		/* attach proper inode */
1438 		if (d_really_is_negative(dn)) {
1439 			ceph_dir_clear_ordered(dir);
1440 			ihold(in);
1441 			err = splice_dentry(&req->r_dentry, in);
1442 			if (err < 0)
1443 				goto done;
1444 			dn = req->r_dentry;  /* may have spliced */
1445 		} else if (d_really_is_positive(dn) && d_inode(dn) != in) {
1446 			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
1447 			     dn, d_inode(dn), ceph_vinop(d_inode(dn)),
1448 			     ceph_vinop(in));
1449 			d_invalidate(dn);
1450 			have_lease = false;
1451 		}
1452 
1453 		if (have_lease) {
1454 			update_dentry_lease(dir, dn,
1455 					    rinfo->dlease, session,
1456 					    req->r_request_started);
1457 		}
1458 		dout(" final dn %p\n", dn);
1459 	} else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
1460 		    req->r_op == CEPH_MDS_OP_MKSNAP) &&
1461 	           test_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags) &&
1462 		   !test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
1463 		struct inode *dir = req->r_parent;
1464 
1465 		/* fill out a snapdir LOOKUPSNAP dentry */
1466 		BUG_ON(!dir);
1467 		BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
1468 		BUG_ON(!req->r_dentry);
1469 		dout(" linking snapped dir %p to dn %p\n", in, req->r_dentry);
1470 		ceph_dir_clear_ordered(dir);
1471 		ihold(in);
1472 		err = splice_dentry(&req->r_dentry, in);
1473 		if (err < 0)
1474 			goto done;
1475 	} else if (rinfo->head->is_dentry && req->r_dentry) {
1476 		/* parent inode is not locked, be careful */
1477 		struct ceph_vino *ptvino = NULL;
1478 		dvino.ino = le64_to_cpu(rinfo->diri.in->ino);
1479 		dvino.snap = le64_to_cpu(rinfo->diri.in->snapid);
1480 		if (rinfo->head->is_target) {
1481 			tvino.ino = le64_to_cpu(rinfo->targeti.in->ino);
1482 			tvino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
1483 			ptvino = &tvino;
1484 		}
1485 		update_dentry_lease_careful(req->r_dentry, rinfo->dlease,
1486 					    session, req->r_request_started,
1487 					    rinfo->dname, rinfo->dname_len,
1488 					    &dvino, ptvino);
1489 	}
1490 done:
1491 	dout("fill_trace done err=%d\n", err);
1492 	return err;
1493 }
1494 
1495 /*
1496  * Prepopulate our cache with readdir results, leases, etc.
1497  */
1498 static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
1499 					   struct ceph_mds_session *session)
1500 {
1501 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1502 	int i, err = 0;
1503 
1504 	for (i = 0; i < rinfo->dir_nr; i++) {
1505 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1506 		struct ceph_vino vino;
1507 		struct inode *in;
1508 		int rc;
1509 
1510 		vino.ino = le64_to_cpu(rde->inode.in->ino);
1511 		vino.snap = le64_to_cpu(rde->inode.in->snapid);
1512 
1513 		in = ceph_get_inode(req->r_dentry->d_sb, vino);
1514 		if (IS_ERR(in)) {
1515 			err = PTR_ERR(in);
1516 			dout("new_inode badness got %d\n", err);
1517 			continue;
1518 		}
1519 		rc = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1520 				     -1, &req->r_caps_reservation);
1521 		if (rc < 0) {
1522 			pr_err("ceph_fill_inode badness on %p got %d\n",
1523 			       in, rc);
1524 			err = rc;
1525 			if (in->i_state & I_NEW) {
1526 				ihold(in);
1527 				discard_new_inode(in);
1528 			}
1529 		} else if (in->i_state & I_NEW) {
1530 			unlock_new_inode(in);
1531 		}
1532 
1533 		/* avoid calling iput_final() in mds dispatch threads */
1534 		ceph_async_iput(in);
1535 	}
1536 
1537 	return err;
1538 }
1539 
1540 void ceph_readdir_cache_release(struct ceph_readdir_cache_control *ctl)
1541 {
1542 	if (ctl->page) {
1543 		kunmap(ctl->page);
1544 		put_page(ctl->page);
1545 		ctl->page = NULL;
1546 	}
1547 }
1548 
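/*
 * The readdir cache stores bare dentry pointers in page-cache pages of
 * the directory inode, indexed by readdir position, so that a later
 * readdir can be satisfied from the dcache without another MDS round
 * trip.
 */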
1549 static int fill_readdir_cache(struct inode *dir, struct dentry *dn,
1550 			      struct ceph_readdir_cache_control *ctl,
1551 			      struct ceph_mds_request *req)
1552 {
1553 	struct ceph_inode_info *ci = ceph_inode(dir);
1554 	unsigned nsize = PAGE_SIZE / sizeof(struct dentry*);
1555 	unsigned idx = ctl->index % nsize;
1556 	pgoff_t pgoff = ctl->index / nsize;
1557 
1558 	if (!ctl->page || pgoff != page_index(ctl->page)) {
1559 		ceph_readdir_cache_release(ctl);
1560 		if (idx == 0)
1561 			ctl->page = grab_cache_page(&dir->i_data, pgoff);
1562 		else
1563 			ctl->page = find_lock_page(&dir->i_data, pgoff);
1564 		if (!ctl->page) {
1565 			ctl->index = -1;
1566 			return idx == 0 ? -ENOMEM : 0;
1567 		}
1568 		/* reading/filling the cache are serialized by
1569 		 * i_mutex, no need to use page lock */
1570 		unlock_page(ctl->page);
1571 		ctl->dentries = kmap(ctl->page);
1572 		if (idx == 0)
1573 			memset(ctl->dentries, 0, PAGE_SIZE);
1574 	}
1575 
1576 	if (req->r_dir_release_cnt == atomic64_read(&ci->i_release_count) &&
1577 	    req->r_dir_ordered_cnt == atomic64_read(&ci->i_ordered_count)) {
1578 		dout("readdir cache dn %p idx %d\n", dn, ctl->index);
1579 		ctl->dentries[idx] = dn;
1580 		ctl->index++;
1581 	} else {
1582 		dout("disable readdir cache\n");
1583 		ctl->index = -1;
1584 	}
1585 	return 0;
1586 }
1587 
1588 int ceph_readdir_prepopulate(struct ceph_mds_request *req,
1589 			     struct ceph_mds_session *session)
1590 {
1591 	struct dentry *parent = req->r_dentry;
1592 	struct ceph_inode_info *ci = ceph_inode(d_inode(parent));
1593 	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
1594 	struct qstr dname;
1595 	struct dentry *dn;
1596 	struct inode *in;
1597 	int err = 0, skipped = 0, ret, i;
1598 	u32 frag = le32_to_cpu(req->r_args.readdir.frag);
1599 	u32 last_hash = 0;
1600 	u32 fpos_offset;
1601 	struct ceph_readdir_cache_control cache_ctl = {};
1602 
1603 	if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
1604 		return readdir_prepopulate_inodes_only(req, session);
1605 
1606 	if (rinfo->hash_order) {
1607 		if (req->r_path2) {
1608 			last_hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1609 						  req->r_path2,
1610 						  strlen(req->r_path2));
1611 			last_hash = ceph_frag_value(last_hash);
1612 		} else if (rinfo->offset_hash) {
1613 			/* mds understands offset_hash */
1614 			WARN_ON_ONCE(req->r_readdir_offset != 2);
1615 			last_hash = le32_to_cpu(req->r_args.readdir.offset_hash);
1616 		}
1617 	}
1618 
1619 	if (rinfo->dir_dir &&
1620 	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
1621 		dout("readdir_prepopulate got new frag %x -> %x\n",
1622 		     frag, le32_to_cpu(rinfo->dir_dir->frag));
1623 		frag = le32_to_cpu(rinfo->dir_dir->frag);
1624 		if (!rinfo->hash_order)
1625 			req->r_readdir_offset = 2;
1626 	}
1627 
1628 	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
1629 		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
1630 		     rinfo->dir_nr, parent);
1631 	} else {
1632 		dout("readdir_prepopulate %d items under dn %p\n",
1633 		     rinfo->dir_nr, parent);
1634 		if (rinfo->dir_dir)
1635 			ceph_fill_dirfrag(d_inode(parent), rinfo->dir_dir);
1636 
1637 		if (ceph_frag_is_leftmost(frag) &&
1638 		    req->r_readdir_offset == 2 &&
1639 		    !(rinfo->hash_order && last_hash)) {
1640 			/* note dir version at start of readdir so we can
1641 			 * tell if any dentries get dropped */
1642 			req->r_dir_release_cnt =
1643 				atomic64_read(&ci->i_release_count);
1644 			req->r_dir_ordered_cnt =
1645 				atomic64_read(&ci->i_ordered_count);
1646 			req->r_readdir_cache_idx = 0;
1647 		}
1648 	}
1649 
1650 	cache_ctl.index = req->r_readdir_cache_idx;
1651 	fpos_offset = req->r_readdir_offset;
1652 
1653 	/* FIXME: release caps/leases if error occurs */
1654 	for (i = 0; i < rinfo->dir_nr; i++) {
1655 		struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
1656 		struct ceph_vino tvino;
1657 
1658 		dname.name = rde->name;
1659 		dname.len = rde->name_len;
1660 		dname.hash = full_name_hash(parent, dname.name, dname.len);
1661 
1662 		tvino.ino = le64_to_cpu(rde->inode.in->ino);
1663 		tvino.snap = le64_to_cpu(rde->inode.in->snapid);
1664 
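		/*
		 * Compose the dentry offset: in hash-order mode it is the
		 * name hash plus a per-hash sequence number (restarting at 2
		 * whenever the hash changes); otherwise it is the frag plus
		 * a running sequence number.
		 */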
1665 		if (rinfo->hash_order) {
1666 			u32 hash = ceph_str_hash(ci->i_dir_layout.dl_dir_hash,
1667 						 rde->name, rde->name_len);
1668 			hash = ceph_frag_value(hash);
1669 			if (hash != last_hash)
1670 				fpos_offset = 2;
1671 			last_hash = hash;
1672 			rde->offset = ceph_make_fpos(hash, fpos_offset++, true);
1673 		} else {
1674 			rde->offset = ceph_make_fpos(frag, fpos_offset++, false);
1675 		}
1676 
1677 retry_lookup:
1678 		dn = d_lookup(parent, &dname);
1679 		dout("d_lookup on parent=%p name=%.*s got %p\n",
1680 		     parent, dname.len, dname.name, dn);
1681 
1682 		if (!dn) {
1683 			dn = d_alloc(parent, &dname);
1684 			dout("d_alloc %p '%.*s' = %p\n", parent,
1685 			     dname.len, dname.name, dn);
1686 			if (!dn) {
1687 				dout("d_alloc badness\n");
1688 				err = -ENOMEM;
1689 				goto out;
1690 			}
1691 		} else if (d_really_is_positive(dn) &&
1692 			   (ceph_ino(d_inode(dn)) != tvino.ino ||
1693 			    ceph_snap(d_inode(dn)) != tvino.snap)) {
1694 			struct ceph_dentry_info *di = ceph_dentry(dn);
1695 			dout(" dn %p points to wrong inode %p\n",
1696 			     dn, d_inode(dn));
1697 
1698 			spin_lock(&dn->d_lock);
1699 			if (di->offset > 0 &&
1700 			    di->lease_shared_gen ==
1701 			    atomic_read(&ci->i_shared_gen)) {
1702 				__ceph_dir_clear_ordered(ci);
1703 				di->offset = 0;
1704 			}
1705 			spin_unlock(&dn->d_lock);
1706 
1707 			d_delete(dn);
1708 			dput(dn);
1709 			goto retry_lookup;
1710 		}
1711 
1712 		/* inode */
1713 		if (d_really_is_positive(dn)) {
1714 			in = d_inode(dn);
1715 		} else {
1716 			in = ceph_get_inode(parent->d_sb, tvino);
1717 			if (IS_ERR(in)) {
1718 				dout("new_inode badness\n");
1719 				d_drop(dn);
1720 				dput(dn);
1721 				err = PTR_ERR(in);
1722 				goto out;
1723 			}
1724 		}
1725 
1726 		ret = ceph_fill_inode(in, NULL, &rde->inode, NULL, session,
1727 				      -1, &req->r_caps_reservation);
1728 		if (ret < 0) {
1729 			pr_err("ceph_fill_inode badness on %p\n", in);
1730 			if (d_really_is_negative(dn)) {
1731 				/* avoid calling iput_final() in mds
1732 				 * dispatch threads */
1733 				if (in->i_state & I_NEW) {
1734 					ihold(in);
1735 					discard_new_inode(in);
1736 				}
1737 				ceph_async_iput(in);
1738 			}
1739 			d_drop(dn);
1740 			err = ret;
1741 			goto next_item;
1742 		}
1743 		if (in->i_state & I_NEW)
1744 			unlock_new_inode(in);
1745 
1746 		if (d_really_is_negative(dn)) {
1747 			if (ceph_security_xattr_deadlock(in)) {
1748 				dout(" skip splicing dn %p to inode %p"
1749 				     " (security xattr deadlock)\n", dn, in);
1750 				ceph_async_iput(in);
1751 				skipped++;
1752 				goto next_item;
1753 			}
1754 
1755 			err = splice_dentry(&dn, in);
1756 			if (err < 0)
1757 				goto next_item;
1758 		}
1759 
1760 		ceph_dentry(dn)->offset = rde->offset;
1761 
1762 		update_dentry_lease(d_inode(parent), dn,
1763 				    rde->lease, req->r_session,
1764 				    req->r_request_started);
1765 
1766 		if (err == 0 && skipped == 0 && cache_ctl.index >= 0) {
1767 			ret = fill_readdir_cache(d_inode(parent), dn,
1768 						 &cache_ctl, req);
1769 			if (ret < 0)
1770 				err = ret;
1771 		}
1772 next_item:
1773 		dput(dn);
1774 	}
1775 out:
1776 	if (err == 0 && skipped == 0) {
1777 		set_bit(CEPH_MDS_R_DID_PREPOPULATE, &req->r_req_flags);
1778 		req->r_readdir_cache_idx = cache_ctl.index;
1779 	}
1780 	ceph_readdir_cache_release(&cache_ctl);
1781 	dout("readdir_prepopulate done\n");
1782 	return err;
1783 }
1784 
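/*
 * Update the locally cached file size.  Returns true if the change is
 * significant enough that it should be reported back to the MDS
 * (see __ceph_should_report_size()).
 */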
1785 bool ceph_inode_set_size(struct inode *inode, loff_t size)
1786 {
1787 	struct ceph_inode_info *ci = ceph_inode(inode);
1788 	bool ret;
1789 
1790 	spin_lock(&ci->i_ceph_lock);
1791 	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
1792 	i_size_write(inode, size);
1793 	inode->i_blocks = calc_inode_blocks(size);
1794 
1795 	ret = __ceph_should_report_size(ci);
1796 
1797 	spin_unlock(&ci->i_ceph_lock);
1798 	return ret;
1799 }
1800 
1801 /*
1802  * Put a reference to the inode, but avoid calling iput_final() in the
1803  * current thread. iput_final() may wait for readahead pages; that wait
1804  * can cause a deadlock in some contexts.
1805  */
1806 void ceph_async_iput(struct inode *inode)
1807 {
1808 	if (!inode)
1809 		return;
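	/*
	 * Drop the reference here only if it is not the last one;
	 * otherwise hand the final iput off to the inode workqueue
	 * (ceph_inode_work() does the iput for us).
	 */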
1810 	for (;;) {
1811 		if (atomic_add_unless(&inode->i_count, -1, 1))
1812 			break;
1813 		if (queue_work(ceph_inode_to_client(inode)->inode_wq,
1814 			       &ceph_inode(inode)->i_work))
1815 			break;
1816 		/* queue work failed, i_count must be at least 2 */
1817 	}
1818 }
1819 
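/*
 * Queue deferred work (writeback, invalidation, truncation, etc.) for an
 * inode.  A reference is taken here and dropped by ceph_inode_work(), or
 * immediately if the work item was already queued.
 */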
1820 void ceph_queue_inode_work(struct inode *inode, int work_bit)
1821 {
1822 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1823 	struct ceph_inode_info *ci = ceph_inode(inode);
1824 	set_bit(work_bit, &ci->i_work_mask);
1825 
1826 	ihold(inode);
1827 	if (queue_work(fsc->inode_wq, &ci->i_work)) {
1828 		dout("queue_inode_work %p, mask=%lx\n", inode, ci->i_work_mask);
1829 	} else {
1830 		dout("queue_inode_work %p already queued, mask=%lx\n",
1831 		     inode, ci->i_work_mask);
1832 		iput(inode);
1833 	}
1834 }
1835 
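/*
 * Invalidate the page cache for an inode, typically because the MDS is
 * revoking CEPH_CAP_FILE_CACHE.  i_rdcache_gen is sampled before and
 * compared after the invalidation to detect races with a new cap grant.
 */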
1836 static void ceph_do_invalidate_pages(struct inode *inode)
1837 {
1838 	struct ceph_inode_info *ci = ceph_inode(inode);
1839 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1840 	u32 orig_gen;
1841 	int check = 0;
1842 
1843 	mutex_lock(&ci->i_truncate_mutex);
1844 
1845 	if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
1846 		pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
1847 				    inode, ceph_ino(inode));
1848 		mapping_set_error(inode->i_mapping, -EIO);
1849 		truncate_pagecache(inode, 0);
1850 		mutex_unlock(&ci->i_truncate_mutex);
1851 		goto out;
1852 	}
1853 
1854 	spin_lock(&ci->i_ceph_lock);
1855 	dout("invalidate_pages %p gen %d revoking %d\n", inode,
1856 	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
1857 	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
1858 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1859 			check = 1;
1860 		spin_unlock(&ci->i_ceph_lock);
1861 		mutex_unlock(&ci->i_truncate_mutex);
1862 		goto out;
1863 	}
1864 	orig_gen = ci->i_rdcache_gen;
1865 	spin_unlock(&ci->i_ceph_lock);
1866 
1867 	ceph_fscache_invalidate(inode);
1868 	if (invalidate_inode_pages2(inode->i_mapping) < 0) {
1869 		pr_err("invalidate_pages %p fails\n", inode);
1870 	}
1871 
1872 	spin_lock(&ci->i_ceph_lock);
1873 	if (orig_gen == ci->i_rdcache_gen &&
1874 	    orig_gen == ci->i_rdcache_revoking) {
1875 		dout("invalidate_pages %p gen %d successful\n", inode,
1876 		     ci->i_rdcache_gen);
1877 		ci->i_rdcache_revoking--;
1878 		check = 1;
1879 	} else {
1880 		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
1881 		     inode, orig_gen, ci->i_rdcache_gen,
1882 		     ci->i_rdcache_revoking);
1883 		if (__ceph_caps_revoking_other(ci, NULL, CEPH_CAP_FILE_CACHE))
1884 			check = 1;
1885 	}
1886 	spin_unlock(&ci->i_ceph_lock);
1887 	mutex_unlock(&ci->i_truncate_mutex);
1888 out:
1889 	if (check)
1890 		ceph_check_caps(ci, 0, NULL);
1891 }
1892 
1893 /*
1894  * Make sure any pending truncation is applied before doing anything
1895  * that may depend on it.
1896  */
1897 void __ceph_do_pending_vmtruncate(struct inode *inode)
1898 {
1899 	struct ceph_inode_info *ci = ceph_inode(inode);
1900 	u64 to;
1901 	int wrbuffer_refs, finish = 0;
1902 
1903 	mutex_lock(&ci->i_truncate_mutex);
1904 retry:
1905 	spin_lock(&ci->i_ceph_lock);
1906 	if (ci->i_truncate_pending == 0) {
1907 		dout("__do_pending_vmtruncate %p none pending\n", inode);
1908 		spin_unlock(&ci->i_ceph_lock);
1909 		mutex_unlock(&ci->i_truncate_mutex);
1910 		return;
1911 	}
1912 
1913 	/*
1914 	 * make sure any dirty snapped pages are flushed before we
1915 	 * possibly truncate them... so write AND block!
1916 	 */
1917 	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
1918 		spin_unlock(&ci->i_ceph_lock);
1919 		dout("__do_pending_vmtruncate %p flushing snaps first\n",
1920 		     inode);
1921 		filemap_write_and_wait_range(&inode->i_data, 0,
1922 					     inode->i_sb->s_maxbytes);
1923 		goto retry;
1924 	}
1925 
1926 	/* there should be no reader or writer */
1927 	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);
1928 
1929 	to = ci->i_truncate_size;
1930 	wrbuffer_refs = ci->i_wrbuffer_ref;
1931 	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
1932 	     ci->i_truncate_pending, to);
1933 	spin_unlock(&ci->i_ceph_lock);
1934 
1935 	truncate_pagecache(inode, to);
1936 
1937 	spin_lock(&ci->i_ceph_lock);
1938 	if (to == ci->i_truncate_size) {
1939 		ci->i_truncate_pending = 0;
1940 		finish = 1;
1941 	}
1942 	spin_unlock(&ci->i_ceph_lock);
1943 	if (!finish)
1944 		goto retry;
1945 
1946 	mutex_unlock(&ci->i_truncate_mutex);
1947 
1948 	if (wrbuffer_refs == 0)
1949 		ceph_check_caps(ci, 0, NULL);
1950 
1951 	wake_up_all(&ci->i_cap_wq);
1952 }
1953 
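/*
 * Workqueue handler: run whatever deferred operations are flagged in
 * i_work_mask, then drop the inode reference taken when the work was
 * queued.
 */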
1954 static void ceph_inode_work(struct work_struct *work)
1955 {
1956 	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
1957 						 i_work);
1958 	struct inode *inode = &ci->vfs_inode;
1959 
1960 	if (test_and_clear_bit(CEPH_I_WORK_WRITEBACK, &ci->i_work_mask)) {
1961 		dout("writeback %p\n", inode);
1962 		filemap_fdatawrite(&inode->i_data);
1963 	}
1964 	if (test_and_clear_bit(CEPH_I_WORK_INVALIDATE_PAGES, &ci->i_work_mask))
1965 		ceph_do_invalidate_pages(inode);
1966 
1967 	if (test_and_clear_bit(CEPH_I_WORK_VMTRUNCATE, &ci->i_work_mask))
1968 		__ceph_do_pending_vmtruncate(inode);
1969 
1970 	if (test_and_clear_bit(CEPH_I_WORK_CHECK_CAPS, &ci->i_work_mask))
1971 		ceph_check_caps(ci, 0, NULL);
1972 
1973 	if (test_and_clear_bit(CEPH_I_WORK_FLUSH_SNAPS, &ci->i_work_mask))
1974 		ceph_flush_snaps(ci, NULL);
1975 
1976 	iput(inode);
1977 }
1978 
1979 /*
1980  * symlinks
1981  */
1982 static const struct inode_operations ceph_symlink_iops = {
1983 	.get_link = simple_get_link,
1984 	.setattr = ceph_setattr,
1985 	.getattr = ceph_getattr,
1986 	.listxattr = ceph_listxattr,
1987 };
1988 
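/*
 * Apply attribute changes.  Changes we can make locally under an
 * exclusive cap are applied and the cap is marked dirty; anything else
 * is sent to the auth MDS as a SETATTR request.
 */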
1989 int __ceph_setattr(struct inode *inode, struct iattr *attr)
1990 {
1991 	struct ceph_inode_info *ci = ceph_inode(inode);
1992 	unsigned int ia_valid = attr->ia_valid;
1993 	struct ceph_mds_request *req;
1994 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
1995 	struct ceph_cap_flush *prealloc_cf;
1996 	int issued;
1997 	int release = 0, dirtied = 0;
1998 	int mask = 0;
1999 	int err = 0;
2000 	int inode_dirty_flags = 0;
2001 	bool lock_snap_rwsem = false;
2002 
2003 	prealloc_cf = ceph_alloc_cap_flush();
2004 	if (!prealloc_cf)
2005 		return -ENOMEM;
2006 
2007 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
2008 				       USE_AUTH_MDS);
2009 	if (IS_ERR(req)) {
2010 		ceph_free_cap_flush(prealloc_cf);
2011 		return PTR_ERR(req);
2012 	}
2013 
2014 	spin_lock(&ci->i_ceph_lock);
2015 	issued = __ceph_caps_issued(ci, NULL);
2016 
2017 	if (!ci->i_head_snapc &&
2018 	    (issued & (CEPH_CAP_ANY_EXCL | CEPH_CAP_FILE_WR))) {
2019 		lock_snap_rwsem = true;
2020 		if (!down_read_trylock(&mdsc->snap_rwsem)) {
2021 			spin_unlock(&ci->i_ceph_lock);
2022 			down_read(&mdsc->snap_rwsem);
2023 			spin_lock(&ci->i_ceph_lock);
2024 			issued = __ceph_caps_issued(ci, NULL);
2025 		}
2026 	}
2027 
2028 	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
2029 
2030 	if (ia_valid & ATTR_UID) {
2031 		dout("setattr %p uid %d -> %d\n", inode,
2032 		     from_kuid(&init_user_ns, inode->i_uid),
2033 		     from_kuid(&init_user_ns, attr->ia_uid));
2034 		if (issued & CEPH_CAP_AUTH_EXCL) {
2035 			inode->i_uid = attr->ia_uid;
2036 			dirtied |= CEPH_CAP_AUTH_EXCL;
2037 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2038 			   !uid_eq(attr->ia_uid, inode->i_uid)) {
2039 			req->r_args.setattr.uid = cpu_to_le32(
2040 				from_kuid(&init_user_ns, attr->ia_uid));
2041 			mask |= CEPH_SETATTR_UID;
2042 			release |= CEPH_CAP_AUTH_SHARED;
2043 		}
2044 	}
2045 	if (ia_valid & ATTR_GID) {
2046 		dout("setattr %p gid %d -> %d\n", inode,
2047 		     from_kgid(&init_user_ns, inode->i_gid),
2048 		     from_kgid(&init_user_ns, attr->ia_gid));
2049 		if (issued & CEPH_CAP_AUTH_EXCL) {
2050 			inode->i_gid = attr->ia_gid;
2051 			dirtied |= CEPH_CAP_AUTH_EXCL;
2052 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2053 			   !gid_eq(attr->ia_gid, inode->i_gid)) {
2054 			req->r_args.setattr.gid = cpu_to_le32(
2055 				from_kgid(&init_user_ns, attr->ia_gid));
2056 			mask |= CEPH_SETATTR_GID;
2057 			release |= CEPH_CAP_AUTH_SHARED;
2058 		}
2059 	}
2060 	if (ia_valid & ATTR_MODE) {
2061 		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
2062 		     attr->ia_mode);
2063 		if (issued & CEPH_CAP_AUTH_EXCL) {
2064 			inode->i_mode = attr->ia_mode;
2065 			dirtied |= CEPH_CAP_AUTH_EXCL;
2066 		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
2067 			   attr->ia_mode != inode->i_mode) {
2068 			inode->i_mode = attr->ia_mode;
2069 			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
2070 			mask |= CEPH_SETATTR_MODE;
2071 			release |= CEPH_CAP_AUTH_SHARED;
2072 		}
2073 	}
2074 
2075 	if (ia_valid & ATTR_ATIME) {
2076 		dout("setattr %p atime %lld.%ld -> %lld.%ld\n", inode,
2077 		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
2078 		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
2079 		if (issued & CEPH_CAP_FILE_EXCL) {
2080 			ci->i_time_warp_seq++;
2081 			inode->i_atime = attr->ia_atime;
2082 			dirtied |= CEPH_CAP_FILE_EXCL;
2083 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2084 			   timespec64_compare(&inode->i_atime,
2085 					    &attr->ia_atime) < 0) {
2086 			inode->i_atime = attr->ia_atime;
2087 			dirtied |= CEPH_CAP_FILE_WR;
2088 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2089 			   !timespec64_equal(&inode->i_atime, &attr->ia_atime)) {
2090 			ceph_encode_timespec64(&req->r_args.setattr.atime,
2091 					       &attr->ia_atime);
2092 			mask |= CEPH_SETATTR_ATIME;
2093 			release |= CEPH_CAP_FILE_SHARED |
2094 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2095 		}
2096 	}
2097 	if (ia_valid & ATTR_SIZE) {
2098 		dout("setattr %p size %lld -> %lld\n", inode,
2099 		     inode->i_size, attr->ia_size);
2100 		if ((issued & CEPH_CAP_FILE_EXCL) &&
2101 		    attr->ia_size > inode->i_size) {
2102 			i_size_write(inode, attr->ia_size);
2103 			inode->i_blocks = calc_inode_blocks(attr->ia_size);
2104 			ci->i_reported_size = attr->ia_size;
2105 			dirtied |= CEPH_CAP_FILE_EXCL;
2106 			ia_valid |= ATTR_MTIME;
2107 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2108 			   attr->ia_size != inode->i_size) {
2109 			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
2110 			req->r_args.setattr.old_size =
2111 				cpu_to_le64(inode->i_size);
2112 			mask |= CEPH_SETATTR_SIZE;
2113 			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL |
2114 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2115 		}
2116 	}
2117 	if (ia_valid & ATTR_MTIME) {
2118 		dout("setattr %p mtime %lld.%ld -> %lld.%ld\n", inode,
2119 		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
2120 		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
2121 		if (issued & CEPH_CAP_FILE_EXCL) {
2122 			ci->i_time_warp_seq++;
2123 			inode->i_mtime = attr->ia_mtime;
2124 			dirtied |= CEPH_CAP_FILE_EXCL;
2125 		} else if ((issued & CEPH_CAP_FILE_WR) &&
2126 			   timespec64_compare(&inode->i_mtime,
2127 					    &attr->ia_mtime) < 0) {
2128 			inode->i_mtime = attr->ia_mtime;
2129 			dirtied |= CEPH_CAP_FILE_WR;
2130 		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
2131 			   !timespec64_equal(&inode->i_mtime, &attr->ia_mtime)) {
2132 			ceph_encode_timespec64(&req->r_args.setattr.mtime,
2133 					       &attr->ia_mtime);
2134 			mask |= CEPH_SETATTR_MTIME;
2135 			release |= CEPH_CAP_FILE_SHARED |
2136 				   CEPH_CAP_FILE_RD | CEPH_CAP_FILE_WR;
2137 		}
2138 	}
2139 
2140 	/* these do nothing */
2141 	if (ia_valid & ATTR_CTIME) {
2142 		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
2143 					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
2144 		dout("setattr %p ctime %lld.%ld -> %lld.%ld (%s)\n", inode,
2145 		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
2146 		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
2147 		     only ? "ctime only" : "ignored");
2148 		if (only) {
2149 			/*
2150 			 * if the kernel wants to dirty ctime but nothing else,
2151 			 * we need to choose a cap to dirty under, or do
2152 			 * an almost-no-op setattr
2153 			 */
2154 			if (issued & CEPH_CAP_AUTH_EXCL)
2155 				dirtied |= CEPH_CAP_AUTH_EXCL;
2156 			else if (issued & CEPH_CAP_FILE_EXCL)
2157 				dirtied |= CEPH_CAP_FILE_EXCL;
2158 			else if (issued & CEPH_CAP_XATTR_EXCL)
2159 				dirtied |= CEPH_CAP_XATTR_EXCL;
2160 			else
2161 				mask |= CEPH_SETATTR_CTIME;
2162 		}
2163 	}
2164 	if (ia_valid & ATTR_FILE)
2165 		dout("setattr %p ATTR_FILE ... hrm!\n", inode);
2166 
2167 	if (dirtied) {
2168 		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied,
2169 							   &prealloc_cf);
2170 		inode->i_ctime = attr->ia_ctime;
2171 	}
2172 
2173 	release &= issued;
2174 	spin_unlock(&ci->i_ceph_lock);
2175 	if (lock_snap_rwsem)
2176 		up_read(&mdsc->snap_rwsem);
2177 
2178 	if (inode_dirty_flags)
2179 		__mark_inode_dirty(inode, inode_dirty_flags);
2180 
2181 
2182 	if (mask) {
2183 		req->r_inode = inode;
2184 		ihold(inode);
2185 		req->r_inode_drop = release;
2186 		req->r_args.setattr.mask = cpu_to_le32(mask);
2187 		req->r_num_caps = 1;
2188 		req->r_stamp = attr->ia_ctime;
2189 		err = ceph_mdsc_do_request(mdsc, NULL, req);
2190 	}
2191 	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
2192 	     ceph_cap_string(dirtied), mask);
2193 
2194 	ceph_mdsc_put_request(req);
2195 	ceph_free_cap_flush(prealloc_cf);
2196 
2197 	if (err >= 0 && (mask & CEPH_SETATTR_SIZE))
2198 		__ceph_do_pending_vmtruncate(inode);
2199 
2200 	return err;
2201 }
2202 
2203 /*
2204  * setattr
2205  */
2206 int ceph_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
2207 		 struct iattr *attr)
2208 {
2209 	struct inode *inode = d_inode(dentry);
2210 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2211 	int err;
2212 
2213 	if (ceph_snap(inode) != CEPH_NOSNAP)
2214 		return -EROFS;
2215 
2216 	err = setattr_prepare(&init_user_ns, dentry, attr);
2217 	if (err != 0)
2218 		return err;
2219 
2220 	if ((attr->ia_valid & ATTR_SIZE) &&
2221 	    attr->ia_size > max(inode->i_size, fsc->max_file_size))
2222 		return -EFBIG;
2223 
2224 	if ((attr->ia_valid & ATTR_SIZE) &&
2225 	    ceph_quota_is_max_bytes_exceeded(inode, attr->ia_size))
2226 		return -EDQUOT;
2227 
2228 	err = __ceph_setattr(inode, attr);
2229 
2230 	if (err >= 0 && (attr->ia_valid & ATTR_MODE))
2231 		err = posix_acl_chmod(&init_user_ns, inode, attr->ia_mode);
2232 
2233 	return err;
2234 }
2235 
2236 /*
2237  * Verify that we have a lease on the given mask.  If not,
2238  * do a getattr against an mds.
2239  */
2240 int __ceph_do_getattr(struct inode *inode, struct page *locked_page,
2241 		      int mask, bool force)
2242 {
2243 	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
2244 	struct ceph_mds_client *mdsc = fsc->mdsc;
2245 	struct ceph_mds_request *req;
2246 	int mode;
2247 	int err;
2248 
2249 	if (ceph_snap(inode) == CEPH_SNAPDIR) {
2250 		dout("do_getattr inode %p SNAPDIR\n", inode);
2251 		return 0;
2252 	}
2253 
2254 	dout("do_getattr inode %p mask %s mode 0%o\n",
2255 	     inode, ceph_cap_string(mask), inode->i_mode);
2256 	if (!force && ceph_caps_issued_mask_metric(ceph_inode(inode), mask, 1))
2257 		return 0;
2258 
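	/* recursive statistics (rstats) need to come from the auth MDS */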
2259 	mode = (mask & CEPH_STAT_RSTAT) ? USE_AUTH_MDS : USE_ANY_MDS;
2260 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, mode);
2261 	if (IS_ERR(req))
2262 		return PTR_ERR(req);
2263 	req->r_inode = inode;
2264 	ihold(inode);
2265 	req->r_num_caps = 1;
2266 	req->r_args.getattr.mask = cpu_to_le32(mask);
2267 	req->r_locked_page = locked_page;
2268 	err = ceph_mdsc_do_request(mdsc, NULL, req);
2269 	if (locked_page && err == 0) {
2270 		u64 inline_version = req->r_reply_info.targeti.inline_version;
2271 		if (inline_version == 0) {
2272 			/* the reply is supposed to contain inline data */
2273 			err = -EINVAL;
2274 		} else if (inline_version == CEPH_INLINE_NONE) {
2275 			err = -ENODATA;
2276 		} else {
2277 			err = req->r_reply_info.targeti.inline_len;
2278 		}
2279 	}
2280 	ceph_mdsc_put_request(req);
2281 	dout("do_getattr result=%d\n", err);
2282 	return err;
2283 }
2284 
2285 
2286 /*
2287  * Check inode permissions.  We make sure we hold valid AUTH_SHARED caps
2288  * (which cover mode/uid/gid), then call the generic handler.
2289  */
2290 int ceph_permission(struct user_namespace *mnt_userns, struct inode *inode,
2291 		    int mask)
2292 {
2293 	int err;
2294 
2295 	if (mask & MAY_NOT_BLOCK)
2296 		return -ECHILD;
2297 
2298 	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED, false);
2299 
2300 	if (!err)
2301 		err = generic_permission(&init_user_ns, inode, mask);
2302 	return err;
2303 }
2304 
2305 /* Craft a mask of needed caps given a set of requested statx attrs. */
2306 static int statx_to_caps(u32 want, umode_t mode)
2307 {
2308 	int mask = 0;
2309 
2310 	if (want & (STATX_MODE|STATX_UID|STATX_GID|STATX_CTIME|STATX_BTIME))
2311 		mask |= CEPH_CAP_AUTH_SHARED;
2312 
2313 	if (want & (STATX_NLINK|STATX_CTIME)) {
2314 		/*
2315 		 * The link count for directories depends on inode->i_subdirs,
2316 		 * and that is only updated when Fs caps are held.
2317 		 */
2318 		if (S_ISDIR(mode))
2319 			mask |= CEPH_CAP_FILE_SHARED;
2320 		else
2321 			mask |= CEPH_CAP_LINK_SHARED;
2322 	}
2323 
2324 	if (want & (STATX_ATIME|STATX_MTIME|STATX_CTIME|STATX_SIZE|
2325 		    STATX_BLOCKS))
2326 		mask |= CEPH_CAP_FILE_SHARED;
2327 
2328 	if (want & (STATX_CTIME))
2329 		mask |= CEPH_CAP_XATTR_SHARED;
2330 
2331 	return mask;
2332 }
2333 
2334 /*
2335  * Get all the attributes. If we have sufficient caps for the requested attrs,
2336  * then we can avoid talking to the MDS at all.
2337  */
2338 int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
2339 		 struct kstat *stat, u32 request_mask, unsigned int flags)
2340 {
2341 	struct inode *inode = d_inode(path->dentry);
2342 	struct ceph_inode_info *ci = ceph_inode(inode);
2343 	u32 valid_mask = STATX_BASIC_STATS;
2344 	int err = 0;
2345 
2346 	/* Skip the getattr altogether if we're asked not to sync */
2347 	if (!(flags & AT_STATX_DONT_SYNC)) {
2348 		err = ceph_do_getattr(inode,
2349 				statx_to_caps(request_mask, inode->i_mode),
2350 				flags & AT_STATX_FORCE_SYNC);
2351 		if (err)
2352 			return err;
2353 	}
2354 
2355 	generic_fillattr(&init_user_ns, inode, stat);
2356 	stat->ino = ceph_present_inode(inode);
2357 
2358 	/*
2359 	 * btime on newly-allocated inodes is 0, so if this is still set to
2360 	 * that, then assume that it's not valid.
2361 	 */
2362 	if (ci->i_btime.tv_sec || ci->i_btime.tv_nsec) {
2363 		stat->btime = ci->i_btime;
2364 		valid_mask |= STATX_BTIME;
2365 	}
2366 
2367 	if (ceph_snap(inode) == CEPH_NOSNAP)
2368 		stat->dev = inode->i_sb->s_dev;
2369 	else
2370 		stat->dev = ci->i_snapid_map ? ci->i_snapid_map->dev : 0;
2371 
2372 	if (S_ISDIR(inode->i_mode)) {
2373 		if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
2374 					RBYTES))
2375 			stat->size = ci->i_rbytes;
2376 		else
2377 			stat->size = ci->i_files + ci->i_subdirs;
2378 		stat->blocks = 0;
2379 		stat->blksize = 65536;
2380 		/*
2381 		 * Some applications rely on the st_nlink value of
2382 		 * directories being either 0 (if unlinked) or
2383 		 * 2 + the number of subdirectories.
2384 		 */
2385 		if (stat->nlink == 1)
2386 			/* '.' + '..' + subdirs */
2387 			stat->nlink = 1 + 1 + ci->i_subdirs;
2388 	}
2389 
2390 	stat->result_mask = request_mask & valid_mask;
2391 	return err;
2392 }
2393