/* xref: /openbmc/linux/fs/ceph/inode.c (revision 8b218b8a) */
#include "ceph_debug.h"

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>
#include <linux/pagevec.h>

#include "super.h"
#include "decode.h"

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_dir_iops;
	inode->i_fop = &ceph_dir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
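/*
 * For example (assuming the frag encoding from ceph_frag.h, where a
 * frag packs a split depth plus a value prefix over the 32-bit hash
 * space): the root frag 0/0 covers every hash value; splitting it by
 * 1 yields 1/0 and 1/1, each covering half of the space.
 */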

/*
 * find/create a frag in the tree.  Callers hold i_fragtree_mutex
 * (hence the __ prefix).
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag,
		     int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	mutex_unlock(&ci->i_fragtree_mutex);
	return t;
}
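/*
 * (A typical caller hashes a dentry name and passes the result as @v
 * to decide which dirfrag, and hence which MDS, should serve it; the
 * walk above descends from the root frag one split at a time until it
 * reaches the covering leaf.)
 */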

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int i;
	int err = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}

	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}


/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	ci->i_release_count = 0;
	ci->i_symlink = NULL;

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	return &ci->vfs_inode;
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_cap_exporting_issued or i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			&ceph_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	kmem_cache_free(ceph_inode_cachep, ci);
}


/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
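/*
 * (ceph_seq_cmp() compares sequence numbers via signed 32-bit
 * subtraction, so the ordering checks below stay correct across
 * wraparound.)
 */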
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
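		/* (i_blocks is in 512-byte sectors, hence the round-up) */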
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;
			/*
			 * If we hold caps that let us cache file data,
			 * or the file is open or mmapped (and so stale
			 * pages may exist), queue a truncation so the
			 * cached data gets invalidated.
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_RD|
				      CEPH_CAP_FILE_WR|CEPH_CAP_FILE_BUFFER|
				      CEPH_CAP_FILE_EXCL)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}
	return queue_trunc;
}

void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER)) {
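		/* we hold a write cap, so our times may be ahead of the MDS */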
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int i;
	int issued, implemented;
	struct timespec mtime, atime, ctime;
	u32 nsplits;
	struct ceph_buffer *xattr_blob = NULL;
	int err = 0;
	int queue_trunc = 0;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
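	/*
	 * (Roughly, the blob is a ceph-encoded map: a u32 entry count
	 * followed by length-prefixed name/value pairs, so a 4-byte
	 * blob is just an empty map; hence the > 4 test below.)
	 */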
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&inode->i_lock);

	/*
	 * The provided version will be odd if the inode value is
	 * projected (still in flux on the MDS), and even if it is
	 * stable.  Skip the update if we already have newer info
	 * (e.g., due to inode info racing from multiple MDSs), or if
	 * we are getting projected (unstable) inode info.
	 */
	if (le64_to_cpu(info->version) > 0 &&
	    (ci->i_version & ~1) > le64_to_cpu(info->version))
		goto no_change;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);

	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = le32_to_cpu(info->uid);
		inode->i_gid = le32_to_cpu(info->gid);
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     inode->i_uid, inode->i_gid);
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		inode->i_nlink = le32_to_cpu(info->nlink);

	/* be careful with mtime, atime, size */
	ceph_decode_timespec(&atime, &info->atime);
	ceph_decode_timespec(&mtime, &info->mtime);
	ceph_decode_timespec(&ctime, &info->ctime);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  le32_to_cpu(info->truncate_seq),
					  le64_to_cpu(info->truncate_size),
					  le64_to_cpu(info->size));
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(info->time_warp_seq),
			    &ctime, &mtime, &atime);

	ci->i_max_size = le64_to_cpu(info->max_size);
	ci->i_layout = info->layout;
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;
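	/* (i_blkbits becomes floor(log2(stripe unit)); e.g., a 4 MB
	 * stripe unit yields i_blkbits = 22) */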

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
	}

	inode->i_mapping->a_ops = &ceph_aops;
	inode->i_mapping->backing_dev_info =
		&ceph_client(inode->i_sb)->backing_dev_info;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			int symlen = iinfo->symlink_len;
			char *sym;

			BUG_ON(symlen != inode->i_size);
			spin_unlock(&inode->i_lock);

			err = -ENOMEM;
			sym = kmalloc(symlen+1, GFP_NOFS);
			if (!sym)
				goto out;
			memcpy(sym, iinfo->symlink, symlen);
			sym[symlen] = 0;

			spin_lock(&inode->i_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);

		/* set dir completion flag? */
		if (ci->i_files == 0 && ci->i_subdirs == 0 &&
		    ceph_snap(inode) == CEPH_NOSNAP &&
		    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED)) {
			dout(" marking %p complete (empty)\n", inode);
			ci->i_ceph_flags |= CEPH_I_COMPLETE;
			ci->i_max_offset = 2;
		}

		/* it may be better to set st_size in getattr instead? */
		if (ceph_test_opt(ceph_client(inode->i_sb), RBYTES))
			inode->i_size = ci->i_rbytes;
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

no_change:
	spin_unlock(&inode->i_lock);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	/* FIXME: move me up, if/when version reflects fragtree changes */
	nsplits = le32_to_cpu(info->fragtree.nsplits);
	mutex_lock(&ci->i_fragtree_mutex);
	for (i = 0; i < nsplits; i++) {
		u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
		struct ceph_inode_frag *frag = __get_or_create_frag(ci, id);

		if (IS_ERR(frag))
			continue;
		frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	mutex_unlock(&ci->i_fragtree_mutex);

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode,
				     le32_to_cpu(info->cap.caps),
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags,
				     caps_reservation);
		} else {
			spin_lock(&inode->i_lock);
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
			spin_unlock(&inode->i_lock);
		}
	}

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;

out:
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	unsigned long duration = le32_to_cpu(lease->duration_ms);
	unsigned long ttl = from_time + (duration * HZ) / 1000;
	unsigned long half_ttl = from_time + (duration * HZ / 2) / 1000;
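	/* e.g., a 30000 ms lease with HZ=250 gives ttl = from_time + 7500
	 * jiffies; we attempt renewal once half that interval has passed */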
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p mask %d duration %lu ms ttl %lu\n",
	     dentry, le16_to_cpu(lease->mask), duration, ttl);

	/* make lease_shared_gen match the directory's i_shared_gen */
	dir = dentry->d_parent->d_inode;
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (lease->mask == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash)
{
	struct dentry *realdn;

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_materialise_unique(dn, in);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %p inode %p ino %llx.%llx\n",
		       dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, atomic_read(&dn->d_count),
		     realdn, atomic_read(&realdn->d_count),
		     realdn->d_inode, ceph_vinop(realdn->d_inode));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));

		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, dn->d_inode, ceph_vinop(dn->d_inode));
	}
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
out:
	return dn;
}

/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
	struct dentry *dir = dn->d_parent;
	struct inode *inode = dn->d_parent->d_inode;
	struct ceph_dentry_info *di;

	BUG_ON(!inode);

	di = ceph_dentry(dn);

	spin_lock(&inode->i_lock);
	di->offset = ceph_inode(inode)->i_max_offset++;
	spin_unlock(&inode->i_lock);

	spin_lock(&dcache_lock);
	spin_lock(&dn->d_lock);
	/* move dn itself (not the list head) within the parent's d_subdirs,
	 * matching what readdir_prepopulate does below */
	list_move(&dn->d_u.d_child, &dir->d_subdirs);
	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
	spin_unlock(&dn->d_lock);
	spin_unlock(&dcache_lock);
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *  and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_mds_reply_inode *ininfo;
	struct ceph_vino vino;
	int i = 0;
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (le32_to_cpu(rinfo->head->op) & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid,
			ceph_mds_op_name(le32_to_cpu(rinfo->head->op)));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode,
					 &req->r_caps_reservation);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir) {
			struct ceph_inode_info *ci =
				ceph_inode(req->r_locked_dir);
			dout(" clearing %p complete (empty trace)\n",
			     req->r_locked_dir);
			ci->i_ceph_flags &= ~CEPH_I_COMPLETE;
			ci->i_release_count++;
		}
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
				 session, req->r_request_started, -1,
				 &req->r_caps_reservation);
		if (err < 0)
			return err;
	}

	if (rinfo->head->is_dentry && !req->r_aborted) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(dn->d_parent->d_inode != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			(le16_to_cpu(rinfo->dlease->mask) &
			 CEPH_LOCK_DN);

		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);
			d_move(req->r_old_dentry, dn);
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);
			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			dn->d_time = jiffies;
			ceph_dentry(dn)->lease_shared_gen = 0;
			/* take overwritten dentry's readdir offset */
			ceph_dentry(req->r_old_dentry)->offset =
				ceph_dentry(dn)->offset;
			dn = req->r_old_dentry;  /* use old_dentry */
			in = dn->d_inode;
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (dn->d_inode) {
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				dout("d_instantiate %p NULL\n", dn);
				d_instantiate(dn, NULL);
				if (have_lease && d_unhashed(dn))
					d_rehash(dn);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		if (!dn->d_inode) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				pr_err("fill_trace bad get_inode "
				       "%llx.%llx\n", vino.ino, vino.snap);
				err = PTR_ERR(in);
				d_delete(dn);
				goto done;
			}
			dn = splice_dentry(dn, in, &have_lease);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
			ceph_set_dentry_offset(dn);
			igrab(in);
		} else if (ceph_ino(dn->d_inode) == vino.ino &&
			   ceph_snap(dn->d_inode) == vino.snap) {
			in = dn->d_inode;
			igrab(in);
		} else {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, dn->d_inode, ceph_ino(dn->d_inode),
			     ceph_snap(dn->d_inode), vino.ino, vino.snap);
			have_lease = false;
			in = NULL;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
		i++;
	} else if (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		   req->r_op == CEPH_MDS_OP_MKSNAP) {
		struct dentry *dn = req->r_dentry;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!req->r_locked_dir);
		BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			pr_err("fill_inode get_inode badness %llx.%llx\n",
			       vino.ino, vino.snap);
			err = PTR_ERR(in);
			d_delete(dn);
			goto done;
		}
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		dn = splice_dentry(dn, in, NULL);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		ceph_set_dentry_offset(dn);
		req->r_dentry = dn;  /* may have spliced */
		igrab(in);
		rinfo->head->is_dentry = 1;  /* fool notrace handlers */
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		if (in == NULL || ceph_ino(in) != vino.ino ||
		    ceph_snap(in) != vino.snap) {
			in = ceph_get_inode(sb, vino);
			if (IS_ERR(in)) {
				err = PTR_ERR(in);
				goto done;
			}
		}
		req->r_target_inode = in;

		err = fill_inode(in,
				 &rinfo->targeti, NULL,
				 session, req->r_request_started,
				 (le32_to_cpu(rinfo->head->result) == 0) ?
				 req->r_fmode : -1,
				 &req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
			       in, ceph_vinop(in));
			goto done;
		}
	}

done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	u64 frag = le32_to_cpu(rhead->args.readdir.frag);
	struct ceph_dentry_info *di;

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(parent->d_inode);
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
	}

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			err = ceph_init_dentry(dn);
			if (err < 0)
				goto out;
		} else if (dn->d_inode &&
			   (ceph_ino(dn->d_inode) != vino.ino ||
			    ceph_snap(dn->d_inode) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, dn->d_inode);
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		} else {
			/* reorder parent's d_subdirs */
			spin_lock(&dcache_lock);
			spin_lock(&dn->d_lock);
			list_move(&dn->d_u.d_child, &parent->d_subdirs);
			spin_unlock(&dn->d_lock);
			spin_unlock(&dcache_lock);
		}

		di = dn->d_fsdata;
		di->offset = ceph_make_fpos(frag, i + req->r_readdir_offset);
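		/* (ceph_make_fpos() packs the frag into the high bits of
		 * the readdir file position, with the per-frag entry index
		 * in the low bits) */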

		/* inode */
		if (dn->d_inode) {
			in = dn->d_inode;
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_delete(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
			dn = splice_dentry(dn, in, NULL);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto out;
			}
		}

		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
			       req->r_request_started, -1,
			       &req->r_caps_reservation) < 0) {
			pr_err("fill_inode badness on %p\n", in);
			dput(dn);
			continue;
		}
		update_dentry_lease(dn, rinfo->dir_dlease[i],
				    req->r_session, req->r_request_started);
		dput(dn);
	}
	req->r_did_prepopulate = true;

out:
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&inode->i_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	inode->i_size = size;
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
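	/* (i.e., the new size passes the halfway point to max_size while
	 * our last reported size had not) */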
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&inode->i_lock);
	return ret;
}

/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
		igrab(inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
		igrab(inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
	}
}

/*
 * invalidate any pages that are not dirty or under writeback.  this
 * includes pages that are clean and mapped.
 */
static void ceph_invalidate_nondirty_pages(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int skip_page =
				(PageDirty(page) || PageWriteback(page));

			if (!skip_page)
				skip_page = !trylock_page(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;

			if (skip_page)
				continue;

			generic_error_remove_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	spin_lock(&inode->i_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_gen == 0 ||
	    ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		BUG_ON(ci->i_rdcache_revoking > ci->i_rdcache_gen);
		/* nevermind! */
		ci->i_rdcache_revoking = 0;
		spin_unlock(&inode->i_lock);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&inode->i_lock);

	ceph_invalidate_nondirty_pages(inode->i_mapping);

	spin_lock(&inode->i_lock);
	if (orig_gen == ci->i_rdcache_gen) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_gen = 0;
		ci->i_rdcache_revoking = 0;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, gen now %d\n",
		     inode, orig_gen, ci->i_rdcache_gen);
	}
	spin_unlock(&inode->i_lock);

	if (check)
		ceph_check_caps(ci, 0, NULL);
out:
	iput(inode);
}

/*
 * Do a deferred truncate in a worker thread; called via trunc_wq.  We
 * take i_mutex ourselves, which the message handler path cannot
 * afford to do.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	mutex_lock(&inode->i_mutex);
	__ceph_do_pending_vmtruncate(inode);
	mutex_unlock(&inode->i_mutex);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (queue_work(ceph_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
		igrab(inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
	}
}

/*
 * called with i_mutex held.
 *
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, wake = 0;

retry:
	spin_lock(&inode->i_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&inode->i_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&inode->i_lock);

	truncate_inode_pages(inode->i_mapping, to);

	spin_lock(&inode->i_lock);
	ci->i_truncate_pending--;
	if (ci->i_truncate_pending == 0)
		wake = 1;
	spin_unlock(&inode->i_lock);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
	if (wake)
		wake_up(&ci->i_cap_wq);
}


/*
 * symlinks
 */
static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
	nd_set_link(nd, ci->i_symlink);
1488 }
1489 
1490 static const struct inode_operations ceph_symlink_iops = {
1491 	.readlink = generic_readlink,
1492 	.follow_link = ceph_sym_follow_link,
1493 };
1494 
1495 /*
1496  * setattr
1497  */
1498 int ceph_setattr(struct dentry *dentry, struct iattr *attr)
1499 {
1500 	struct inode *inode = dentry->d_inode;
1501 	struct ceph_inode_info *ci = ceph_inode(inode);
1502 	struct inode *parent_inode = dentry->d_parent->d_inode;
1503 	const unsigned int ia_valid = attr->ia_valid;
1504 	struct ceph_mds_request *req;
1505 	struct ceph_mds_client *mdsc = &ceph_client(dentry->d_sb)->mdsc;
1506 	int issued;
1507 	int release = 0, dirtied = 0;
1508 	int mask = 0;
1509 	int err = 0;
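	/*
	 * 'dirtied' accumulates caps under which we apply changes locally;
	 * 'mask' and 'release' describe what we instead ask the MDS to set
	 * (and which caps we release) via a SETATTR request.
	 */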

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	__ceph_do_pending_vmtruncate(inode);

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);

	spin_lock(&inode->i_lock);
	issued = __ceph_caps_issued(ci, NULL);
	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     inode->i_uid, attr->ia_uid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_uid != inode->i_uid) {
			req->r_args.setattr.uid = cpu_to_le32(attr->ia_uid);
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     inode->i_gid, attr->ia_gid);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_gid != inode->i_gid) {
			req->r_args.setattr.gid = cpu_to_le32(attr->ia_gid);
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if (attr->ia_size > inode->i_sb->s_maxbytes) {
			err = -EINVAL;
			goto out;
		}
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if the kernel wants to dirty ctime but nothing
			 * else, we need to choose a cap to dirty under, or
			 * do an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		__ceph_mark_dirty_caps(ci, dirtied);
		inode->i_ctime = CURRENT_TIME;
	}

	release &= issued;
	spin_unlock(&inode->i_lock);

	if (mask) {
		req->r_inode = igrab(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		err = ceph_mdsc_do_request(mdsc, parent_inode, req);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	__ceph_do_pending_vmtruncate(inode);
	return err;
out:
	spin_unlock(&inode->i_lock);
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int ceph_do_getattr(struct inode *inode, int mask)
{
	struct ceph_client *client = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = &client->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s\n", inode, ceph_cap_string(mask));
	if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = igrab(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}


/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);

	if (!err)
		err = generic_permission(inode, mask, NULL);
	return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = inode->i_ino;
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			stat->size = ci->i_rbytes;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}