/* fs/ceph/inode.c (revision 7221fe4c) */
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/vmalloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>

/*
 * Ceph inode operations
 *
 * Implement basic inode helpers (get, alloc) and inode ops (getattr,
 * setattr, etc.), xattr helpers, and helpers for assimilating
 * metadata returned by the MDS into our cache.
 *
 * Also define helpers for doing asynchronous writeback, invalidation,
 * and truncation for the benefit of those who can't afford to block
 * (typically because they are in the message handler path).
 */

static const struct inode_operations ceph_symlink_iops;

static void ceph_invalidate_work(struct work_struct *work);
static void ceph_writeback_work(struct work_struct *work);
static void ceph_vmtruncate_work(struct work_struct *work);

/*
 * find or create an inode, given the ceph ino number
 */
static int ceph_set_ino_cb(struct inode *inode, void *data)
{
	ceph_inode(inode)->i_vino = *(struct ceph_vino *)data;
	inode->i_ino = ceph_vino_to_ino(*(struct ceph_vino *)data);
	return 0;
}

struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino)
{
	struct inode *inode;
	ino_t t = ceph_vino_to_ino(vino);

	inode = iget5_locked(sb, t, ceph_ino_compare, ceph_set_ino_cb, &vino);
	if (inode == NULL)
		return ERR_PTR(-ENOMEM);
	if (inode->i_state & I_NEW) {
		dout("get_inode created new inode %p %llx.%llx ino %llx\n",
		     inode, ceph_vinop(inode), (u64)inode->i_ino);
		unlock_new_inode(inode);
	}

	dout("get_inode on %lu=%llx.%llx got %p\n", inode->i_ino, vino.ino,
	     vino.snap, inode);
	return inode;
}
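
/*
 * Illustrative usage (not from the original source; the vino values
 * below are placeholders): a caller looks up an inode by vino and
 * drops its reference with iput() when done.
 *
 *	struct ceph_vino vino = { .ino = 0x10000000000, .snap = CEPH_NOSNAP };
 *	struct inode *inode = ceph_get_inode(sb, vino);
 *
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	...
 *	iput(inode);
 */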

/*
 * get/construct snapdir inode for a given directory
 */
struct inode *ceph_get_snapdir(struct inode *parent)
{
	struct ceph_vino vino = {
		.ino = ceph_ino(parent),
		.snap = CEPH_SNAPDIR,
	};
	struct inode *inode = ceph_get_inode(parent->i_sb, vino);
	struct ceph_inode_info *ci = ceph_inode(inode);

	BUG_ON(!S_ISDIR(parent->i_mode));
	if (IS_ERR(inode))
		return inode;
	inode->i_mode = parent->i_mode;
	inode->i_uid = parent->i_uid;
	inode->i_gid = parent->i_gid;
	inode->i_op = &ceph_dir_iops;
	inode->i_fop = &ceph_dir_fops;
	ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
	ci->i_rbytes = 0;
	return inode;
}
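
/*
 * Note (illustrative): this backs the virtual snapshot directory that
 * the client exposes under each directory (".snap" by default, or the
 * name given by the snapdir_name mount option referenced later in
 * ceph_fill_trace()).
 */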

const struct inode_operations ceph_file_iops = {
	.permission = ceph_permission,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
};


/*
 * We use a 'frag tree' to keep track of the MDS's directory fragments
 * for a given inode (usually there is just a single fragment).  We
 * need to know when a child frag is delegated to a new MDS, or when
 * it is flagged as replicated, so we can direct our requests
 * accordingly.
 */
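
/*
 * Worked example (illustrative; based on the frag encoding in
 * include/linux/ceph/ceph_frag.h): a frag packs "bits" into the top
 * 8 bits and a value into the low 24 bits.  The root frag is
 * ceph_frag_make(0, 0) == 0x0 and covers the whole hash space.  If
 * the MDS splits it by 1 (split_by == 1), the two children are
 *
 *	ceph_frag_make_child(0x0, 1, 0) == 0x1000000
 *	ceph_frag_make_child(0x0, 1, 1) == 0x1800000
 *
 * and ceph_frag_contains_value() tells us which child covers a given
 * dentry hash; that is how ceph_choose_frag() below walks the tree.
 */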

/*
 * find/create a frag in the tree
 */
static struct ceph_inode_frag *__get_or_create_frag(struct ceph_inode_info *ci,
						    u32 f)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_frag *frag;
	int c;

	p = &ci->i_fragtree.rb_node;
	while (*p) {
		parent = *p;
		frag = rb_entry(parent, struct ceph_inode_frag, node);
		c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else
			return frag;
	}

	frag = kmalloc(sizeof(*frag), GFP_NOFS);
	if (!frag) {
		pr_err("__get_or_create_frag ENOMEM on %p %llx.%llx "
		       "frag %x\n", &ci->vfs_inode,
		       ceph_vinop(&ci->vfs_inode), f);
		return ERR_PTR(-ENOMEM);
	}
	frag->frag = f;
	frag->split_by = 0;
	frag->mds = -1;
	frag->ndist = 0;

	rb_link_node(&frag->node, parent, p);
	rb_insert_color(&frag->node, &ci->i_fragtree);

	dout("get_or_create_frag added %llx.%llx frag %x\n",
	     ceph_vinop(&ci->vfs_inode), f);
	return frag;
}

/*
 * find a specific frag @f
 */
struct ceph_inode_frag *__ceph_find_frag(struct ceph_inode_info *ci, u32 f)
{
	struct rb_node *n = ci->i_fragtree.rb_node;

	while (n) {
		struct ceph_inode_frag *frag =
			rb_entry(n, struct ceph_inode_frag, node);
		int c = ceph_frag_compare(f, frag->frag);
		if (c < 0)
			n = n->rb_left;
		else if (c > 0)
			n = n->rb_right;
		else
			return frag;
	}
	return NULL;
}

/*
 * Choose frag containing the given value @v.  If @pfrag is
 * specified, copy the frag delegation info to the caller if
 * it is present.
 */
u32 ceph_choose_frag(struct ceph_inode_info *ci, u32 v,
		     struct ceph_inode_frag *pfrag,
		     int *found)
{
	u32 t = ceph_frag_make(0, 0);
	struct ceph_inode_frag *frag;
	unsigned nway, i;
	u32 n;

	if (found)
		*found = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	while (1) {
		WARN_ON(!ceph_frag_contains_value(t, v));
		frag = __ceph_find_frag(ci, t);
		if (!frag)
			break; /* t is a leaf */
		if (frag->split_by == 0) {
			if (pfrag)
				memcpy(pfrag, frag, sizeof(*pfrag));
			if (found)
				*found = 1;
			break;
		}

		/* choose child */
		nway = 1 << frag->split_by;
		dout("choose_frag(%x) %x splits by %d (%d ways)\n", v, t,
		     frag->split_by, nway);
		for (i = 0; i < nway; i++) {
			n = ceph_frag_make_child(t, frag->split_by, i);
			if (ceph_frag_contains_value(n, v)) {
				t = n;
				break;
			}
		}
		BUG_ON(i == nway);
	}
	dout("choose_frag(%x) = %x\n", v, t);

	mutex_unlock(&ci->i_fragtree_mutex);
	return t;
}
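
/*
 * Example walk (illustrative, values hypothetical): with a single
 * split_by == 1 entry for the root frag in i_fragtree, looking up a
 * hash value v with its top bit set descends from root 0x0 to child
 * 0x1800000, finds no further entry there, and returns 0x1800000 as
 * the leaf frag covering v.
 */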

/*
 * Process dirfrag (delegation) info from the mds.  Include leaf
 * fragment in tree ONLY if ndist > 0.  Otherwise, only
 * branches/splits are included in i_fragtree.
 */
static int ceph_fill_dirfrag(struct inode *inode,
			     struct ceph_mds_reply_dirfrag *dirinfo)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	u32 id = le32_to_cpu(dirinfo->frag);
	int mds = le32_to_cpu(dirinfo->auth);
	int ndist = le32_to_cpu(dirinfo->ndist);
	int i;
	int err = 0;

	mutex_lock(&ci->i_fragtree_mutex);
	if (ndist == 0) {
		/* no delegation info needed. */
		frag = __ceph_find_frag(ci, id);
		if (!frag)
			goto out;
		if (frag->split_by == 0) {
			/* tree leaf, remove */
			dout("fill_dirfrag removed %llx.%llx frag %x"
			     " (no ref)\n", ceph_vinop(inode), id);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
		} else {
			/* tree branch, keep and clear */
			dout("fill_dirfrag cleared %llx.%llx frag %x"
			     " referral\n", ceph_vinop(inode), id);
			frag->mds = -1;
			frag->ndist = 0;
		}
		goto out;
	}


	/* find/add this frag to store mds delegation info */
	frag = __get_or_create_frag(ci, id);
	if (IS_ERR(frag)) {
		/* this is not the end of the world; we can continue
		   with bad/inaccurate delegation info */
		pr_err("fill_dirfrag ENOMEM on mds ref %llx.%llx fg %x\n",
		       ceph_vinop(inode), le32_to_cpu(dirinfo->frag));
		err = -ENOMEM;
		goto out;
	}

	frag->mds = mds;
	frag->ndist = min_t(u32, ndist, CEPH_MAX_DIRFRAG_REP);
	for (i = 0; i < frag->ndist; i++)
		frag->dist[i] = le32_to_cpu(dirinfo->dist[i]);
	dout("fill_dirfrag %llx.%llx frag %x ndist=%d\n",
	     ceph_vinop(inode), frag->frag, frag->ndist);

out:
	mutex_unlock(&ci->i_fragtree_mutex);
	return err;
}
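
/*
 * For example (hypothetical values): a dirfrag reply with auth == 1,
 * ndist == 2 and dist == {1, 3} records that mds1 is authoritative for
 * the frag and that mds1 and mds3 hold replicas, so requests for that
 * frag can be directed accordingly.
 */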


/*
 * initialize a newly allocated inode.
 */
struct inode *ceph_alloc_inode(struct super_block *sb)
{
	struct ceph_inode_info *ci;
	int i;

	ci = kmem_cache_alloc(ceph_inode_cachep, GFP_NOFS);
	if (!ci)
		return NULL;

	dout("alloc_inode %p\n", &ci->vfs_inode);

	spin_lock_init(&ci->i_ceph_lock);

	ci->i_version = 0;
	ci->i_time_warp_seq = 0;
	ci->i_ceph_flags = 0;
	atomic_set(&ci->i_release_count, 1);
	atomic_set(&ci->i_complete_count, 0);
	ci->i_symlink = NULL;

	memset(&ci->i_dir_layout, 0, sizeof(ci->i_dir_layout));

	ci->i_fragtree = RB_ROOT;
	mutex_init(&ci->i_fragtree_mutex);

	ci->i_xattrs.blob = NULL;
	ci->i_xattrs.prealloc_blob = NULL;
	ci->i_xattrs.dirty = false;
	ci->i_xattrs.index = RB_ROOT;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.version = 0;
	ci->i_xattrs.index_version = 0;

	ci->i_caps = RB_ROOT;
	ci->i_auth_cap = NULL;
	ci->i_dirty_caps = 0;
	ci->i_flushing_caps = 0;
	INIT_LIST_HEAD(&ci->i_dirty_item);
	INIT_LIST_HEAD(&ci->i_flushing_item);
	ci->i_cap_flush_seq = 0;
	ci->i_cap_flush_last_tid = 0;
	memset(&ci->i_cap_flush_tid, 0, sizeof(ci->i_cap_flush_tid));
	init_waitqueue_head(&ci->i_cap_wq);
	ci->i_hold_caps_min = 0;
	ci->i_hold_caps_max = 0;
	INIT_LIST_HEAD(&ci->i_cap_delay_list);
	ci->i_cap_exporting_mds = 0;
	ci->i_cap_exporting_mseq = 0;
	ci->i_cap_exporting_issued = 0;
	INIT_LIST_HEAD(&ci->i_cap_snaps);
	ci->i_head_snapc = NULL;
	ci->i_snap_caps = 0;

	for (i = 0; i < CEPH_FILE_MODE_NUM; i++)
		ci->i_nr_by_mode[i] = 0;

	mutex_init(&ci->i_truncate_mutex);
	ci->i_truncate_seq = 0;
	ci->i_truncate_size = 0;
	ci->i_truncate_pending = 0;

	ci->i_max_size = 0;
	ci->i_reported_size = 0;
	ci->i_wanted_max_size = 0;
	ci->i_requested_max_size = 0;

	ci->i_pin_ref = 0;
	ci->i_rd_ref = 0;
	ci->i_rdcache_ref = 0;
	ci->i_wr_ref = 0;
	ci->i_wb_ref = 0;
	ci->i_wrbuffer_ref = 0;
	ci->i_wrbuffer_ref_head = 0;
	ci->i_shared_gen = 0;
	ci->i_rdcache_gen = 0;
	ci->i_rdcache_revoking = 0;

	INIT_LIST_HEAD(&ci->i_unsafe_writes);
	INIT_LIST_HEAD(&ci->i_unsafe_dirops);
	spin_lock_init(&ci->i_unsafe_lock);

	ci->i_snap_realm = NULL;
	INIT_LIST_HEAD(&ci->i_snap_realm_item);
	INIT_LIST_HEAD(&ci->i_snap_flush_item);

	INIT_WORK(&ci->i_wb_work, ceph_writeback_work);
	INIT_WORK(&ci->i_pg_inv_work, ceph_invalidate_work);

	INIT_WORK(&ci->i_vmtruncate_work, ceph_vmtruncate_work);

	ceph_fscache_inode_init(ci);

	return &ci->vfs_inode;
}

static void ceph_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	struct ceph_inode_info *ci = ceph_inode(inode);

	kmem_cache_free(ceph_inode_cachep, ci);
}

void ceph_destroy_inode(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_frag *frag;
	struct rb_node *n;

	dout("destroy_inode %p ino %llx.%llx\n", inode, ceph_vinop(inode));

	ceph_fscache_unregister_inode_cookie(ci);

	ceph_queue_caps_release(inode);

	/*
	 * we may still have a snap_realm reference if there are stray
	 * caps in i_cap_exporting_issued or i_snap_caps.
	 */
	if (ci->i_snap_realm) {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
		struct ceph_snap_realm *realm = ci->i_snap_realm;

		dout(" dropping residual ref to snap realm %p\n", realm);
		spin_lock(&realm->inodes_with_caps_lock);
		list_del_init(&ci->i_snap_realm_item);
		spin_unlock(&realm->inodes_with_caps_lock);
		ceph_put_snap_realm(mdsc, realm);
	}

	kfree(ci->i_symlink);
	while ((n = rb_first(&ci->i_fragtree)) != NULL) {
		frag = rb_entry(n, struct ceph_inode_frag, node);
		rb_erase(n, &ci->i_fragtree);
		kfree(frag);
	}

	__ceph_destroy_xattrs(ci);
	if (ci->i_xattrs.blob)
		ceph_buffer_put(ci->i_xattrs.blob);
	if (ci->i_xattrs.prealloc_blob)
		ceph_buffer_put(ci->i_xattrs.prealloc_blob);

	call_rcu(&inode->i_rcu, ceph_i_callback);
}

int ceph_drop_inode(struct inode *inode)
{
	/*
	 * A positive dentry and its corresponding inode always arrive
	 * together in the MDS reply, so there is no need to keep the
	 * inode in the cache after dropping all its aliases.
	 */
	return 1;
}

/*
 * Helpers to fill in size, ctime, mtime, and atime.  We have to be
 * careful because either the client or MDS may have more up to date
 * info, depending on which capabilities are held, and whether
 * time_warp_seq or truncate_seq have increased.  (Ordinarily, mtime
 * and size are monotonically increasing, except when utimes() or
 * truncate() increments the corresponding _seq values.)
 */
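/*
 * Concretely (illustrative numbers): if the MDS reports truncate_seq 4
 * while we hold i_truncate_seq 3, someone truncated the file, so the
 * reported size wins even if it is smaller than our i_size; if the
 * seqs are equal, we only let the size grow.  ceph_fill_file_time()
 * further below applies the same idea to mtime/atime via time_warp_seq.
 */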
int ceph_fill_file_size(struct inode *inode, int issued,
			u32 truncate_seq, u64 truncate_size, u64 size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int queue_trunc = 0;

	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) > 0 ||
	    (truncate_seq == ci->i_truncate_seq && size > inode->i_size)) {
		dout("size %lld -> %llu\n", inode->i_size, size);
		inode->i_size = size;
		inode->i_blocks = (size + (1<<9) - 1) >> 9;
		ci->i_reported_size = size;
		if (truncate_seq != ci->i_truncate_seq) {
			dout("truncate_seq %u -> %u\n",
			     ci->i_truncate_seq, truncate_seq);
			ci->i_truncate_seq = truncate_seq;

			/* the MDS should have revoked these caps */
			WARN_ON_ONCE(issued & (CEPH_CAP_FILE_EXCL |
					       CEPH_CAP_FILE_RD |
					       CEPH_CAP_FILE_WR |
					       CEPH_CAP_FILE_LAZYIO));
			/*
			 * If we hold relevant caps, or in the case where we're
			 * not the only client referencing this file and we
			 * don't hold those caps, then we need to check whether
			 * the file is either opened or mmaped
			 */
			if ((issued & (CEPH_CAP_FILE_CACHE|
				       CEPH_CAP_FILE_BUFFER)) ||
			    mapping_mapped(inode->i_mapping) ||
			    __ceph_caps_file_wanted(ci)) {
				ci->i_truncate_pending++;
				queue_trunc = 1;
			}
		}
	}
	if (ceph_seq_cmp(truncate_seq, ci->i_truncate_seq) >= 0 &&
	    ci->i_truncate_size != truncate_size) {
		dout("truncate_size %lld -> %llu\n", ci->i_truncate_size,
		     truncate_size);
		ci->i_truncate_size = truncate_size;
	}

	if (queue_trunc)
		ceph_fscache_invalidate(inode);

	return queue_trunc;
}

void ceph_fill_file_time(struct inode *inode, int issued,
			 u64 time_warp_seq, struct timespec *ctime,
			 struct timespec *mtime, struct timespec *atime)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int warn = 0;

	if (issued & (CEPH_CAP_FILE_EXCL|
		      CEPH_CAP_FILE_WR|
		      CEPH_CAP_FILE_BUFFER|
		      CEPH_CAP_AUTH_EXCL|
		      CEPH_CAP_XATTR_EXCL)) {
		if (timespec_compare(ctime, &inode->i_ctime) > 0) {
			dout("ctime %ld.%09ld -> %ld.%09ld inc w/ cap\n",
			     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
			     ctime->tv_sec, ctime->tv_nsec);
			inode->i_ctime = *ctime;
		}
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) > 0) {
			/* the MDS did a utimes() */
			dout("mtime %ld.%09ld -> %ld.%09ld "
			     "tw %d -> %d\n",
			     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
			     mtime->tv_sec, mtime->tv_nsec,
			     ci->i_time_warp_seq, (int)time_warp_seq);

			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else if (time_warp_seq == ci->i_time_warp_seq) {
			/* nobody did utimes(); take the max */
			if (timespec_compare(mtime, &inode->i_mtime) > 0) {
				dout("mtime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_mtime.tv_sec,
				     inode->i_mtime.tv_nsec,
				     mtime->tv_sec, mtime->tv_nsec);
				inode->i_mtime = *mtime;
			}
			if (timespec_compare(atime, &inode->i_atime) > 0) {
				dout("atime %ld.%09ld -> %ld.%09ld inc\n",
				     inode->i_atime.tv_sec,
				     inode->i_atime.tv_nsec,
				     atime->tv_sec, atime->tv_nsec);
				inode->i_atime = *atime;
			}
		} else if (issued & CEPH_CAP_FILE_EXCL) {
			/* we did a utimes(); ignore mds values */
		} else {
			warn = 1;
		}
	} else {
		/* we have no write|excl caps; whatever the MDS says is true */
		if (ceph_seq_cmp(time_warp_seq, ci->i_time_warp_seq) >= 0) {
			inode->i_ctime = *ctime;
			inode->i_mtime = *mtime;
			inode->i_atime = *atime;
			ci->i_time_warp_seq = time_warp_seq;
		} else {
			warn = 1;
		}
	}
	if (warn) /* time_warp_seq shouldn't go backwards */
		dout("%p mds time_warp_seq %llu < %u\n",
		     inode, time_warp_seq, ci->i_time_warp_seq);
}

/*
 * Populate an inode based on info from mds.  May be called on new or
 * existing inodes.
 */
static int fill_inode(struct inode *inode,
		      struct ceph_mds_reply_info_in *iinfo,
		      struct ceph_mds_reply_dirfrag *dirinfo,
		      struct ceph_mds_session *session,
		      unsigned long ttl_from, int cap_fmode,
		      struct ceph_cap_reservation *caps_reservation)
{
	struct ceph_mds_reply_inode *info = iinfo->in;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int i;
	int issued = 0, implemented;
	struct timespec mtime, atime, ctime;
	u32 nsplits;
	struct ceph_inode_frag *frag;
	struct rb_node *rb_node;
	struct ceph_buffer *xattr_blob = NULL;
	int err = 0;
	int queue_trunc = 0;

	dout("fill_inode %p ino %llx.%llx v %llu had %llu\n",
	     inode, ceph_vinop(inode), le64_to_cpu(info->version),
	     ci->i_version);

	/*
	 * prealloc xattr data, if it looks like we'll need it.  only
	 * if len > 4 (meaning there are actually xattrs; the first 4
	 * bytes are the xattr count).
	 */
	if (iinfo->xattr_len > 4) {
		xattr_blob = ceph_buffer_new(iinfo->xattr_len, GFP_NOFS);
		if (!xattr_blob)
			pr_err("fill_inode ENOMEM xattr blob %d bytes\n",
			       iinfo->xattr_len);
	}

	spin_lock(&ci->i_ceph_lock);

	/*
	 * provided version will be odd if inode value is projected,
	 * even if stable.  skip the update if we have newer stable
	 * info (ours>=theirs, e.g. due to racing mds replies), unless
	 * we are getting projected (unstable) info (in which case the
	 * version is odd, and we want ours>theirs).
	 *   us   them
	 *   2    2     skip
	 *   3    2     skip
	 *   3    3     update
	 */
	if (le64_to_cpu(info->version) > 0 &&
	    (ci->i_version & ~1) >= le64_to_cpu(info->version))
		goto no_change;

	issued = __ceph_caps_issued(ci, &implemented);
	issued |= implemented | __ceph_caps_dirty(ci);

	/* update inode */
	ci->i_version = le64_to_cpu(info->version);
	inode->i_version++;
	inode->i_rdev = le32_to_cpu(info->rdev);

	if ((issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(info->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(info->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(info->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((issued & CEPH_CAP_LINK_EXCL) == 0)
		set_nlink(inode, le32_to_cpu(info->nlink));

	/* be careful with mtime, atime, size */
	ceph_decode_timespec(&atime, &info->atime);
	ceph_decode_timespec(&mtime, &info->mtime);
	ceph_decode_timespec(&ctime, &info->ctime);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  le32_to_cpu(info->truncate_seq),
					  le64_to_cpu(info->truncate_size),
					  le64_to_cpu(info->size));
	ceph_fill_file_time(inode, issued,
			    le32_to_cpu(info->time_warp_seq),
			    &ctime, &mtime, &atime);

	/* only update max_size on auth cap */
	if ((info->cap.flags & CEPH_CAP_FLAG_AUTH) &&
	    ci->i_max_size != le64_to_cpu(info->max_size)) {
		dout("max_size %lld -> %llu\n", ci->i_max_size,
		     le64_to_cpu(info->max_size));
		ci->i_max_size = le64_to_cpu(info->max_size);
	}

	ci->i_layout = info->layout;
	inode->i_blkbits = fls(le32_to_cpu(info->layout.fl_stripe_unit)) - 1;

	/* xattrs */
	/* note that if i_xattrs.len <= 4, i_xattrs.data will still be NULL. */
	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 &&
	    le64_to_cpu(info->xattr_version) > ci->i_xattrs.version) {
		if (ci->i_xattrs.blob)
			ceph_buffer_put(ci->i_xattrs.blob);
		ci->i_xattrs.blob = xattr_blob;
		if (xattr_blob)
			memcpy(ci->i_xattrs.blob->vec.iov_base,
			       iinfo->xattr_data, iinfo->xattr_len);
		ci->i_xattrs.version = le64_to_cpu(info->xattr_version);
		ceph_forget_all_cached_acls(inode);
		xattr_blob = NULL;
	}

	inode->i_mapping->a_ops = &ceph_aops;
	inode->i_mapping->backing_dev_info =
		&ceph_sb_to_client(inode->i_sb)->backing_dev_info;

	switch (inode->i_mode & S_IFMT) {
	case S_IFIFO:
	case S_IFBLK:
	case S_IFCHR:
	case S_IFSOCK:
		init_special_inode(inode, inode->i_mode, inode->i_rdev);
		inode->i_op = &ceph_file_iops;
		break;
	case S_IFREG:
		inode->i_op = &ceph_file_iops;
		inode->i_fop = &ceph_file_fops;
		break;
	case S_IFLNK:
		inode->i_op = &ceph_symlink_iops;
		if (!ci->i_symlink) {
			u32 symlen = iinfo->symlink_len;
			char *sym;

			spin_unlock(&ci->i_ceph_lock);

			err = -EINVAL;
			if (WARN_ON(symlen != inode->i_size))
				goto out;

			err = -ENOMEM;
			sym = kstrndup(iinfo->symlink, symlen, GFP_NOFS);
			if (!sym)
				goto out;

			spin_lock(&ci->i_ceph_lock);
			if (!ci->i_symlink)
				ci->i_symlink = sym;
			else
				kfree(sym); /* lost a race */
		}
		break;
	case S_IFDIR:
		inode->i_op = &ceph_dir_iops;
		inode->i_fop = &ceph_dir_fops;

		ci->i_dir_layout = iinfo->dir_layout;

		ci->i_files = le64_to_cpu(info->files);
		ci->i_subdirs = le64_to_cpu(info->subdirs);
		ci->i_rbytes = le64_to_cpu(info->rbytes);
		ci->i_rfiles = le64_to_cpu(info->rfiles);
		ci->i_rsubdirs = le64_to_cpu(info->rsubdirs);
		ceph_decode_timespec(&ci->i_rctime, &info->rctime);
		break;
	default:
		pr_err("fill_inode %llx.%llx BAD mode 0%o\n",
		       ceph_vinop(inode), inode->i_mode);
	}

	/* set dir completion flag? */
	if (S_ISDIR(inode->i_mode) &&
	    ci->i_files == 0 && ci->i_subdirs == 0 &&
	    ceph_snap(inode) == CEPH_NOSNAP &&
	    (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
	    (issued & CEPH_CAP_FILE_EXCL) == 0 &&
	    !__ceph_dir_is_complete(ci)) {
		dout(" marking %p complete (empty)\n", inode);
		__ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count));
		ci->i_max_offset = 2;
	}
no_change:
	spin_unlock(&ci->i_ceph_lock);

	/* queue truncate if we saw i_size decrease */
	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	/* populate frag tree */
	/* FIXME: move me up, if/when version reflects fragtree changes */
	nsplits = le32_to_cpu(info->fragtree.nsplits);
	mutex_lock(&ci->i_fragtree_mutex);
	rb_node = rb_first(&ci->i_fragtree);
	for (i = 0; i < nsplits; i++) {
		u32 id = le32_to_cpu(info->fragtree.splits[i].frag);
		frag = NULL;
		while (rb_node) {
			frag = rb_entry(rb_node, struct ceph_inode_frag, node);
			if (ceph_frag_compare(frag->frag, id) >= 0) {
				if (frag->frag != id)
					frag = NULL;
				else
					rb_node = rb_next(rb_node);
				break;
			}
			rb_node = rb_next(rb_node);
			rb_erase(&frag->node, &ci->i_fragtree);
			kfree(frag);
			frag = NULL;
		}
		if (!frag) {
			frag = __get_or_create_frag(ci, id);
			if (IS_ERR(frag))
				continue;
		}
		frag->split_by = le32_to_cpu(info->fragtree.splits[i].by);
		dout(" frag %x split by %d\n", frag->frag, frag->split_by);
	}
	while (rb_node) {
		frag = rb_entry(rb_node, struct ceph_inode_frag, node);
		rb_node = rb_next(rb_node);
		rb_erase(&frag->node, &ci->i_fragtree);
		kfree(frag);
	}
	mutex_unlock(&ci->i_fragtree_mutex);

	/* were we issued a capability? */
	if (info->cap.caps) {
		if (ceph_snap(inode) == CEPH_NOSNAP) {
			ceph_add_cap(inode, session,
				     le64_to_cpu(info->cap.cap_id),
				     cap_fmode,
				     le32_to_cpu(info->cap.caps),
				     le32_to_cpu(info->cap.wanted),
				     le32_to_cpu(info->cap.seq),
				     le32_to_cpu(info->cap.mseq),
				     le64_to_cpu(info->cap.realm),
				     info->cap.flags,
				     caps_reservation);
		} else {
			spin_lock(&ci->i_ceph_lock);
			dout(" %p got snap_caps %s\n", inode,
			     ceph_cap_string(le32_to_cpu(info->cap.caps)));
			ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
			if (cap_fmode >= 0)
				__ceph_get_fmode(ci, cap_fmode);
			spin_unlock(&ci->i_ceph_lock);
		}
	} else if (cap_fmode >= 0) {
		pr_warning("mds issued no caps on %llx.%llx\n",
			   ceph_vinop(inode));
		__ceph_get_fmode(ci, cap_fmode);
	}

	/* update delegation info? */
	if (dirinfo)
		ceph_fill_dirfrag(inode, dirinfo);

	err = 0;

out:
	if (xattr_blob)
		ceph_buffer_put(xattr_blob);
	return err;
}

/*
 * caller should hold session s_mutex.
 */
static void update_dentry_lease(struct dentry *dentry,
				struct ceph_mds_reply_lease *lease,
				struct ceph_mds_session *session,
				unsigned long from_time)
{
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	long unsigned duration = le32_to_cpu(lease->duration_ms);
	long unsigned ttl = from_time + (duration * HZ) / 1000;
	long unsigned half_ttl = from_time + (duration * HZ / 2) / 1000;
	struct inode *dir;

	/* only track leases on regular dentries */
	if (dentry->d_op != &ceph_dentry_ops)
		return;

	spin_lock(&dentry->d_lock);
	dout("update_dentry_lease %p duration %lu ms ttl %lu\n",
	     dentry, duration, ttl);

	/* make lease_shared_gen match directory */
	dir = dentry->d_parent->d_inode;
	di->lease_shared_gen = ceph_inode(dir)->i_shared_gen;

	if (duration == 0)
		goto out_unlock;

	if (di->lease_gen == session->s_cap_gen &&
	    time_before(ttl, dentry->d_time))
		goto out_unlock;  /* we already have a newer lease. */

	if (di->lease_session && di->lease_session != session)
		goto out_unlock;

	ceph_dentry_lru_touch(dentry);

	if (!di->lease_session)
		di->lease_session = ceph_get_mds_session(session);
	di->lease_gen = session->s_cap_gen;
	di->lease_seq = le32_to_cpu(lease->seq);
	di->lease_renew_after = half_ttl;
	di->lease_renew_from = 0;
	dentry->d_time = ttl;
out_unlock:
	spin_unlock(&dentry->d_lock);
	return;
}

/*
 * Set dentry's directory position based on the current dir's max, and
 * order it in d_subdirs, so that dcache_readdir behaves.
 *
 * Always called under directory's i_mutex.
 */
static void ceph_set_dentry_offset(struct dentry *dn)
{
	struct dentry *dir = dn->d_parent;
	struct inode *inode = dir->d_inode;
	struct ceph_inode_info *ci;
	struct ceph_dentry_info *di;

	BUG_ON(!inode);

	ci = ceph_inode(inode);
	di = ceph_dentry(dn);

	spin_lock(&ci->i_ceph_lock);
	if (!__ceph_dir_is_complete(ci)) {
		spin_unlock(&ci->i_ceph_lock);
		return;
	}
	di->offset = ceph_inode(inode)->i_max_offset++;
	spin_unlock(&ci->i_ceph_lock);

	spin_lock(&dir->d_lock);
	spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
	list_move(&dn->d_u.d_child, &dir->d_subdirs);
	dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
	     dn->d_u.d_child.prev, dn->d_u.d_child.next);
	spin_unlock(&dn->d_lock);
	spin_unlock(&dir->d_lock);
}

/*
 * splice a dentry to an inode.
 * caller must hold directory i_mutex for this to be safe.
 *
 * we will only rehash the resulting dentry if @prehash is
 * true; @prehash will be set to false (for the benefit of
 * the caller) if we fail.
 */
static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
				    bool *prehash, bool set_offset)
{
	struct dentry *realdn;

	BUG_ON(dn->d_inode);

	/* dn must be unhashed */
	if (!d_unhashed(dn))
		d_drop(dn);
	realdn = d_materialise_unique(dn, in);
	if (IS_ERR(realdn)) {
		pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
		       PTR_ERR(realdn), dn, in, ceph_vinop(in));
		if (prehash)
			*prehash = false; /* don't rehash on error */
		dn = realdn; /* note realdn contains the error */
		goto out;
	} else if (realdn) {
		dout("dn %p (%d) spliced with %p (%d) "
		     "inode %p ino %llx.%llx\n",
		     dn, d_count(dn),
		     realdn, d_count(realdn),
		     realdn->d_inode, ceph_vinop(realdn->d_inode));
		dput(dn);
		dn = realdn;
	} else {
		BUG_ON(!ceph_dentry(dn));
		dout("dn %p attached to %p ino %llx.%llx\n",
		     dn, dn->d_inode, ceph_vinop(dn->d_inode));
	}
	if ((!prehash || *prehash) && d_unhashed(dn))
		d_rehash(dn);
	if (set_offset)
		ceph_set_dentry_offset(dn);
out:
	return dn;
}

/*
 * Incorporate results into the local cache.  This is either just
 * one inode, or a directory, dentry, and possibly linked-to inode (e.g.,
 * after a lookup).
 *
 * A reply may contain
 *         a directory inode along with a dentry.
 *  and/or a target inode
 *
 * Called with snap_rwsem (read).
 */
int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
		    struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct inode *in = NULL;
	struct ceph_mds_reply_inode *ininfo;
	struct ceph_vino vino;
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	int err = 0;

	dout("fill_trace %p is_dentry %d is_target %d\n", req,
	     rinfo->head->is_dentry, rinfo->head->is_target);

#if 0
	/*
	 * Debugging hook:
	 *
	 * If we resend completed ops to a recovering mds, we get no
	 * trace.  Since that is very rare, pretend this is the case
	 * to ensure the 'no trace' handlers in the callers behave.
	 *
	 * Fill in inodes unconditionally to avoid breaking cap
	 * invariants.
	 */
	if (rinfo->head->op & CEPH_MDS_OP_WRITE) {
		pr_info("fill_trace faking empty trace on %lld %s\n",
			req->r_tid, ceph_mds_op_name(rinfo->head->op));
		if (rinfo->head->is_dentry) {
			rinfo->head->is_dentry = 0;
			err = fill_inode(req->r_locked_dir,
					 &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
		}
		if (rinfo->head->is_target) {
			rinfo->head->is_target = 0;
			ininfo = rinfo->targeti.in;
			vino.ino = le64_to_cpu(ininfo->ino);
			vino.snap = le64_to_cpu(ininfo->snapid);
			in = ceph_get_inode(sb, vino);
			err = fill_inode(in, &rinfo->targeti, NULL,
					 session, req->r_request_started,
					 req->r_fmode,
					 &req->r_caps_reservation);
			iput(in);
		}
	}
#endif

	if (!rinfo->head->is_target && !rinfo->head->is_dentry) {
		dout("fill_trace reply is empty!\n");
		if (rinfo->head->result == 0 && req->r_locked_dir)
			ceph_invalidate_dir_request(req);
		return 0;
	}

	if (rinfo->head->is_dentry) {
		struct inode *dir = req->r_locked_dir;

		if (dir) {
			err = fill_inode(dir, &rinfo->diri, rinfo->dirfrag,
					 session, req->r_request_started, -1,
					 &req->r_caps_reservation);
			if (err < 0)
				return err;
		} else {
			WARN_ON_ONCE(1);
		}
	}

	if (rinfo->head->is_target) {
		vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
		vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);

		in = ceph_get_inode(sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			goto done;
		}
		req->r_target_inode = in;

		err = fill_inode(in, &rinfo->targeti, NULL,
				session, req->r_request_started,
				(le32_to_cpu(rinfo->head->result) == 0) ?
				req->r_fmode : -1,
				&req->r_caps_reservation);
		if (err < 0) {
			pr_err("fill_inode badness %p %llx.%llx\n",
				in, ceph_vinop(in));
			goto done;
		}
	}

	/*
	 * ignore null lease/binding on snapdir ENOENT, or else we
	 * will have trouble splicing in the virtual snapdir later
	 */
	if (rinfo->head->is_dentry && !req->r_aborted &&
	    req->r_locked_dir &&
	    (rinfo->head->is_target || strncmp(req->r_dentry->d_name.name,
					       fsc->mount_options->snapdir_name,
					       req->r_dentry->d_name.len))) {
		/*
		 * lookup link rename   : null -> possibly existing inode
		 * mknod symlink mkdir  : null -> new inode
		 * unlink               : linked -> null
		 */
		struct inode *dir = req->r_locked_dir;
		struct dentry *dn = req->r_dentry;
		bool have_dir_cap, have_lease;

		BUG_ON(!dn);
		BUG_ON(!dir);
		BUG_ON(dn->d_parent->d_inode != dir);
		BUG_ON(ceph_ino(dir) !=
		       le64_to_cpu(rinfo->diri.in->ino));
		BUG_ON(ceph_snap(dir) !=
		       le64_to_cpu(rinfo->diri.in->snapid));

		/* do we have a lease on the whole dir? */
		have_dir_cap =
			(le32_to_cpu(rinfo->diri.in->cap.caps) &
			 CEPH_CAP_FILE_SHARED);

		/* do we have a dn lease? */
		have_lease = have_dir_cap ||
			le32_to_cpu(rinfo->dlease->duration_ms);
		if (!have_lease)
			dout("fill_trace  no dentry lease or dir cap\n");

		/* rename? */
		if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);
			dout("fill_trace doing d_move %p -> %p\n",
			     req->r_old_dentry, dn);

			d_move(req->r_old_dentry, dn);
			dout(" src %p '%.*s' dst %p '%.*s'\n",
			     req->r_old_dentry,
			     req->r_old_dentry->d_name.len,
			     req->r_old_dentry->d_name.name,
			     dn, dn->d_name.len, dn->d_name.name);

			/* ensure target dentry is invalidated, despite
			   rehashing bug in vfs_rename_dir */
			ceph_invalidate_dentry_lease(dn);

			/*
			 * d_move() puts the renamed dentry at the end of
			 * d_subdirs.  We need to assign it an appropriate
			 * directory offset so we can behave when dir is
			 * complete.
			 */
			ceph_set_dentry_offset(req->r_old_dentry);
			dout("dn %p gets new offset %lld\n", req->r_old_dentry,
			     ceph_dentry(req->r_old_dentry)->offset);

			dn = req->r_old_dentry;  /* use old_dentry */
		}

		/* null dentry? */
		if (!rinfo->head->is_target) {
			dout("fill_trace null dentry\n");
			if (dn->d_inode) {
				dout("d_delete %p\n", dn);
				d_delete(dn);
			} else {
				dout("d_instantiate %p NULL\n", dn);
				d_instantiate(dn, NULL);
				if (have_lease && d_unhashed(dn))
					d_rehash(dn);
				update_dentry_lease(dn, rinfo->dlease,
						    session,
						    req->r_request_started);
			}
			goto done;
		}

		/* attach proper inode */
		if (!dn->d_inode) {
			ihold(in);
			dn = splice_dentry(dn, in, &have_lease, true);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				goto done;
			}
			req->r_dentry = dn;  /* may have spliced */
		} else if (dn->d_inode && dn->d_inode != in) {
			dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
			     dn, dn->d_inode, ceph_vinop(dn->d_inode),
			     ceph_vinop(in));
			have_lease = false;
		}

		if (have_lease)
			update_dentry_lease(dn, rinfo->dlease, session,
					    req->r_request_started);
		dout(" final dn %p\n", dn);
	} else if (!req->r_aborted &&
		   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
		    req->r_op == CEPH_MDS_OP_MKSNAP)) {
		struct dentry *dn = req->r_dentry;

		/* fill out a snapdir LOOKUPSNAP dentry */
		BUG_ON(!dn);
		BUG_ON(!req->r_locked_dir);
		BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
		ininfo = rinfo->targeti.in;
		vino.ino = le64_to_cpu(ininfo->ino);
		vino.snap = le64_to_cpu(ininfo->snapid);
		dout(" linking snapped dir %p to dn %p\n", in, dn);
		ihold(in);
		dn = splice_dentry(dn, in, NULL, true);
		if (IS_ERR(dn)) {
			err = PTR_ERR(dn);
			goto done;
		}
		req->r_dentry = dn;  /* may have spliced */
	}
done:
	dout("fill_trace done err=%d\n", err);
	return err;
}

/*
 * Prepopulate our cache with readdir results, leases, etc.
 */
static int readdir_prepopulate_inodes_only(struct ceph_mds_request *req,
					   struct ceph_mds_session *session)
{
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	int i, err = 0;

	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;
		struct inode *in;
		int rc;

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

		in = ceph_get_inode(req->r_dentry->d_sb, vino);
		if (IS_ERR(in)) {
			err = PTR_ERR(in);
			dout("new_inode badness got %d\n", err);
			continue;
		}
		rc = fill_inode(in, &rinfo->dir_in[i], NULL, session,
				req->r_request_started, -1,
				&req->r_caps_reservation);
		if (rc < 0) {
			pr_err("fill_inode badness on %p got %d\n", in, rc);
			err = rc;
			continue;
		}
	}

	return err;
}

int ceph_readdir_prepopulate(struct ceph_mds_request *req,
			     struct ceph_mds_session *session)
{
	struct dentry *parent = req->r_dentry;
	struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
	struct qstr dname;
	struct dentry *dn;
	struct inode *in;
	int err = 0, ret, i;
	struct inode *snapdir = NULL;
	struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
	struct ceph_dentry_info *di;
	u64 r_readdir_offset = req->r_readdir_offset;
	u32 frag = le32_to_cpu(rhead->args.readdir.frag);

	if (rinfo->dir_dir &&
	    le32_to_cpu(rinfo->dir_dir->frag) != frag) {
		dout("readdir_prepopulate got new frag %x -> %x\n",
		     frag, le32_to_cpu(rinfo->dir_dir->frag));
		frag = le32_to_cpu(rinfo->dir_dir->frag);
		if (ceph_frag_is_leftmost(frag))
			r_readdir_offset = 2;
		else
			r_readdir_offset = 0;
	}

	if (req->r_aborted)
		return readdir_prepopulate_inodes_only(req, session);

	if (le32_to_cpu(rinfo->head->op) == CEPH_MDS_OP_LSSNAP) {
		snapdir = ceph_get_snapdir(parent->d_inode);
		parent = d_find_alias(snapdir);
		dout("readdir_prepopulate %d items under SNAPDIR dn %p\n",
		     rinfo->dir_nr, parent);
	} else {
		dout("readdir_prepopulate %d items under dn %p\n",
		     rinfo->dir_nr, parent);
		if (rinfo->dir_dir)
			ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
	}

	/* FIXME: release caps/leases if error occurs */
	for (i = 0; i < rinfo->dir_nr; i++) {
		struct ceph_vino vino;

		dname.name = rinfo->dir_dname[i];
		dname.len = rinfo->dir_dname_len[i];
		dname.hash = full_name_hash(dname.name, dname.len);

		vino.ino = le64_to_cpu(rinfo->dir_in[i].in->ino);
		vino.snap = le64_to_cpu(rinfo->dir_in[i].in->snapid);

retry_lookup:
		dn = d_lookup(parent, &dname);
		dout("d_lookup on parent=%p name=%.*s got %p\n",
		     parent, dname.len, dname.name, dn);

		if (!dn) {
			dn = d_alloc(parent, &dname);
			dout("d_alloc %p '%.*s' = %p\n", parent,
			     dname.len, dname.name, dn);
			if (dn == NULL) {
				dout("d_alloc badness\n");
				err = -ENOMEM;
				goto out;
			}
			ret = ceph_init_dentry(dn);
			if (ret < 0) {
				dput(dn);
				err = ret;
				goto out;
			}
		} else if (dn->d_inode &&
			   (ceph_ino(dn->d_inode) != vino.ino ||
			    ceph_snap(dn->d_inode) != vino.snap)) {
			dout(" dn %p points to wrong inode %p\n",
			     dn, dn->d_inode);
			d_delete(dn);
			dput(dn);
			goto retry_lookup;
		} else {
			/* reorder parent's d_subdirs */
			spin_lock(&parent->d_lock);
			spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
			list_move(&dn->d_u.d_child, &parent->d_subdirs);
			spin_unlock(&dn->d_lock);
			spin_unlock(&parent->d_lock);
		}

		/* inode */
		if (dn->d_inode) {
			in = dn->d_inode;
		} else {
			in = ceph_get_inode(parent->d_sb, vino);
			if (IS_ERR(in)) {
				dout("new_inode badness\n");
				d_drop(dn);
				dput(dn);
				err = PTR_ERR(in);
				goto out;
			}
		}

		if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
			       req->r_request_started, -1,
			       &req->r_caps_reservation) < 0) {
			pr_err("fill_inode badness on %p\n", in);
			if (!dn->d_inode)
				iput(in);
			d_drop(dn);
			goto next_item;
		}

		if (!dn->d_inode) {
			dn = splice_dentry(dn, in, NULL, false);
			if (IS_ERR(dn)) {
				err = PTR_ERR(dn);
				dn = NULL;
				goto next_item;
			}
		}

		di = dn->d_fsdata;
		di->offset = ceph_make_fpos(frag, i + r_readdir_offset);

		update_dentry_lease(dn, rinfo->dir_dlease[i],
				    req->r_session,
				    req->r_request_started);
next_item:
		if (dn)
			dput(dn);
	}
	if (err == 0)
		req->r_did_prepopulate = true;

out:
	if (snapdir) {
		iput(snapdir);
		dput(parent);
	}
	dout("readdir_prepopulate done\n");
	return err;
}

int ceph_inode_set_size(struct inode *inode, loff_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
	inode->i_size = size;
	inode->i_blocks = (size + (1 << 9) - 1) >> 9;

	/* tell the MDS if we are approaching max_size */
	if ((size << 1) >= ci->i_max_size &&
	    (ci->i_reported_size << 1) < ci->i_max_size)
		ret = 1;

	spin_unlock(&ci->i_ceph_lock);
	return ret;
}
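
/*
 * For example (illustrative numbers): with max_size 4 MB, a caller
 * that grows the file from 1 MB (last reported) to 2 MB crosses the
 * halfway point -- (2M << 1) >= 4M while (1M << 1) < 4M -- so the
 * function returns 1 and the caller should ask the MDS for a larger
 * max_size before writes stall against the limit.
 */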

/*
 * Write back inode data in a worker thread.  (This can't be done
 * in the message handler context.)
 */
void ceph_queue_writeback(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->wb_wq,
		       &ceph_inode(inode)->i_wb_work)) {
		dout("ceph_queue_writeback %p\n", inode);
	} else {
		dout("ceph_queue_writeback %p failed\n", inode);
		iput(inode);
	}
}

static void ceph_writeback_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_wb_work);
	struct inode *inode = &ci->vfs_inode;

	dout("writeback %p\n", inode);
	filemap_fdatawrite(&inode->i_data);
	iput(inode);
}

/*
 * queue an async invalidation
 */
void ceph_queue_invalidate(struct inode *inode)
{
	ihold(inode);
	if (queue_work(ceph_inode_to_client(inode)->pg_inv_wq,
		       &ceph_inode(inode)->i_pg_inv_work)) {
		dout("ceph_queue_invalidate %p\n", inode);
	} else {
		dout("ceph_queue_invalidate %p failed\n", inode);
		iput(inode);
	}
}

/*
 * Invalidate inode pages in a worker thread.  (This can't be done
 * in the message handler context.)
 */
static void ceph_invalidate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_pg_inv_work);
	struct inode *inode = &ci->vfs_inode;
	u32 orig_gen;
	int check = 0;

	mutex_lock(&ci->i_truncate_mutex);
	spin_lock(&ci->i_ceph_lock);
	dout("invalidate_pages %p gen %d revoking %d\n", inode,
	     ci->i_rdcache_gen, ci->i_rdcache_revoking);
	if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
		/* nevermind! */
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		goto out;
	}
	orig_gen = ci->i_rdcache_gen;
	spin_unlock(&ci->i_ceph_lock);

	truncate_inode_pages(inode->i_mapping, 0);

	spin_lock(&ci->i_ceph_lock);
	if (orig_gen == ci->i_rdcache_gen &&
	    orig_gen == ci->i_rdcache_revoking) {
		dout("invalidate_pages %p gen %d successful\n", inode,
		     ci->i_rdcache_gen);
		ci->i_rdcache_revoking--;
		check = 1;
	} else {
		dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
		     inode, orig_gen, ci->i_rdcache_gen,
		     ci->i_rdcache_revoking);
	}
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&ci->i_truncate_mutex);

	if (check)
		ceph_check_caps(ci, 0, NULL);
out:
	iput(inode);
}


/*
 * called by trunc_wq; do the actual truncation in a separate thread
 * so the caller (typically the message handler) doesn't have to block.
 */
static void ceph_vmtruncate_work(struct work_struct *work)
{
	struct ceph_inode_info *ci = container_of(work, struct ceph_inode_info,
						  i_vmtruncate_work);
	struct inode *inode = &ci->vfs_inode;

	dout("vmtruncate_work %p\n", inode);
	__ceph_do_pending_vmtruncate(inode);
	iput(inode);
}

/*
 * Queue an async vmtruncate.  If we fail to queue work, we will handle
 * the truncation the next time we call __ceph_do_pending_vmtruncate.
 */
void ceph_queue_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	ihold(inode);

	if (queue_work(ceph_sb_to_client(inode->i_sb)->trunc_wq,
		       &ci->i_vmtruncate_work)) {
		dout("ceph_queue_vmtruncate %p\n", inode);
	} else {
		dout("ceph_queue_vmtruncate %p failed, pending=%d\n",
		     inode, ci->i_truncate_pending);
		iput(inode);
	}
}

/*
 * Make sure any pending truncation is applied before doing anything
 * that may depend on it.
 */
void __ceph_do_pending_vmtruncate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 to;
	int wrbuffer_refs, finish = 0;

	mutex_lock(&ci->i_truncate_mutex);
retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_truncate_pending == 0) {
		dout("__do_pending_vmtruncate %p none pending\n", inode);
		spin_unlock(&ci->i_ceph_lock);
		mutex_unlock(&ci->i_truncate_mutex);
		return;
	}

	/*
	 * make sure any dirty snapped pages are flushed before we
	 * possibly truncate them.. so write AND block!
	 */
	if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
		dout("__do_pending_vmtruncate %p flushing snaps first\n",
		     inode);
		spin_unlock(&ci->i_ceph_lock);
		filemap_write_and_wait_range(&inode->i_data, 0,
					     inode->i_sb->s_maxbytes);
		goto retry;
	}

	/* there should be no reader or writer */
	WARN_ON_ONCE(ci->i_rd_ref || ci->i_wr_ref);

	to = ci->i_truncate_size;
	wrbuffer_refs = ci->i_wrbuffer_ref;
	dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
	     ci->i_truncate_pending, to);
	spin_unlock(&ci->i_ceph_lock);

	truncate_inode_pages(inode->i_mapping, to);

	spin_lock(&ci->i_ceph_lock);
	if (to == ci->i_truncate_size) {
		ci->i_truncate_pending = 0;
		finish = 1;
	}
	spin_unlock(&ci->i_ceph_lock);
	if (!finish)
		goto retry;

	mutex_unlock(&ci->i_truncate_mutex);

	if (wrbuffer_refs == 0)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);

	wake_up_all(&ci->i_cap_wq);
}

/*
 * symlinks
 */
static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct ceph_inode_info *ci = ceph_inode(dentry->d_inode);
	nd_set_link(nd, ci->i_symlink);
	return NULL;
}

static const struct inode_operations ceph_symlink_iops = {
	.readlink = generic_readlink,
	.follow_link = ceph_sym_follow_link,
	.setattr = ceph_setattr,
	.getattr = ceph_getattr,
	.setxattr = ceph_setxattr,
	.getxattr = ceph_getxattr,
	.listxattr = ceph_listxattr,
	.removexattr = ceph_removexattr,
	.get_acl = ceph_get_acl,
};

/*
 * setattr
 */
int ceph_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct inode *parent_inode;
	const unsigned int ia_valid = attr->ia_valid;
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = ceph_sb_to_client(dentry->d_sb)->mdsc;
	int issued;
	int release = 0, dirtied = 0;
	int mask = 0;
	int err = 0;
	int inode_dirty_flags = 0;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	err = inode_change_ok(inode, attr);
	if (err != 0)
		return err;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETATTR,
				       USE_AUTH_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);

	spin_lock(&ci->i_ceph_lock);
	issued = __ceph_caps_issued(ci, NULL);
	dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));

	if (ia_valid & ATTR_UID) {
		dout("setattr %p uid %d -> %d\n", inode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kuid(&init_user_ns, attr->ia_uid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_uid = attr->ia_uid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !uid_eq(attr->ia_uid, inode->i_uid)) {
			req->r_args.setattr.uid = cpu_to_le32(
				from_kuid(&init_user_ns, attr->ia_uid));
			mask |= CEPH_SETATTR_UID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_GID) {
		dout("setattr %p gid %d -> %d\n", inode,
		     from_kgid(&init_user_ns, inode->i_gid),
		     from_kgid(&init_user_ns, attr->ia_gid));
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_gid = attr->ia_gid;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   !gid_eq(attr->ia_gid, inode->i_gid)) {
			req->r_args.setattr.gid = cpu_to_le32(
				from_kgid(&init_user_ns, attr->ia_gid));
			mask |= CEPH_SETATTR_GID;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}
	if (ia_valid & ATTR_MODE) {
		dout("setattr %p mode 0%o -> 0%o\n", inode, inode->i_mode,
		     attr->ia_mode);
		if (issued & CEPH_CAP_AUTH_EXCL) {
			inode->i_mode = attr->ia_mode;
			dirtied |= CEPH_CAP_AUTH_EXCL;
		} else if ((issued & CEPH_CAP_AUTH_SHARED) == 0 ||
			   attr->ia_mode != inode->i_mode) {
			inode->i_mode = attr->ia_mode;
			req->r_args.setattr.mode = cpu_to_le32(attr->ia_mode);
			mask |= CEPH_SETATTR_MODE;
			release |= CEPH_CAP_AUTH_SHARED;
		}
	}

	if (ia_valid & ATTR_ATIME) {
		dout("setattr %p atime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_atime.tv_sec, inode->i_atime.tv_nsec,
		     attr->ia_atime.tv_sec, attr->ia_atime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_atime,
					    &attr->ia_atime) < 0) {
			inode->i_atime = attr->ia_atime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_atime, &attr->ia_atime)) {
			ceph_encode_timespec(&req->r_args.setattr.atime,
					     &attr->ia_atime);
			mask |= CEPH_SETATTR_ATIME;
			release |= CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_MTIME) {
		dout("setattr %p mtime %ld.%ld -> %ld.%ld\n", inode,
		     inode->i_mtime.tv_sec, inode->i_mtime.tv_nsec,
		     attr->ia_mtime.tv_sec, attr->ia_mtime.tv_nsec);
		if (issued & CEPH_CAP_FILE_EXCL) {
			ci->i_time_warp_seq++;
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_WR) &&
			   timespec_compare(&inode->i_mtime,
					    &attr->ia_mtime) < 0) {
			inode->i_mtime = attr->ia_mtime;
			dirtied |= CEPH_CAP_FILE_WR;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   !timespec_equal(&inode->i_mtime, &attr->ia_mtime)) {
			ceph_encode_timespec(&req->r_args.setattr.mtime,
					     &attr->ia_mtime);
			mask |= CEPH_SETATTR_MTIME;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}
	if (ia_valid & ATTR_SIZE) {
		dout("setattr %p size %lld -> %lld\n", inode,
		     inode->i_size, attr->ia_size);
		if (attr->ia_size > inode->i_sb->s_maxbytes) {
			err = -EINVAL;
			goto out;
		}
		if ((issued & CEPH_CAP_FILE_EXCL) &&
		    attr->ia_size > inode->i_size) {
			inode->i_size = attr->ia_size;
			inode->i_blocks =
				(attr->ia_size + (1 << 9) - 1) >> 9;
			inode->i_ctime = attr->ia_ctime;
			ci->i_reported_size = attr->ia_size;
			dirtied |= CEPH_CAP_FILE_EXCL;
		} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
			   attr->ia_size != inode->i_size) {
			req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
			req->r_args.setattr.old_size =
				cpu_to_le64(inode->i_size);
			mask |= CEPH_SETATTR_SIZE;
			release |= CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD |
				CEPH_CAP_FILE_WR;
		}
	}

	/* these do nothing */
	if (ia_valid & ATTR_CTIME) {
		bool only = (ia_valid & (ATTR_SIZE|ATTR_MTIME|ATTR_ATIME|
					 ATTR_MODE|ATTR_UID|ATTR_GID)) == 0;
		dout("setattr %p ctime %ld.%ld -> %ld.%ld (%s)\n", inode,
		     inode->i_ctime.tv_sec, inode->i_ctime.tv_nsec,
		     attr->ia_ctime.tv_sec, attr->ia_ctime.tv_nsec,
		     only ? "ctime only" : "ignored");
		inode->i_ctime = attr->ia_ctime;
		if (only) {
			/*
			 * if kernel wants to dirty ctime but nothing else,
			 * we need to choose a cap to dirty under, or do
			 * an almost-no-op setattr
			 */
			if (issued & CEPH_CAP_AUTH_EXCL)
				dirtied |= CEPH_CAP_AUTH_EXCL;
			else if (issued & CEPH_CAP_FILE_EXCL)
				dirtied |= CEPH_CAP_FILE_EXCL;
			else if (issued & CEPH_CAP_XATTR_EXCL)
				dirtied |= CEPH_CAP_XATTR_EXCL;
			else
				mask |= CEPH_SETATTR_CTIME;
		}
	}
	if (ia_valid & ATTR_FILE)
		dout("setattr %p ATTR_FILE ... hrm!\n", inode);

	if (dirtied) {
		inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied);
		inode->i_ctime = CURRENT_TIME;
	}

	release &= issued;
	spin_unlock(&ci->i_ceph_lock);

	if (inode_dirty_flags)
		__mark_inode_dirty(inode, inode_dirty_flags);

	if (ia_valid & ATTR_MODE) {
		err = ceph_acl_chmod(dentry, inode);
		if (err)
			goto out_put;
	}

	if (mask) {
		req->r_inode = inode;
		ihold(inode);
		req->r_inode_drop = release;
		req->r_args.setattr.mask = cpu_to_le32(mask);
		req->r_num_caps = 1;
		parent_inode = ceph_get_dentry_parent_inode(dentry);
		err = ceph_mdsc_do_request(mdsc, parent_inode, req);
		iput(parent_inode);
	}
	dout("setattr %p result=%d (%s locally, %d remote)\n", inode, err,
	     ceph_cap_string(dirtied), mask);

	ceph_mdsc_put_request(req);
	if (mask & CEPH_SETATTR_SIZE)
		__ceph_do_pending_vmtruncate(inode);
	return err;
out:
	spin_unlock(&ci->i_ceph_lock);
out_put:
	ceph_mdsc_put_request(req);
	return err;
}

/*
 * Verify that we have a lease on the given mask.  If not,
 * do a getattr against an mds.
 */
int ceph_do_getattr(struct inode *inode, int mask)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int err;

	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		dout("do_getattr inode %p SNAPDIR\n", inode);
		return 0;
	}

	dout("do_getattr inode %p mask %s mode 0%o\n",
	     inode, ceph_cap_string(mask), inode->i_mode);
	if (ceph_caps_issued_mask(ceph_inode(inode), mask, 1))
		return 0;

	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return PTR_ERR(req);
	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_args.getattr.mask = cpu_to_le32(mask);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("do_getattr result=%d\n", err);
	return err;
}
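
/*
 * Typical usage (illustrative): a caller that only needs a trustworthy
 * i_size can ask for just the size caps, e.g.
 *
 *	err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
 *
 * which returns immediately if the corresponding caps are already
 * issued and otherwise round-trips to the MDS.
 */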


/*
 * Check inode permissions.  We verify we have a valid value for
 * the AUTH cap, then call the generic handler.
 */
int ceph_permission(struct inode *inode, int mask)
{
	int err;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	err = ceph_do_getattr(inode, CEPH_CAP_AUTH_SHARED);

	if (!err)
		err = generic_permission(inode, mask);
	return err;
}

/*
 * Get all attributes.  Hopefully someday we'll have a statlite()
 * and can limit the fields we require to be accurate.
 */
int ceph_getattr(struct vfsmount *mnt, struct dentry *dentry,
		 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int err;

	err = ceph_do_getattr(inode, CEPH_STAT_CAP_INODE_ALL);
	if (!err) {
		generic_fillattr(inode, stat);
		stat->ino = ceph_translate_ino(inode->i_sb, inode->i_ino);
		if (ceph_snap(inode) != CEPH_NOSNAP)
			stat->dev = ceph_snap(inode);
		else
			stat->dev = 0;
		if (S_ISDIR(inode->i_mode)) {
			if (ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb),
						RBYTES))
				stat->size = ci->i_rbytes;
			else
				stat->size = ci->i_files + ci->i_subdirs;
			stat->blocks = 0;
			stat->blksize = 65536;
		}
	}
	return err;
}