xref: /openbmc/linux/fs/ceph/file.c (revision 96c63fa7393d0a346acfe5a91e0c7d4c7782641b)
1 #include <linux/ceph/ceph_debug.h>
2 
3 #include <linux/module.h>
4 #include <linux/sched.h>
5 #include <linux/slab.h>
6 #include <linux/file.h>
7 #include <linux/mount.h>
8 #include <linux/namei.h>
9 #include <linux/writeback.h>
10 #include <linux/falloc.h>
11 
12 #include "super.h"
13 #include "mds_client.h"
14 #include "cache.h"
15 
16 /*
17  * Ceph file operations
18  *
19  * Implement basic open/close functionality, and implement
20  * read/write.
21  *
22  * We implement three modes of file I/O:
23  *  - buffered uses the generic_file_read_iter/generic_perform_write helpers
24  *
25  *  - synchronous is used when there is multi-client read/write
26  *    sharing, avoids the page cache, and synchronously waits for an
27  *    ack from the OSD.
28  *
29  *  - direct io takes the variant of the sync path that references
30  *    user pages directly.
31  *
32  * fsync() flushes and waits on dirty pages, but just queues metadata
33  * for writeback: since the MDS can recover size and mtime there is no
34  * need to wait for MDS acknowledgement.
35  */
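/*
 * A minimal userspace sketch (not part of this file) of the two
 * client-visible paths described above: a plain buffered read, and an
 * O_DIRECT read whose buffer must be page aligned.  The mount point
 * and file name are hypothetical.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

static ssize_t read_both_ways(void)
{
	char buf[4096];
	void *dbuf;
	ssize_t n;
	int fd;

	fd = open("/mnt/cephfs/foo", O_RDONLY);            /* buffered path */
	n = read(fd, buf, sizeof(buf));
	close(fd);

	fd = open("/mnt/cephfs/foo", O_RDONLY | O_DIRECT); /* sync/direct path */
	if (posix_memalign(&dbuf, 4096, 4096))             /* page aligned */
		return -1;
	n = read(fd, dbuf, 4096);
	free(dbuf);
	close(fd);
	return n;
}
#endif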
36 
37 /*
38  * Calculate the length sum of direct io vectors that can
39  * be combined into one page vector.
40  */
41 static size_t dio_get_pagev_size(const struct iov_iter *it)
42 {
43     const struct iovec *iov = it->iov;
44     const struct iovec *iovend = iov + it->nr_segs;
45     size_t size;
46 
47     size = iov->iov_len - it->iov_offset;
48     /*
49      * An iov can be page vectored when both the current tail
50      * and the next base are page aligned.
51      */
52     while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
53            (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
54         size += iov->iov_len;
55     }
56     dout("dio_get_pagev_size len = %zu\n", size);
57     return size;
58 }
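/*
 * Worked example (hypothetical values, PAGE_SIZE == 4096):
 *   iov[0] = { .iov_base = (void *)0x1000, .iov_len = 8192 }
 *   iov[1] = { .iov_base = (void *)0x8000, .iov_len = 4096 }
 * iov[0]'s tail (0x3000) and iov[1]'s base are both page aligned, so
 * the segments coalesce and dio_get_pagev_size() returns 12288.  If
 * iov[1].iov_base were 0x8100, only iov[0]'s 8192 bytes would count.
 */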
59 
60 /*
61  * Allocate a page vector based on (@it, @nbytes).
62  * The return value is the tuple describing a page vector,
63  * that is (@pages, @page_align, @num_pages).
64  */
65 static struct page **
66 dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
67 		    size_t *page_align, int *num_pages)
68 {
69 	struct iov_iter tmp_it = *it;
70 	size_t align;
71 	struct page **pages;
72 	int ret = 0, idx, npages;
73 
74 	align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
75 		(PAGE_SIZE - 1);
76 	npages = calc_pages_for(align, nbytes);
77 	pages = kmalloc(sizeof(*pages) * npages, GFP_KERNEL);
78 	if (!pages) {
79 		pages = vmalloc(sizeof(*pages) * npages);
80 		if (!pages)
81 			return ERR_PTR(-ENOMEM);
82 	}
83 
84 	for (idx = 0; idx < npages; ) {
85 		size_t start;
86 		ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
87 					 npages - idx, &start);
88 		if (ret < 0)
89 			goto fail;
90 
91 		iov_iter_advance(&tmp_it, ret);
92 		nbytes -= ret;
93 		idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
94 	}
95 
96 	BUG_ON(nbytes != 0);
97 	*num_pages = npages;
98 	*page_align = align;
99 	dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
100 	return pages;
101 fail:
102 	ceph_put_page_vector(pages, idx, false);
103 	return ERR_PTR(ret);
104 }
105 
106 /*
107  * Prepare an open request.  Preallocate ceph_cap to avoid an
108  * inopportune ENOMEM later.
109  */
110 static struct ceph_mds_request *
111 prepare_open_request(struct super_block *sb, int flags, int create_mode)
112 {
113 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
114 	struct ceph_mds_client *mdsc = fsc->mdsc;
115 	struct ceph_mds_request *req;
116 	int want_auth = USE_ANY_MDS;
117 	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
118 
119 	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
120 		want_auth = USE_AUTH_MDS;
121 
122 	req = ceph_mdsc_create_request(mdsc, op, want_auth);
123 	if (IS_ERR(req))
124 		goto out;
125 	req->r_fmode = ceph_flags_to_mode(flags);
126 	req->r_args.open.flags = cpu_to_le32(flags);
127 	req->r_args.open.mode = cpu_to_le32(create_mode);
128 out:
129 	return req;
130 }
131 
132 /*
133  * initialize private struct file data.
134  * if we fail, clean up by dropping fmode reference on the ceph_inode
135  */
136 static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
137 {
138 	struct ceph_file_info *cf;
139 	int ret = 0;
140 	struct ceph_inode_info *ci = ceph_inode(inode);
141 	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
142 	struct ceph_mds_client *mdsc = fsc->mdsc;
143 
144 	switch (inode->i_mode & S_IFMT) {
145 	case S_IFREG:
146 		/* First file open request creates the cookie, we want to keep
147 		 * this cookie around for the lifetime of the inode so as not to
148 		 * have to worry about fscache register / revoke / operation
149 		 * races.
150 		 *
151 		 * Also, if we know the operation is going to invalidate data
152 		 * (non readonly) just nuke the cache right away.
153 		 */
154 		ceph_fscache_register_inode_cookie(mdsc->fsc, ci);
155 		if ((fmode & CEPH_FILE_MODE_WR))
156 			ceph_fscache_invalidate(inode);
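		/* fall through */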
157 	case S_IFDIR:
158 		dout("init_file %p %p 0%o (regular)\n", inode, file,
159 		     inode->i_mode);
160 		cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
161 		if (cf == NULL) {
162 			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
163 			return -ENOMEM;
164 		}
165 		cf->fmode = fmode;
166 		cf->next_offset = 2;
167 		cf->readdir_cache_idx = -1;
168 		file->private_data = cf;
169 		BUG_ON(inode->i_fop->release != ceph_release);
170 		break;
171 
172 	case S_IFLNK:
173 		dout("init_file %p %p 0%o (symlink)\n", inode, file,
174 		     inode->i_mode);
175 		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
176 		break;
177 
178 	default:
179 		dout("init_file %p %p 0%o (special)\n", inode, file,
180 		     inode->i_mode);
181 		/*
182 		 * we need to drop the open ref now, since we don't
183 		 * have .release set to ceph_release.
184 		 */
185 		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
186 		BUG_ON(inode->i_fop->release == ceph_release);
187 
188 		/* call the proper open fop */
189 		ret = inode->i_fop->open(inode, file);
190 	}
191 	return ret;
192 }
193 
194 /*
195  * try renew caps after session gets killed.
196  */
197 int ceph_renew_caps(struct inode *inode)
198 {
199 	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
200 	struct ceph_inode_info *ci = ceph_inode(inode);
201 	struct ceph_mds_request *req;
202 	int err, flags, wanted;
203 
204 	spin_lock(&ci->i_ceph_lock);
205 	wanted = __ceph_caps_file_wanted(ci);
206 	if (__ceph_is_any_real_caps(ci) &&
207 	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
208 		int issued = __ceph_caps_issued(ci, NULL);
209 		spin_unlock(&ci->i_ceph_lock);
210 		dout("renew caps %p want %s issued %s updating mds_wanted\n",
211 		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
212 		ceph_check_caps(ci, 0, NULL);
213 		return 0;
214 	}
215 	spin_unlock(&ci->i_ceph_lock);
216 
217 	flags = 0;
218 	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
219 		flags = O_RDWR;
220 	else if (wanted & CEPH_CAP_FILE_RD)
221 		flags = O_RDONLY;
222 	else if (wanted & CEPH_CAP_FILE_WR)
223 		flags = O_WRONLY;
224 #ifdef O_LAZY
225 	if (wanted & CEPH_CAP_FILE_LAZYIO)
226 		flags |= O_LAZY;
227 #endif
228 
229 	req = prepare_open_request(inode->i_sb, flags, 0);
230 	if (IS_ERR(req)) {
231 		err = PTR_ERR(req);
232 		goto out;
233 	}
234 
235 	req->r_inode = inode;
236 	ihold(inode);
237 	req->r_num_caps = 1;
238 	req->r_fmode = -1;
239 
240 	err = ceph_mdsc_do_request(mdsc, NULL, req);
241 	ceph_mdsc_put_request(req);
242 out:
243 	dout("renew caps %p open result=%d\n", inode, err);
244 	return err < 0 ? err : 0;
245 }
246 
247 /*
248  * If we already have the requisite capabilities, we can satisfy
249  * the open request locally (no need to request new caps from the
250  * MDS).  We do, however, need to inform the MDS (asynchronously)
251  * if our wanted caps set expands.
252  */
253 int ceph_open(struct inode *inode, struct file *file)
254 {
255 	struct ceph_inode_info *ci = ceph_inode(inode);
256 	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
257 	struct ceph_mds_client *mdsc = fsc->mdsc;
258 	struct ceph_mds_request *req;
259 	struct ceph_file_info *cf = file->private_data;
260 	int err;
261 	int flags, fmode, wanted;
262 
263 	if (cf) {
264 		dout("open file %p is already opened\n", file);
265 		return 0;
266 	}
267 
268 	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
269 	flags = file->f_flags & ~(O_CREAT|O_EXCL);
270 	if (S_ISDIR(inode->i_mode))
271 		flags = O_DIRECTORY;  /* mds likes to know */
272 
273 	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
274 	     ceph_vinop(inode), file, flags, file->f_flags);
275 	fmode = ceph_flags_to_mode(flags);
276 	wanted = ceph_caps_for_mode(fmode);
277 
278 	/* snapped files are read-only */
279 	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
280 		return -EROFS;
281 
282 	/* trivially open snapdir */
283 	if (ceph_snap(inode) == CEPH_SNAPDIR) {
284 		spin_lock(&ci->i_ceph_lock);
285 		__ceph_get_fmode(ci, fmode);
286 		spin_unlock(&ci->i_ceph_lock);
287 		return ceph_init_file(inode, file, fmode);
288 	}
289 
290 	/*
291 	 * No need to block if we have caps on the auth MDS (for
292 	 * write) or any MDS (for read).  Update wanted set
293 	 * asynchronously.
294 	 */
295 	spin_lock(&ci->i_ceph_lock);
296 	if (__ceph_is_any_real_caps(ci) &&
297 	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
298 		int mds_wanted = __ceph_caps_mds_wanted(ci);
299 		int issued = __ceph_caps_issued(ci, NULL);
300 
301 		dout("open %p fmode %d want %s issued %s using existing\n",
302 		     inode, fmode, ceph_cap_string(wanted),
303 		     ceph_cap_string(issued));
304 		__ceph_get_fmode(ci, fmode);
305 		spin_unlock(&ci->i_ceph_lock);
306 
307 		/* adjust wanted? */
308 		if ((issued & wanted) != wanted &&
309 		    (mds_wanted & wanted) != wanted &&
310 		    ceph_snap(inode) != CEPH_SNAPDIR)
311 			ceph_check_caps(ci, 0, NULL);
312 
313 		return ceph_init_file(inode, file, fmode);
314 	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
315 		   (ci->i_snap_caps & wanted) == wanted) {
316 		__ceph_get_fmode(ci, fmode);
317 		spin_unlock(&ci->i_ceph_lock);
318 		return ceph_init_file(inode, file, fmode);
319 	}
320 
321 	spin_unlock(&ci->i_ceph_lock);
322 
323 	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
324 	req = prepare_open_request(inode->i_sb, flags, 0);
325 	if (IS_ERR(req)) {
326 		err = PTR_ERR(req);
327 		goto out;
328 	}
329 	req->r_inode = inode;
330 	ihold(inode);
331 
332 	req->r_num_caps = 1;
333 	err = ceph_mdsc_do_request(mdsc, NULL, req);
334 	if (!err)
335 		err = ceph_init_file(inode, file, req->r_fmode);
336 	ceph_mdsc_put_request(req);
337 	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
338 out:
339 	return err;
340 }
341 
342 
343 /*
344  * Do a lookup + open with a single request.  If we get a non-existent
345  * file or symlink, return 1 so the VFS can retry.
346  */
347 int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
348 		     struct file *file, unsigned flags, umode_t mode,
349 		     int *opened)
350 {
351 	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
352 	struct ceph_mds_client *mdsc = fsc->mdsc;
353 	struct ceph_mds_request *req;
354 	struct dentry *dn;
355 	struct ceph_acls_info acls = {};
356 	int mask;
357 	int err;
358 
359 	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
360 	     dir, dentry, dentry,
361 	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
362 
363 	if (dentry->d_name.len > NAME_MAX)
364 		return -ENAMETOOLONG;
365 
366 	err = ceph_init_dentry(dentry);
367 	if (err < 0)
368 		return err;
369 
370 	if (flags & O_CREAT) {
371 		err = ceph_pre_init_acls(dir, &mode, &acls);
372 		if (err < 0)
373 			return err;
374 	}
375 
376 	/* do the open */
377 	req = prepare_open_request(dir->i_sb, flags, mode);
378 	if (IS_ERR(req)) {
379 		err = PTR_ERR(req);
380 		goto out_acl;
381 	}
382 	req->r_dentry = dget(dentry);
383 	req->r_num_caps = 2;
384 	if (flags & O_CREAT) {
385 		req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
386 		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
387 		if (acls.pagelist) {
388 			req->r_pagelist = acls.pagelist;
389 			acls.pagelist = NULL;
390 		}
391 	}
392 
393 	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
394 	if (ceph_security_xattr_wanted(dir))
395 		mask |= CEPH_CAP_XATTR_SHARED;
396 	req->r_args.open.mask = cpu_to_le32(mask);
397 
398 	req->r_locked_dir = dir;           /* caller holds dir->i_mutex */
399 	err = ceph_mdsc_do_request(mdsc,
400 				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
401 				   req);
402 	err = ceph_handle_snapdir(req, dentry, err);
403 	if (err)
404 		goto out_req;
405 
406 	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
407 		err = ceph_handle_notrace_create(dir, dentry);
408 
409 	if (d_unhashed(dentry)) {
410 		dn = ceph_finish_lookup(req, dentry, err);
411 		if (IS_ERR(dn))
412 			err = PTR_ERR(dn);
413 	} else {
414 		/* we were given a hashed negative dentry */
415 		dn = NULL;
416 	}
417 	if (err)
418 		goto out_req;
419 	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
420 		/* make vfs retry on splice, ENOENT, or symlink */
421 		dout("atomic_open finish_no_open on dn %p\n", dn);
422 		err = finish_no_open(file, dn);
423 	} else {
424 		dout("atomic_open finish_open on dn %p\n", dn);
425 		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
426 			ceph_init_inode_acls(d_inode(dentry), &acls);
427 			*opened |= FILE_CREATED;
428 		}
429 		err = finish_open(file, dentry, ceph_open, opened);
430 	}
431 out_req:
432 	if (!req->r_err && req->r_target_inode)
433 		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
434 	ceph_mdsc_put_request(req);
435 out_acl:
436 	ceph_release_acls_info(&acls);
437 	dout("atomic_open result=%d\n", err);
438 	return err;
439 }
440 
441 int ceph_release(struct inode *inode, struct file *file)
442 {
443 	struct ceph_inode_info *ci = ceph_inode(inode);
444 	struct ceph_file_info *cf = file->private_data;
445 
446 	dout("release inode %p file %p\n", inode, file);
447 	ceph_put_fmode(ci, cf->fmode);
448 	if (cf->last_readdir)
449 		ceph_mdsc_put_request(cf->last_readdir);
450 	kfree(cf->last_name);
451 	kfree(cf->dir_info);
452 	kmem_cache_free(ceph_file_cachep, cf);
453 
454 	/* wake up anyone waiting for caps on this inode */
455 	wake_up_all(&ci->i_cap_wq);
456 	return 0;
457 }
458 
459 enum {
460 	HAVE_RETRIED = 1,
461 	CHECK_EOF =    2,
462 	READ_INLINE =  3,
463 };
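/*
 * retry_op values for ceph_read_iter(): CHECK_EOF requests another
 * pass after a short sync/direct read that may have hit a hole or
 * raced with an append; READ_INLINE redirects the read to inline data
 * fetched from the MDS; HAVE_RETRIED marks that one retry has already
 * been performed, so the retry block is not entered again.
 */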
464 
465 /*
466  * Read a range of bytes striped over one or more objects, iterating over
467  * each object the range touches.  (Not atomic, but good enough for now.)
468  *
469  * If we get a short result from the OSD, check against i_size; we need to
470  * only return a short read to the caller if we hit EOF.
471  */
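/*
 * Worked example (hypothetical layout: 4 MB objects, stripe_count 1):
 * a 6 MB read at offset 3 MB spans three objects.  The first pass
 * clips this_len to 1 MB (the remainder of object 0) and sets
 * hit_stripe; the loop then reads 4 MB from object 1 and the final
 * 1 MB from object 2, accumulating the total in @read.
 */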
472 static int striped_read(struct inode *inode,
473 			u64 off, u64 len,
474 			struct page **pages, int num_pages,
475 			int *checkeof)
476 {
477 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
478 	struct ceph_inode_info *ci = ceph_inode(inode);
479 	u64 pos, this_len, left;
480 	loff_t i_size;
481 	int page_align, pages_left;
482 	int read, ret;
483 	struct page **page_pos;
484 	bool hit_stripe, was_short;
485 
486 	/*
487 	 * we may need to do multiple reads.  not atomic, unfortunately.
488 	 */
489 	pos = off;
490 	left = len;
491 	page_pos = pages;
492 	pages_left = num_pages;
493 	read = 0;
494 
495 more:
496 	page_align = pos & ~PAGE_MASK;
497 	this_len = left;
498 	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
499 				  &ci->i_layout, pos, &this_len,
500 				  ci->i_truncate_seq,
501 				  ci->i_truncate_size,
502 				  page_pos, pages_left, page_align);
503 	if (ret == -ENOENT)
504 		ret = 0;
505 	hit_stripe = this_len < left;
506 	was_short = ret >= 0 && ret < this_len;
507 	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, left, read,
508 	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");
509 
510 	i_size = i_size_read(inode);
511 	if (ret >= 0) {
512 		int didpages;
513 		if (was_short && (pos + ret < i_size)) {
514 			int zlen = min(this_len - ret, i_size - pos - ret);
515 			int zoff = (off & ~PAGE_MASK) + read + ret;
516 			dout(" zero gap %llu to %llu\n",
517 				pos + ret, pos + ret + zlen);
518 			ceph_zero_page_vector_range(zoff, zlen, pages);
519 			ret += zlen;
520 		}
521 
522 		didpages = (page_align + ret) >> PAGE_SHIFT;
523 		pos += ret;
524 		read = pos - off;
525 		left -= ret;
526 		page_pos += didpages;
527 		pages_left -= didpages;
528 
529 		/* hit stripe and need to continue */
530 		if (left && hit_stripe && pos < i_size)
531 			goto more;
532 	}
533 
534 	if (read > 0) {
535 		ret = read;
536 		/* did we bounce off eof? */
537 		if (pos + left > i_size)
538 			*checkeof = CHECK_EOF;
539 	}
540 
541 	dout("striped_read returns %d\n", ret);
542 	return ret;
543 }
544 
545 /*
546  * Completely synchronous read and write methods.  Direct from __user
547  * buffer to osd, or directly to user pages (if O_DIRECT).
548  *
549  * If the read spans object boundary, just do multiple reads.
550  */
551 static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *i,
552 				int *checkeof)
553 {
554 	struct file *file = iocb->ki_filp;
555 	struct inode *inode = file_inode(file);
556 	struct page **pages;
557 	u64 off = iocb->ki_pos;
558 	int num_pages, ret;
559 	size_t len = iov_iter_count(i);
560 
561 	dout("sync_read on file %p %llu~%u %s\n", file, off,
562 	     (unsigned)len,
563 	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
564 
565 	if (!len)
566 		return 0;
567 	/*
568 	 * flush any page cache pages in this range.  this
569 	 * will make concurrent normal and sync io slow,
570 	 * but it will at least behave sensibly when they are
571 	 * in sequence.
572 	 */
573 	ret = filemap_write_and_wait_range(inode->i_mapping, off,
574 						off + len);
575 	if (ret < 0)
576 		return ret;
577 
578 	num_pages = calc_pages_for(off, len);
579 	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
580 	if (IS_ERR(pages))
581 		return PTR_ERR(pages);
582 	ret = striped_read(inode, off, len, pages,
583 				num_pages, checkeof);
584 	if (ret > 0) {
585 		int l, k = 0;
586 		size_t left = ret;
587 
588 		while (left) {
589 			size_t page_off = off & ~PAGE_MASK;
590 			size_t copy = min_t(size_t, left,
591 					    PAGE_SIZE - page_off);
592 			l = copy_page_to_iter(pages[k++], page_off, copy, i);
593 			off += l;
594 			left -= l;
595 			if (l < copy)
596 				break;
597 		}
598 	}
599 	ceph_release_page_vector(pages, num_pages);
600 
601 	if (off > iocb->ki_pos) {
602 		ret = off - iocb->ki_pos;
603 		iocb->ki_pos = off;
604 	}
605 
606 	dout("sync_read result %d\n", ret);
607 	return ret;
608 }
609 
610 struct ceph_aio_request {
611 	struct kiocb *iocb;
612 	size_t total_len;
613 	int write;
614 	int error;
615 	struct list_head osd_reqs;
616 	unsigned num_reqs;
617 	atomic_t pending_reqs;
618 	struct timespec mtime;
619 	struct ceph_cap_flush *prealloc_cf;
620 };
621 
622 struct ceph_aio_work {
623 	struct work_struct work;
624 	struct ceph_osd_request *req;
625 };
626 
627 static void ceph_aio_retry_work(struct work_struct *work);
628 
629 static void ceph_aio_complete(struct inode *inode,
630 			      struct ceph_aio_request *aio_req)
631 {
632 	struct ceph_inode_info *ci = ceph_inode(inode);
633 	int ret;
634 
635 	if (!atomic_dec_and_test(&aio_req->pending_reqs))
636 		return;
637 
638 	ret = aio_req->error;
639 	if (!ret)
640 		ret = aio_req->total_len;
641 
642 	dout("ceph_aio_complete %p rc %d\n", inode, ret);
643 
644 	if (ret >= 0 && aio_req->write) {
645 		int dirty;
646 
647 		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
648 		if (endoff > i_size_read(inode)) {
649 			if (ceph_inode_set_size(inode, endoff))
650 				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
651 		}
652 
653 		spin_lock(&ci->i_ceph_lock);
654 		ci->i_inline_version = CEPH_INLINE_NONE;
655 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
656 					       &aio_req->prealloc_cf);
657 		spin_unlock(&ci->i_ceph_lock);
658 		if (dirty)
659 			__mark_inode_dirty(inode, dirty);
660 
661 	}
662 
663 	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
664 						CEPH_CAP_FILE_RD));
665 
666 	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);
667 
668 	ceph_free_cap_flush(aio_req->prealloc_cf);
669 	kfree(aio_req);
670 }
671 
672 static void ceph_aio_complete_req(struct ceph_osd_request *req)
673 {
674 	int rc = req->r_result;
675 	struct inode *inode = req->r_inode;
676 	struct ceph_aio_request *aio_req = req->r_priv;
677 	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
678 	int num_pages = calc_pages_for((u64)osd_data->alignment,
679 				       osd_data->length);
680 
681 	dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
682 	     inode, rc, osd_data->length);
683 
684 	if (rc == -EOLDSNAPC) {
685 		struct ceph_aio_work *aio_work;
686 		BUG_ON(!aio_req->write);
687 
688 		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
689 		if (aio_work) {
690 			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
691 			aio_work->req = req;
692 			queue_work(ceph_inode_to_client(inode)->wb_wq,
693 				   &aio_work->work);
694 			return;
695 		}
696 		rc = -ENOMEM;
697 	} else if (!aio_req->write) {
698 		if (rc == -ENOENT)
699 			rc = 0;
700 		if (rc >= 0 && osd_data->length > rc) {
701 			int zoff = osd_data->alignment + rc;
702 			int zlen = osd_data->length - rc;
703 			/*
704 			 * If read is satisfied by single OSD request,
705 			 * it can pass EOF. Otherwise read is within
706 			 * i_size.
707 			 */
708 			if (aio_req->num_reqs == 1) {
709 				loff_t i_size = i_size_read(inode);
710 				loff_t endoff = aio_req->iocb->ki_pos + rc;
711 				if (endoff < i_size)
712 					zlen = min_t(size_t, zlen,
713 						     i_size - endoff);
714 				aio_req->total_len = rc + zlen;
715 			}
716 
717 			if (zlen > 0)
718 				ceph_zero_page_vector_range(zoff, zlen,
719 							    osd_data->pages);
720 		}
721 	}
722 
723 	ceph_put_page_vector(osd_data->pages, num_pages, false);
724 	ceph_osdc_put_request(req);
725 
726 	if (rc < 0)
727 		cmpxchg(&aio_req->error, 0, rc);
728 
729 	ceph_aio_complete(inode, aio_req);
730 	return;
731 }
732 
733 static void ceph_aio_retry_work(struct work_struct *work)
734 {
735 	struct ceph_aio_work *aio_work =
736 		container_of(work, struct ceph_aio_work, work);
737 	struct ceph_osd_request *orig_req = aio_work->req;
738 	struct ceph_aio_request *aio_req = orig_req->r_priv;
739 	struct inode *inode = orig_req->r_inode;
740 	struct ceph_inode_info *ci = ceph_inode(inode);
741 	struct ceph_snap_context *snapc;
742 	struct ceph_osd_request *req;
743 	int ret;
744 
745 	spin_lock(&ci->i_ceph_lock);
746 	if (__ceph_have_pending_cap_snap(ci)) {
747 		struct ceph_cap_snap *capsnap =
748 			list_last_entry(&ci->i_cap_snaps,
749 					struct ceph_cap_snap,
750 					ci_item);
751 		snapc = ceph_get_snap_context(capsnap->context);
752 	} else {
753 		BUG_ON(!ci->i_head_snapc);
754 		snapc = ceph_get_snap_context(ci->i_head_snapc);
755 	}
756 	spin_unlock(&ci->i_ceph_lock);
757 
758 	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
759 			false, GFP_NOFS);
760 	if (!req) {
761 		ret = -ENOMEM;
762 		req = orig_req;
763 		goto out;
764 	}
765 
766 	req->r_flags =	CEPH_OSD_FLAG_ORDERSNAP |
767 			CEPH_OSD_FLAG_ONDISK |
768 			CEPH_OSD_FLAG_WRITE;
769 	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
770 	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
771 
772 	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
773 	if (ret) {
774 		ceph_osdc_put_request(req);
775 		req = orig_req;
776 		goto out;
777 	}
778 
779 	req->r_ops[0] = orig_req->r_ops[0];
780 	osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
781 
782 	req->r_mtime = aio_req->mtime;
783 	req->r_data_offset = req->r_ops[0].extent.offset;
784 
785 	ceph_osdc_put_request(orig_req);
786 
787 	req->r_callback = ceph_aio_complete_req;
788 	req->r_inode = inode;
789 	req->r_priv = aio_req;
790 
791 	ret = ceph_osdc_start_request(req->r_osdc, req, false);
792 out:
793 	if (ret < 0) {
794 		req->r_result = ret;
795 		ceph_aio_complete_req(req);
796 	}
797 
798 	ceph_put_snap_context(snapc);
799 	kfree(aio_work);
800 }
801 
802 /*
803  * Write commit request unsafe callback, called to tell us when a
804  * request is unsafe (that is, in flight--has been handed to the
805  * messenger to send to its target osd).  It is called again when
806  * we've received a response message indicating the request is
807  * "safe" (its CEPH_OSD_FLAG_ONDISK flag is set), or when a request
808  * is completed early (and unsuccessfully) due to a timeout or
809  * interrupt.
810  *
811  * This is used if we requested both an ACK and ONDISK commit reply
812  * from the OSD.
813  */
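/*
 * For example, a sync write that spans two objects issues two OSD
 * requests; while each is unsafe it holds a CEPH_CAP_FILE_WR reference
 * and sits on ci->i_unsafe_writes, so the Fw cap cannot be released
 * back to the MDS until every in-flight request has committed.
 */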
814 static void ceph_sync_write_unsafe(struct ceph_osd_request *req, bool unsafe)
815 {
816 	struct ceph_inode_info *ci = ceph_inode(req->r_inode);
817 
818 	dout("%s %p tid %llu %ssafe\n", __func__, req, req->r_tid,
819 		unsafe ? "un" : "");
820 	if (unsafe) {
821 		ceph_get_cap_refs(ci, CEPH_CAP_FILE_WR);
822 		spin_lock(&ci->i_unsafe_lock);
823 		list_add_tail(&req->r_unsafe_item,
824 			      &ci->i_unsafe_writes);
825 		spin_unlock(&ci->i_unsafe_lock);
826 
827 		complete_all(&req->r_completion);
828 	} else {
829 		spin_lock(&ci->i_unsafe_lock);
830 		list_del_init(&req->r_unsafe_item);
831 		spin_unlock(&ci->i_unsafe_lock);
832 		ceph_put_cap_refs(ci, CEPH_CAP_FILE_WR);
833 	}
834 }
835 
836 
837 static ssize_t
838 ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
839 		       struct ceph_snap_context *snapc,
840 		       struct ceph_cap_flush **pcf)
841 {
842 	struct file *file = iocb->ki_filp;
843 	struct inode *inode = file_inode(file);
844 	struct ceph_inode_info *ci = ceph_inode(inode);
845 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
846 	struct ceph_vino vino;
847 	struct ceph_osd_request *req;
848 	struct page **pages;
849 	struct ceph_aio_request *aio_req = NULL;
850 	int num_pages = 0;
851 	int flags;
852 	int ret;
853 	struct timespec mtime = current_fs_time(inode->i_sb);
854 	size_t count = iov_iter_count(iter);
855 	loff_t pos = iocb->ki_pos;
856 	bool write = iov_iter_rw(iter) == WRITE;
857 
858 	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
859 		return -EROFS;
860 
861 	dout("sync_direct_read_write (%s) on file %p %lld~%u\n",
862 	     (write ? "write" : "read"), file, pos, (unsigned)count);
863 
864 	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
865 	if (ret < 0)
866 		return ret;
867 
868 	if (write) {
869 		ret = invalidate_inode_pages2_range(inode->i_mapping,
870 					pos >> PAGE_SHIFT,
871 					(pos + count) >> PAGE_SHIFT);
872 		if (ret < 0)
873 			dout("invalidate_inode_pages2_range returned %d\n", ret);
874 
875 		flags = CEPH_OSD_FLAG_ORDERSNAP |
876 			CEPH_OSD_FLAG_ONDISK |
877 			CEPH_OSD_FLAG_WRITE;
878 	} else {
879 		flags = CEPH_OSD_FLAG_READ;
880 	}
881 
882 	while (iov_iter_count(iter) > 0) {
883 		u64 size = dio_get_pagev_size(iter);
884 		size_t start = 0;
885 		ssize_t len;
886 
887 		vino = ceph_vino(inode);
888 		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
889 					    vino, pos, &size, 0,
890 					    /* include a 'startsync' command */
891 					    write ? 2 : 1,
892 					    write ? CEPH_OSD_OP_WRITE :
893 						    CEPH_OSD_OP_READ,
894 					    flags, snapc,
895 					    ci->i_truncate_seq,
896 					    ci->i_truncate_size,
897 					    false);
898 		if (IS_ERR(req)) {
899 			ret = PTR_ERR(req);
900 			break;
901 		}
902 
903 		len = size;
904 		pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
905 		if (IS_ERR(pages)) {
906 			ceph_osdc_put_request(req);
907 			ret = PTR_ERR(pages);
908 			break;
909 		}
910 
911 		/*
912 		 * To simplify error handling, allow AIO only when the IO is
913 		 * within i_size or can be satisfied by a single OSD request.
914 		 */
915 		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
916 		    (len == count || pos + count <= i_size_read(inode))) {
917 			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
918 			if (aio_req) {
919 				aio_req->iocb = iocb;
920 				aio_req->write = write;
921 				INIT_LIST_HEAD(&aio_req->osd_reqs);
922 				if (write) {
923 					aio_req->mtime = mtime;
924 					swap(aio_req->prealloc_cf, *pcf);
925 				}
926 			}
927 			/* ignore error */
928 		}
929 
930 		if (write) {
931 			/*
932 			 * throw out any page cache pages in this range. this
933 			 * may block.
934 			 */
935 			truncate_inode_pages_range(inode->i_mapping, pos,
936 					(pos+len) | (PAGE_SIZE - 1));
937 
938 			osd_req_op_init(req, 1, CEPH_OSD_OP_STARTSYNC, 0);
939 			req->r_mtime = mtime;
940 		}
941 
942 		osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
943 						 false, false);
944 
945 		if (aio_req) {
946 			aio_req->total_len += len;
947 			aio_req->num_reqs++;
948 			atomic_inc(&aio_req->pending_reqs);
949 
950 			req->r_callback = ceph_aio_complete_req;
951 			req->r_inode = inode;
952 			req->r_priv = aio_req;
953 			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);
954 
955 			pos += len;
956 			iov_iter_advance(iter, len);
957 			continue;
958 		}
959 
960 		ret = ceph_osdc_start_request(req->r_osdc, req, false);
961 		if (!ret)
962 			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
963 
964 		size = i_size_read(inode);
965 		if (!write) {
966 			if (ret == -ENOENT)
967 				ret = 0;
968 			if (ret >= 0 && ret < len && pos + ret < size) {
969 				int zlen = min_t(size_t, len - ret,
970 						 size - pos - ret);
971 				ceph_zero_page_vector_range(start + ret, zlen,
972 							    pages);
973 				ret += zlen;
974 			}
975 			if (ret >= 0)
976 				len = ret;
977 		}
978 
979 		ceph_put_page_vector(pages, num_pages, false);
980 
981 		ceph_osdc_put_request(req);
982 		if (ret < 0)
983 			break;
984 
985 		pos += len;
986 		iov_iter_advance(iter, len);
987 
988 		if (!write && pos >= size)
989 			break;
990 
991 		if (write && pos > size) {
992 			if (ceph_inode_set_size(inode, pos))
993 				ceph_check_caps(ceph_inode(inode),
994 						CHECK_CAPS_AUTHONLY,
995 						NULL);
996 		}
997 	}
998 
999 	if (aio_req) {
1000 		if (aio_req->num_reqs == 0) {
1001 			kfree(aio_req);
1002 			return ret;
1003 		}
1004 
1005 		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1006 					      CEPH_CAP_FILE_RD);
1007 
1008 		while (!list_empty(&aio_req->osd_reqs)) {
1009 			req = list_first_entry(&aio_req->osd_reqs,
1010 					       struct ceph_osd_request,
1011 					       r_unsafe_item);
1012 			list_del_init(&req->r_unsafe_item);
1013 			if (ret >= 0)
1014 				ret = ceph_osdc_start_request(req->r_osdc,
1015 							      req, false);
1016 			if (ret < 0) {
1017 				req->r_result = ret;
1018 				ceph_aio_complete_req(req);
1019 			}
1020 		}
1021 		return -EIOCBQUEUED;
1022 	}
1023 
1024 	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1025 		ret = pos - iocb->ki_pos;
1026 		iocb->ki_pos = pos;
1027 	}
1028 	return ret;
1029 }
1030 
1031 /*
1032  * Synchronous write, straight from __user pointer or user pages.
1033  *
1034  * If write spans object boundary, just do multiple writes.  (For a
1035  * correct atomic write, we should e.g. take write locks on all
1036  * objects, rollback on failure, etc.)
1037  */
1038 static ssize_t
1039 ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1040 		struct ceph_snap_context *snapc)
1041 {
1042 	struct file *file = iocb->ki_filp;
1043 	struct inode *inode = file_inode(file);
1044 	struct ceph_inode_info *ci = ceph_inode(inode);
1045 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1046 	struct ceph_vino vino;
1047 	struct ceph_osd_request *req;
1048 	struct page **pages;
1049 	u64 len;
1050 	int num_pages;
1051 	int written = 0;
1052 	int flags;
1053 	int check_caps = 0;
1054 	int ret;
1055 	struct timespec mtime = current_fs_time(inode->i_sb);
1056 	size_t count = iov_iter_count(from);
1057 
1058 	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1059 		return -EROFS;
1060 
1061 	dout("sync_write on file %p %lld~%u\n", file, pos, (unsigned)count);
1062 
1063 	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
1064 	if (ret < 0)
1065 		return ret;
1066 
1067 	ret = invalidate_inode_pages2_range(inode->i_mapping,
1068 					    pos >> PAGE_SHIFT,
1069 					    (pos + count) >> PAGE_SHIFT);
1070 	if (ret < 0)
1071 		dout("invalidate_inode_pages2_range returned %d\n", ret);
1072 
1073 	flags = CEPH_OSD_FLAG_ORDERSNAP |
1074 		CEPH_OSD_FLAG_ONDISK |
1075 		CEPH_OSD_FLAG_WRITE |
1076 		CEPH_OSD_FLAG_ACK;
1077 
1078 	while ((len = iov_iter_count(from)) > 0) {
1079 		size_t left;
1080 		int n;
1081 
1082 		vino = ceph_vino(inode);
1083 		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1084 					    vino, pos, &len, 0, 1,
1085 					    CEPH_OSD_OP_WRITE, flags, snapc,
1086 					    ci->i_truncate_seq,
1087 					    ci->i_truncate_size,
1088 					    false);
1089 		if (IS_ERR(req)) {
1090 			ret = PTR_ERR(req);
1091 			break;
1092 		}
1093 
1094 		/*
1095 		 * write from beginning of first page,
1096 		 * regardless of io alignment
1097 		 */
1098 		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1099 
1100 		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1101 		if (IS_ERR(pages)) {
1102 			ret = PTR_ERR(pages);
1103 			goto out;
1104 		}
1105 
1106 		left = len;
1107 		for (n = 0; n < num_pages; n++) {
1108 			size_t plen = min_t(size_t, left, PAGE_SIZE);
1109 			ret = copy_page_from_iter(pages[n], 0, plen, from);
1110 			if (ret != plen) {
1111 				ret = -EFAULT;
1112 				break;
1113 			}
1114 			left -= ret;
1115 		}
1116 
1117 		if (ret < 0) {
1118 			ceph_release_page_vector(pages, num_pages);
1119 			goto out;
1120 		}
1121 
1122 		/* get a second commit callback */
1123 		req->r_unsafe_callback = ceph_sync_write_unsafe;
1124 		req->r_inode = inode;
1125 
1126 		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1127 						false, true);
1128 
1129 		req->r_mtime = mtime;
1130 		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1131 		if (!ret)
1132 			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1133 
1134 out:
1135 		ceph_osdc_put_request(req);
1136 		if (ret == 0) {
1137 			pos += len;
1138 			written += len;
1139 
1140 			if (pos > i_size_read(inode)) {
1141 				check_caps = ceph_inode_set_size(inode, pos);
1142 				if (check_caps)
1143 					ceph_check_caps(ceph_inode(inode),
1144 							CHECK_CAPS_AUTHONLY,
1145 							NULL);
1146 			}
1147 		} else
1148 			break;
1149 	}
1150 
1151 	if (ret != -EOLDSNAPC && written > 0) {
1152 		ret = written;
1153 		iocb->ki_pos = pos;
1154 	}
1155 	return ret;
1156 }
1157 
1158 /*
1159  * Wrap generic_file_aio_read with checks for cap bits on the inode.
1160  * Atomically grab references, so that those bits are not released
1161  * back to the MDS mid-read.
1162  *
1163  * Hmm, the sync read case isn't actually async... should it be?
1164  */
1165 static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1166 {
1167 	struct file *filp = iocb->ki_filp;
1168 	struct ceph_file_info *fi = filp->private_data;
1169 	size_t len = iov_iter_count(to);
1170 	struct inode *inode = file_inode(filp);
1171 	struct ceph_inode_info *ci = ceph_inode(inode);
1172 	struct page *pinned_page = NULL;
1173 	ssize_t ret;
1174 	int want, got = 0;
1175 	int retry_op = 0, read = 0;
1176 
1177 again:
1178 	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1179 	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1180 
1181 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1182 		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
1183 	else
1184 		want = CEPH_CAP_FILE_CACHE;
1185 	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
1186 	if (ret < 0)
1187 		return ret;
1188 
1189 	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1190 	    (iocb->ki_flags & IOCB_DIRECT) ||
1191 	    (fi->flags & CEPH_F_SYNC)) {
1192 
1193 		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1194 		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1195 		     ceph_cap_string(got));
1196 
1197 		if (ci->i_inline_version == CEPH_INLINE_NONE) {
1198 			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1199 				ret = ceph_direct_read_write(iocb, to,
1200 							     NULL, NULL);
1201 				if (ret >= 0 && ret < len)
1202 					retry_op = CHECK_EOF;
1203 			} else {
1204 				ret = ceph_sync_read(iocb, to, &retry_op);
1205 			}
1206 		} else {
1207 			retry_op = READ_INLINE;
1208 		}
1209 	} else {
1210 		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1211 		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1212 		     ceph_cap_string(got));
1213 
1214 		ret = generic_file_read_iter(iocb, to);
1215 	}
1216 	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1217 	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1218 	if (pinned_page) {
1219 		put_page(pinned_page);
1220 		pinned_page = NULL;
1221 	}
1222 	ceph_put_cap_refs(ci, got);
1223 	if (retry_op > HAVE_RETRIED && ret >= 0) {
1224 		int statret;
1225 		struct page *page = NULL;
1226 		loff_t i_size;
1227 		if (retry_op == READ_INLINE) {
1228 			page = __page_cache_alloc(GFP_KERNEL);
1229 			if (!page)
1230 				return -ENOMEM;
1231 		}
1232 
1233 		statret = __ceph_do_getattr(inode, page,
1234 					    CEPH_STAT_CAP_INLINE_DATA, !!page);
1235 		if (statret < 0) {
1236 			if (page) __free_page(page);
1237 			if (statret == -ENODATA) {
1238 				BUG_ON(retry_op != READ_INLINE);
1239 				goto again;
1240 			}
1241 			return statret;
1242 		}
1243 
1244 		i_size = i_size_read(inode);
1245 		if (retry_op == READ_INLINE) {
1246 			BUG_ON(ret > 0 || read > 0);
1247 			if (iocb->ki_pos < i_size &&
1248 			    iocb->ki_pos < PAGE_SIZE) {
1249 				loff_t end = min_t(loff_t, i_size,
1250 						   iocb->ki_pos + len);
1251 				end = min_t(loff_t, end, PAGE_SIZE);
1252 				if (statret < end)
1253 					zero_user_segment(page, statret, end);
1254 				ret = copy_page_to_iter(page,
1255 						iocb->ki_pos & ~PAGE_MASK,
1256 						end - iocb->ki_pos, to);
1257 				iocb->ki_pos += ret;
1258 				read += ret;
1259 			}
1260 			if (iocb->ki_pos < i_size && read < len) {
1261 				size_t zlen = min_t(size_t, len - read,
1262 						    i_size - iocb->ki_pos);
1263 				ret = iov_iter_zero(zlen, to);
1264 				iocb->ki_pos += ret;
1265 				read += ret;
1266 			}
1267 			__free_pages(page, 0);
1268 			return read;
1269 		}
1270 
1271 		/* hit EOF or hole? */
1272 		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1273 		    ret < len) {
1274 			dout("sync_read hit hole, ppos %lld < size %lld"
1275 			     ", reading more\n", iocb->ki_pos, i_size);
1276 
1277 			read += ret;
1278 			len -= ret;
1279 			retry_op = HAVE_RETRIED;
1280 			goto again;
1281 		}
1282 	}
1283 
1284 	if (ret >= 0)
1285 		ret += read;
1286 
1287 	return ret;
1288 }
1289 
1290 /*
1291  * Take cap references to avoid releasing caps to MDS mid-write.
1292  *
1293  * If we are synchronous, and write with an old snap context, the OSD
1294  * may return EOLDSNAPC.  In that case, retry the write.. _after_
1295  * dropping our cap refs and allowing the pending snap to logically
1296  * complete _before_ this write occurs.
1297  *
1298  * If we are near ENOSPC, write synchronously.
1299  */
1300 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1301 {
1302 	struct file *file = iocb->ki_filp;
1303 	struct ceph_file_info *fi = file->private_data;
1304 	struct inode *inode = file_inode(file);
1305 	struct ceph_inode_info *ci = ceph_inode(inode);
1306 	struct ceph_osd_client *osdc =
1307 		&ceph_sb_to_client(inode->i_sb)->client->osdc;
1308 	struct ceph_cap_flush *prealloc_cf;
1309 	ssize_t count, written = 0;
1310 	int err, want, got;
1311 	loff_t pos;
1312 
1313 	if (ceph_snap(inode) != CEPH_NOSNAP)
1314 		return -EROFS;
1315 
1316 	prealloc_cf = ceph_alloc_cap_flush();
1317 	if (!prealloc_cf)
1318 		return -ENOMEM;
1319 
1320 	inode_lock(inode);
1321 
1322 	/* We can write back this queue in page reclaim */
1323 	current->backing_dev_info = inode_to_bdi(inode);
1324 
1325 	if (iocb->ki_flags & IOCB_APPEND) {
1326 		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1327 		if (err < 0)
1328 			goto out;
1329 	}
1330 
1331 	err = generic_write_checks(iocb, from);
1332 	if (err <= 0)
1333 		goto out;
1334 
1335 	pos = iocb->ki_pos;
1336 	count = iov_iter_count(from);
1337 	err = file_remove_privs(file);
1338 	if (err)
1339 		goto out;
1340 
1341 	err = file_update_time(file);
1342 	if (err)
1343 		goto out;
1344 
1345 	if (ci->i_inline_version != CEPH_INLINE_NONE) {
1346 		err = ceph_uninline_data(file, NULL);
1347 		if (err < 0)
1348 			goto out;
1349 	}
1350 
1351 retry_snap:
1352 	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL)) {
1353 		err = -ENOSPC;
1354 		goto out;
1355 	}
1356 
1357 	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1358 	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1359 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1360 		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1361 	else
1362 		want = CEPH_CAP_FILE_BUFFER;
1363 	got = 0;
1364 	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
1365 			    &got, NULL);
1366 	if (err < 0)
1367 		goto out;
1368 
1369 	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1370 	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1371 
1372 	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1373 	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC)) {
1374 		struct ceph_snap_context *snapc;
1375 		struct iov_iter data;
1376 		inode_unlock(inode);
1377 
1378 		spin_lock(&ci->i_ceph_lock);
1379 		if (__ceph_have_pending_cap_snap(ci)) {
1380 			struct ceph_cap_snap *capsnap =
1381 					list_last_entry(&ci->i_cap_snaps,
1382 							struct ceph_cap_snap,
1383 							ci_item);
1384 			snapc = ceph_get_snap_context(capsnap->context);
1385 		} else {
1386 			BUG_ON(!ci->i_head_snapc);
1387 			snapc = ceph_get_snap_context(ci->i_head_snapc);
1388 		}
1389 		spin_unlock(&ci->i_ceph_lock);
1390 
1391 		/* we might need to revert back to that point */
1392 		data = *from;
1393 		if (iocb->ki_flags & IOCB_DIRECT)
1394 			written = ceph_direct_read_write(iocb, &data, snapc,
1395 							 &prealloc_cf);
1396 		else
1397 			written = ceph_sync_write(iocb, &data, pos, snapc);
1398 		if (written == -EOLDSNAPC) {
1399 			dout("aio_write %p %llx.%llx %llu~%u "
1400 				"got EOLDSNAPC, retrying\n",
1401 				inode, ceph_vinop(inode),
1402 				pos, (unsigned)count);
1403 			inode_lock(inode);
1404 			goto retry_snap;
1405 		}
1406 		if (written > 0)
1407 			iov_iter_advance(from, written);
1408 		ceph_put_snap_context(snapc);
1409 	} else {
1410 		loff_t old_size = i_size_read(inode);
1411 		/*
1412 		 * No need to acquire the i_truncate_mutex: the MDS
1413 		 * revokes Fwb caps before sending a truncate message
1414 		 * to us, so we can't hold the Fwb cap while a
1415 		 * vmtruncate is pending.  Hence write and vmtruncate
1416 		 * cannot run at the same time.
1417 		 */
1418 		written = generic_perform_write(file, from, pos);
1419 		if (likely(written >= 0))
1420 			iocb->ki_pos = pos + written;
1421 		if (i_size_read(inode) > old_size)
1422 			ceph_fscache_update_objectsize(inode);
1423 		inode_unlock(inode);
1424 	}
1425 
1426 	if (written >= 0) {
1427 		int dirty;
1428 		spin_lock(&ci->i_ceph_lock);
1429 		ci->i_inline_version = CEPH_INLINE_NONE;
1430 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1431 					       &prealloc_cf);
1432 		spin_unlock(&ci->i_ceph_lock);
1433 		if (dirty)
1434 			__mark_inode_dirty(inode, dirty);
1435 	}
1436 
1437 	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
1438 	     inode, ceph_vinop(inode), pos, (unsigned)count,
1439 	     ceph_cap_string(got));
1440 	ceph_put_cap_refs(ci, got);
1441 
1442 	if (written >= 0) {
1443 		if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_NEARFULL))
1444 			iocb->ki_flags |= IOCB_DSYNC;
1445 
1446 		written = generic_write_sync(iocb, written);
1447 	}
1448 
1449 	goto out_unlocked;
1450 
1451 out:
1452 	inode_unlock(inode);
1453 out_unlocked:
1454 	ceph_free_cap_flush(prealloc_cf);
1455 	current->backing_dev_info = NULL;
1456 	return written ? written : err;
1457 }
1458 
1459 /*
1460  * llseek.  be sure to verify file size on SEEK_END.
1461  */
1462 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1463 {
1464 	struct inode *inode = file->f_mapping->host;
1465 	loff_t i_size;
1466 	int ret;
1467 
1468 	inode_lock(inode);
1469 
1470 	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1471 		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1472 		if (ret < 0) {
1473 			offset = ret;
1474 			goto out;
1475 		}
1476 	}
1477 
1478 	i_size = i_size_read(inode);
1479 	switch (whence) {
1480 	case SEEK_END:
1481 		offset += i_size;
1482 		break;
1483 	case SEEK_CUR:
1484 		/*
1485 		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
1486 		 * position-querying operation.  Avoid rewriting the "same"
1487 		 * f_pos value back to the file because a concurrent read(),
1488 		 * write() or lseek() might have altered it
1489 		 */
1490 		if (offset == 0) {
1491 			offset = file->f_pos;
1492 			goto out;
1493 		}
1494 		offset += file->f_pos;
1495 		break;
1496 	case SEEK_DATA:
1497 		if (offset >= i_size) {
1498 			offset = -ENXIO;
1499 			goto out;
1500 		}
1501 		break;
1502 	case SEEK_HOLE:
1503 		if (offset >= i_size) {
1504 			offset = -ENXIO;
1505 			goto out;
1506 		}
1507 		offset = i_size;
1508 		break;
1509 	}
1510 
1511 	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
1512 
1513 out:
1514 	inode_unlock(inode);
1515 	return offset;
1516 }
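/*
 * Userspace view of the SEEK_DATA/SEEK_HOLE behaviour above (a sketch,
 * not part of this file; the path is hypothetical).  Since no
 * fine-grained hole information is reported, SEEK_DATA below EOF
 * returns the offset itself, SEEK_HOLE below EOF returns i_size, and
 * both fail with ENXIO at or past EOF.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static void probe_holes(const char *path)
{
	int fd = open(path, O_RDONLY);
	off_t data = lseek(fd, 0, SEEK_DATA); /* 0 for a non-empty file */
	off_t hole = lseek(fd, 0, SEEK_HOLE); /* i_size */

	(void)data;
	(void)hole;
	close(fd);
}
#endif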
1517 
1518 static inline void ceph_zero_partial_page(
1519 	struct inode *inode, loff_t offset, unsigned size)
1520 {
1521 	struct page *page;
1522 	pgoff_t index = offset >> PAGE_SHIFT;
1523 
1524 	page = find_lock_page(inode->i_mapping, index);
1525 	if (page) {
1526 		wait_on_page_writeback(page);
1527 		zero_user(page, offset & (PAGE_SIZE - 1), size);
1528 		unlock_page(page);
1529 		put_page(page);
1530 	}
1531 }
1532 
1533 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1534 				      loff_t length)
1535 {
1536 	loff_t nearly = round_up(offset, PAGE_SIZE);
1537 	if (offset < nearly) {
1538 		loff_t size = nearly - offset;
1539 		if (length < size)
1540 			size = length;
1541 		ceph_zero_partial_page(inode, offset, size);
1542 		offset += size;
1543 		length -= size;
1544 	}
1545 	if (length >= PAGE_SIZE) {
1546 		loff_t size = round_down(length, PAGE_SIZE);
1547 		truncate_pagecache_range(inode, offset, offset + size - 1);
1548 		offset += size;
1549 		length -= size;
1550 	}
1551 	if (length)
1552 		ceph_zero_partial_page(inode, offset, length);
1553 }
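/*
 * Worked example (PAGE_SIZE == 4096): offset 3000, length 10000 is
 * split into a 1096-byte partial zero of the first page, a
 * truncate_pagecache_range() over the two fully covered pages
 * [4096, 12288), and a 712-byte partial zero of the final page.
 */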
1554 
1555 static int ceph_zero_partial_object(struct inode *inode,
1556 				    loff_t offset, loff_t *length)
1557 {
1558 	struct ceph_inode_info *ci = ceph_inode(inode);
1559 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1560 	struct ceph_osd_request *req;
1561 	int ret = 0;
1562 	loff_t zero = 0;
1563 	int op;
1564 
1565 	if (!length) {
1566 		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
1567 		length = &zero;
1568 	} else {
1569 		op = CEPH_OSD_OP_ZERO;
1570 	}
1571 
1572 	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1573 					ceph_vino(inode),
1574 					offset, length,
1575 					0, 1, op,
1576 					CEPH_OSD_FLAG_WRITE |
1577 					CEPH_OSD_FLAG_ONDISK,
1578 					NULL, 0, 0, false);
1579 	if (IS_ERR(req)) {
1580 		ret = PTR_ERR(req);
1581 		goto out;
1582 	}
1583 
1584 	req->r_mtime = inode->i_mtime;
1585 	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1586 	if (!ret) {
1587 		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1588 		if (ret == -ENOENT)
1589 			ret = 0;
1590 	}
1591 	ceph_osdc_put_request(req);
1592 
1593 out:
1594 	return ret;
1595 }
1596 
1597 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
1598 {
1599 	int ret = 0;
1600 	struct ceph_inode_info *ci = ceph_inode(inode);
1601 	s32 stripe_unit = ceph_file_layout_su(ci->i_layout);
1602 	s32 stripe_count = ceph_file_layout_stripe_count(ci->i_layout);
1603 	s32 object_size = ceph_file_layout_object_size(ci->i_layout);
1604 	u64 object_set_size = (u64) object_size * stripe_count;
1605 	u64 nearly, t;
1606 
1607 	/* round offset up to next period boundary */
1608 	nearly = offset + object_set_size - 1;
1609 	t = nearly;
1610 	nearly -= do_div(t, object_set_size);
1611 
1612 	while (length && offset < nearly) {
1613 		loff_t size = length;
1614 		ret = ceph_zero_partial_object(inode, offset, &size);
1615 		if (ret < 0)
1616 			return ret;
1617 		offset += size;
1618 		length -= size;
1619 	}
1620 	while (length >= object_set_size) {
1621 		int i;
1622 		loff_t pos = offset;
1623 		for (i = 0; i < stripe_count; ++i) {
1624 			ret = ceph_zero_partial_object(inode, pos, NULL);
1625 			if (ret < 0)
1626 				return ret;
1627 			pos += stripe_unit;
1628 		}
1629 		offset += object_set_size;
1630 		length -= object_set_size;
1631 	}
1632 	while (length) {
1633 		loff_t size = length;
1634 		ret = ceph_zero_partial_object(inode, offset, &size);
1635 		if (ret < 0)
1636 			return ret;
1637 		offset += size;
1638 		length -= size;
1639 	}
1640 	return ret;
1641 }
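/*
 * Example (hypothetical layout: stripe_unit 1M, stripe_count 2,
 * object_size 4M, so object_set_size is 8M): punching 20M at offset 6M
 * zeroes 2M piecewise up to the 8M period boundary, deletes two whole
 * 8M object sets (the middle loop sends one zero-length request per
 * stripe, CEPH_OSD_OP_DELETE here), then zeroes the trailing 2M
 * piecewise.
 */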
1642 
1643 static long ceph_fallocate(struct file *file, int mode,
1644 				loff_t offset, loff_t length)
1645 {
1646 	struct ceph_file_info *fi = file->private_data;
1647 	struct inode *inode = file_inode(file);
1648 	struct ceph_inode_info *ci = ceph_inode(inode);
1649 	struct ceph_osd_client *osdc =
1650 		&ceph_inode_to_client(inode)->client->osdc;
1651 	struct ceph_cap_flush *prealloc_cf;
1652 	int want, got = 0;
1653 	int dirty;
1654 	int ret = 0;
1655 	loff_t endoff = 0;
1656 	loff_t size;
1657 
1658 	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1659 		return -EOPNOTSUPP;
1660 
1661 	if (!S_ISREG(inode->i_mode))
1662 		return -EOPNOTSUPP;
1663 
1664 	prealloc_cf = ceph_alloc_cap_flush();
1665 	if (!prealloc_cf)
1666 		return -ENOMEM;
1667 
1668 	inode_lock(inode);
1669 
1670 	if (ceph_snap(inode) != CEPH_NOSNAP) {
1671 		ret = -EROFS;
1672 		goto unlock;
1673 	}
1674 
1675 	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) &&
1676 		!(mode & FALLOC_FL_PUNCH_HOLE)) {
1677 		ret = -ENOSPC;
1678 		goto unlock;
1679 	}
1680 
1681 	if (ci->i_inline_version != CEPH_INLINE_NONE) {
1682 		ret = ceph_uninline_data(file, NULL);
1683 		if (ret < 0)
1684 			goto unlock;
1685 	}
1686 
1687 	size = i_size_read(inode);
1688 	if (!(mode & FALLOC_FL_KEEP_SIZE))
1689 		endoff = offset + length;
1690 
1691 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1692 		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
1693 	else
1694 		want = CEPH_CAP_FILE_BUFFER;
1695 
1696 	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
1697 	if (ret < 0)
1698 		goto unlock;
1699 
1700 	if (mode & FALLOC_FL_PUNCH_HOLE) {
1701 		if (offset < size)
1702 			ceph_zero_pagecache_range(inode, offset, length);
1703 		ret = ceph_zero_objects(inode, offset, length);
1704 	} else if (endoff > size) {
1705 		truncate_pagecache_range(inode, size, -1);
1706 		if (ceph_inode_set_size(inode, endoff))
1707 			ceph_check_caps(ceph_inode(inode),
1708 				CHECK_CAPS_AUTHONLY, NULL);
1709 	}
1710 
1711 	if (!ret) {
1712 		spin_lock(&ci->i_ceph_lock);
1713 		ci->i_inline_version = CEPH_INLINE_NONE;
1714 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1715 					       &prealloc_cf);
1716 		spin_unlock(&ci->i_ceph_lock);
1717 		if (dirty)
1718 			__mark_inode_dirty(inode, dirty);
1719 	}
1720 
1721 	ceph_put_cap_refs(ci, got);
1722 unlock:
1723 	inode_unlock(inode);
1724 	ceph_free_cap_flush(prealloc_cf);
1725 	return ret;
1726 }
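/*
 * Userspace sketch of the two fallocate() modes accepted above (not
 * part of this file; the fd is assumed to refer to a regular CephFS
 * file).  PUNCH_HOLE must be paired with KEEP_SIZE per fallocate(2).
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

static int punch_and_extend(int fd)
{
	int ret;

	/* zero 1 MiB at offset 4 KiB without changing i_size */
	ret = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			4096, 1 << 20);
	if (ret)
		return ret;

	/* allocate/extend the file out to 8 MiB */
	return fallocate(fd, 0, 0, 8 << 20);
}
#endif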
1727 
1728 const struct file_operations ceph_file_fops = {
1729 	.open = ceph_open,
1730 	.release = ceph_release,
1731 	.llseek = ceph_llseek,
1732 	.read_iter = ceph_read_iter,
1733 	.write_iter = ceph_write_iter,
1734 	.mmap = ceph_mmap,
1735 	.fsync = ceph_fsync,
1736 	.lock = ceph_lock,
1737 	.flock = ceph_flock,
1738 	.splice_read = generic_file_splice_read,
1739 	.splice_write = iter_file_splice_write,
1740 	.unlocked_ioctl = ceph_ioctl,
1741 	.compat_ioctl	= ceph_ioctl,
1742 	.fallocate	= ceph_fallocate,
1743 };
1744 
1745