// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>
#include <linux/ktime.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"
#include "metric.h"

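/*
 * Translate kernel open(2) flags into their Ceph wire-protocol
 * equivalents.  Flags with no wire counterpart are only reported
 * via dout(), not sent to the MDS.
 */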
static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

	flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64

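/*
 * Pin the pages covered by @iter (up to @maxsize bytes) and fill
 * @bvecs with them, batching iov_iter_get_pages() through a small
 * on-stack page array.  Return the number of bytes pinned, or an
 * error if nothing could be pinned.
 */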
static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
				struct bio_vec *bvecs)
{
	size_t size = 0;
	int bvec_idx = 0;

	if (maxsize > iov_iter_count(iter))
		maxsize = iov_iter_count(iter);

	while (size < maxsize) {
		struct page *pages[ITER_GET_BVECS_PAGES];
		ssize_t bytes;
		size_t start;
		int idx = 0;

		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
					   ITER_GET_BVECS_PAGES, &start);
		if (bytes < 0)
			return size ?: bytes;

		iov_iter_advance(iter, bytes);
		size += bytes;

		for ( ; bytes; idx++, bvec_idx++) {
			struct bio_vec bv = {
				.bv_page = pages[idx],
				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
				.bv_offset = start,
			};

			bvecs[bvec_idx] = bv;
			bytes -= bv.bv_len;
			start = 0;
		}
	}

	return size;
}

/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
				    struct bio_vec **bvecs, int *num_bvecs)
{
	struct bio_vec *bv;
	size_t orig_count = iov_iter_count(iter);
	ssize_t bytes;
	int npages;

	iov_iter_truncate(iter, maxsize);
	npages = iov_iter_npages(iter, INT_MAX);
	iov_iter_reexpand(iter, orig_count);

	/*
	 * __iter_get_bvecs() may populate only part of the array -- zero it
	 * out.
	 */
	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
	if (!bv)
		return -ENOMEM;

	bytes = __iter_get_bvecs(iter, maxsize, bv);
	if (bytes < 0) {
		/*
		 * No pages were pinned -- just free the array.
		 */
		kvfree(bv);
		return bytes;
	}

	*bvecs = bv;
	*num_bvecs = npages;
	return bytes;
}

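/*
 * Unpin the pages in a bio_vec array built by iter_get_bvecs_alloc(),
 * dirtying them first if they were the destination of a read, then
 * free the array itself.
 */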
static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
	int i;

	for (i = 0; i < num_bvecs; i++) {
		if (bvecs[i].bv_page) {
			if (should_dirty)
				set_page_dirty_lock(bvecs[i].bv_page);
			put_page(bvecs[i].bv_page);
		}
	}
	kvfree(bvecs);
}

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

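/*
 * Allocate the per-file private data (ceph_file_info, or
 * ceph_dir_file_info for a directory) and take an fmode reference
 * on the ceph inode.
 */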
static int ceph_init_file_info(struct inode *inode, struct file *file,
					int fmode, bool isdir)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi;

	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
			inode->i_mode, isdir ? "dir" : "regular");
	BUG_ON(inode->i_fop->release != ceph_release);

	if (isdir) {
		struct ceph_dir_file_info *dfi =
			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
		if (!dfi)
			return -ENOMEM;

		file->private_data = dfi;
		fi = &dfi->file_info;
		dfi->next_offset = 2;
		dfi->readdir_cache_idx = -1;
	} else {
		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!fi)
			return -ENOMEM;

		file->private_data = fi;
	}

	ceph_get_fmode(ci, fmode, 1);
	fi->fmode = fmode;

	spin_lock_init(&fi->rw_contexts_lock);
	INIT_LIST_HEAD(&fi->rw_contexts);
	fi->meta_err = errseq_sample(&ci->i_meta_err);
	fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

	return 0;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
		fallthrough;
	case S_IFDIR:
		ret = ceph_init_file_info(inode, file, fmode,
						S_ISDIR(inode->i_mode));
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * Try to renew caps after the session gets killed.
 */
int ceph_renew_caps(struct inode *inode, int fmode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	__ceph_touch_fmode(ci, mdsc, fmode);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *fi = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (fi) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}

/* Clone the layout from a synchronous create, if the dir now has Dc caps */
static void
cache_file_layout(struct inode *dst, struct inode *src)
{
	struct ceph_inode_info *cdst = ceph_inode(dst);
	struct ceph_inode_info *csrc = ceph_inode(src);

	spin_lock(&cdst->i_ceph_lock);
	if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
	    !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
		memcpy(&cdst->i_cached_layout, &csrc->i_layout,
			sizeof(cdst->i_cached_layout));
		rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
				   ceph_try_get_string(csrc->i_layout.pool_ns));
	}
	spin_unlock(&cdst->i_ceph_lock);
}

/*
 * Try to set up an async create. We need caps, a file layout, an inode
 * number, and either a lease on the dentry or complete dir info. If any
 * of those criteria are not satisfied, then return false and the caller
 * can go synchronous.
 */
static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
				 struct ceph_file_layout *lo, u64 *pino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
	u64 ino;

	spin_lock(&ci->i_ceph_lock);
	/* No auth cap means no chance for Dc caps */
	if (!ci->i_auth_cap)
		goto no_async;

	/* Any delegated inos? */
	if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
		goto no_async;

	if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
		goto no_async;

	if ((__ceph_caps_issued(ci, NULL) & want) != want)
		goto no_async;

	if (d_in_lookup(dentry)) {
		if (!__ceph_dir_is_complete(ci))
			goto no_async;
		spin_lock(&dentry->d_lock);
		di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
		spin_unlock(&dentry->d_lock);
	} else if (atomic_read(&ci->i_shared_gen) !=
		   READ_ONCE(di->lease_shared_gen)) {
		goto no_async;
	}

	ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
	if (!ino)
		goto no_async;

	*pino = ino;
	ceph_take_cap_refs(ci, want, false);
	memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
	rcu_assign_pointer(lo->pool_ns,
			   ceph_try_get_string(ci->i_cached_layout.pool_ns));
	got = want;
no_async:
	spin_unlock(&ci->i_ceph_lock);
	return got;
}

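/*
 * Return a delegated inode number to the MDS session it was taken
 * from, e.g. when submitting an async create request failed.
 */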
static void restore_deleg_ino(struct inode *dir, u64 ino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_session *s = NULL;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		s = ceph_get_mds_session(ci->i_auth_cap->session);
	spin_unlock(&ci->i_ceph_lock);
	if (s) {
		int err = ceph_restore_deleg_ino(s, ino);
		if (err)
			pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
				ino, err);
		ceph_put_mds_session(s);
	}
}

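/*
 * Callback for an async create request.  On failure, propagate the
 * error to the affected mappings and drop the dentry; on completion,
 * clear CEPH_I_ASYNC_CREATE and kick any cap flushes that were
 * waiting for the create to finish.
 */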
static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
				 struct ceph_mds_request *req)
{
	int result = req->r_err ? req->r_err :
			le32_to_cpu(req->r_reply_info.head->result);

	if (result == -EJUKEBOX)
		goto out;

	mapping_set_error(req->r_parent->i_mapping, result);

	if (result) {
		struct dentry *dentry = req->r_dentry;
		int pathlen = 0;
		u64 base = 0;
		char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
						  &base, 0);

		ceph_dir_clear_complete(req->r_parent);
		if (!d_unhashed(dentry))
			d_drop(dentry);

		/* FIXME: start returning I/O errors on all accesses? */
		pr_warn("ceph: async create failure path=(%llx)%s result=%d!\n",
			base, IS_ERR(path) ? "<<bad>>" : path, result);
		ceph_mdsc_free_path(path, pathlen);
	}

	if (req->r_target_inode) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		u64 ino = ceph_vino(req->r_target_inode).ino;

		if (req->r_deleg_ino != ino)
			pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
				__func__, req->r_err, req->r_deleg_ino, ino);
		mapping_set_error(req->r_target_inode->i_mapping, result);

		spin_lock(&ci->i_ceph_lock);
		if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
			ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
			wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
		}
		ceph_kick_flushing_inode_caps(req->r_session, ci);
		spin_unlock(&ci->i_ceph_lock);
	} else {
		pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
			req->r_deleg_ino);
	}
out:
	ceph_mdsc_release_dir_caps(req);
}

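/*
 * Finish an async create locally: synthesize the reply the MDS will
 * eventually send for the delegated inode, fill and install the new
 * inode, splice the dentry, and complete the open.
 */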
static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
				    struct file *file, umode_t mode,
				    struct ceph_mds_request *req,
				    struct ceph_acl_sec_ctx *as_ctx,
				    struct ceph_file_layout *lo)
{
	int ret;
	char xattr_buf[4];
	struct ceph_mds_reply_inode in = { };
	struct ceph_mds_reply_info_in iinfo = { .in = &in };
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct inode *inode;
	struct timespec64 now;
	struct ceph_vino vino = { .ino = req->r_deleg_ino,
				  .snap = CEPH_NOSNAP };

	ktime_get_real_ts64(&now);

	inode = ceph_get_inode(dentry->d_sb, vino);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	iinfo.inline_version = CEPH_INLINE_NONE;
	iinfo.change_attr = 1;
	ceph_encode_timespec64(&iinfo.btime, &now);

	iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
	iinfo.xattr_data = xattr_buf;
	memset(iinfo.xattr_data, 0, iinfo.xattr_len);

	in.ino = cpu_to_le64(vino.ino);
	in.snapid = cpu_to_le64(CEPH_NOSNAP);
	in.version = cpu_to_le64(1);	// ???
	in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
	in.cap.cap_id = cpu_to_le64(1);
	in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
	in.cap.flags = CEPH_CAP_FLAG_AUTH;
	in.ctime = in.mtime = in.atime = iinfo.btime;
	in.mode = cpu_to_le32((u32)mode);
	in.truncate_seq = cpu_to_le32(1);
	in.truncate_size = cpu_to_le64(-1ULL);
	in.xattr_version = cpu_to_le64(1);
	in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
	in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_mode & S_ISGID ?
				dir->i_gid : current_fsgid()));
	in.nlink = cpu_to_le32(1);
	in.max_size = cpu_to_le64(lo->stripe_unit);

	ceph_file_layout_to_legacy(lo, &in.layout);

	ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
			      req->r_fmode, NULL);
	if (ret) {
		dout("%s failed to fill inode: %d\n", __func__, ret);
		ceph_dir_clear_complete(dir);
		if (!d_unhashed(dentry))
			d_drop(dentry);
		if (inode->i_state & I_NEW)
			discard_new_inode(inode);
	} else {
		struct dentry *dn;

		dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
			vino.ino, ceph_ino(dir), dentry->d_name.name);
		ceph_dir_clear_ordered(dir);
		ceph_init_inode_acls(inode, as_ctx);
		if (inode->i_state & I_NEW) {
			/*
			 * If it's not I_NEW, then someone created this before
			 * we got here. Assume the server is aware of it at
			 * that point and don't worry about setting
			 * CEPH_I_ASYNC_CREATE.
			 */
			ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
			unlock_new_inode(inode);
		}
		if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
			if (!d_unhashed(dentry))
				d_drop(dentry);
			dn = d_splice_alias(inode, dentry);
			WARN_ON_ONCE(dn && dn != dentry);
		}
		file->f_mode |= FMODE_CREATED;
		ret = finish_open(file, dentry, ceph_open);
	}
	return ret;
}

/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acl_sec_ctx as_ctx = {};
	bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	if (flags & O_CREAT) {
		if (ceph_quota_is_max_files_exceeded(dir))
			return -EDQUOT;
		err = ceph_pre_init_acls(dir, &mode, &as_ctx);
		if (err < 0)
			return err;
		err = ceph_security_init_secctx(dentry, mode, &as_ctx);
		if (err < 0)
			goto out_ctx;
	} else if (!d_in_lookup(dentry)) {
		/* If it's not being looked up, it's negative */
		return -ENOENT;
	}
retry:
	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_ctx;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);
	req->r_parent = dir;

	if (flags & O_CREAT) {
		struct ceph_file_layout lo;

		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (as_ctx.pagelist) {
			req->r_pagelist = as_ctx.pagelist;
			as_ctx.pagelist = NULL;
		}
		if (try_async &&
		    (req->r_dir_caps =
		      try_prep_async_create(dir, dentry, &lo,
					    &req->r_deleg_ino))) {
			set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
			req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
			req->r_callback = ceph_async_create_cb;
			err = ceph_mdsc_submit_request(mdsc, dir, req);
			if (!err) {
				err = ceph_finish_async_create(dir, dentry,
							file, mode, req,
							&as_ctx, &lo);
			} else if (err == -EJUKEBOX) {
				restore_deleg_ino(dir, req->r_deleg_ino);
				ceph_mdsc_put_request(req);
				try_async = false;
				goto retry;
			}
			goto out_req;
		}
	}

	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	dentry = ceph_handle_snapdir(req, dentry, err);
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_req;
	}
	err = 0;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			struct inode *newino = d_inode(dentry);

			cache_file_layout(dir, newino);
			ceph_init_inode_acls(newino, &as_ctx);
			file->f_mode |= FMODE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open);
	}
out_req:
	ceph_mdsc_put_request(req);
out_ctx:
	ceph_release_acl_sec_ctx(&as_ctx);
	dout("atomic_open result=%d\n", err);
	return err;
}

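/*
 * Release the per-file private data allocated by ceph_init_file_info()
 * and drop the matching fmode reference.
 */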
int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (S_ISDIR(inode->i_mode)) {
		struct ceph_dir_file_info *dfi = file->private_data;
		dout("release inode %p dir file %p\n", inode, file);
		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

		ceph_put_fmode(ci, dfi->file_info.fmode, 1);

		if (dfi->last_readdir)
			ceph_mdsc_put_request(dfi->last_readdir);
		kfree(dfi->last_name);
		kfree(dfi->dir_info);
		kmem_cache_free(ceph_dir_file_cachep, dfi);
	} else {
		struct ceph_file_info *fi = file->private_data;
		dout("release inode %p regular file %p\n", inode, file);
		WARN_ON(!list_empty(&fi->rw_contexts));

		ceph_put_fmode(ci, fi->fmode, 1);

		kmem_cache_free(ceph_file_cachep, fi);
	}

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

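/* values for the retry_op argument threaded through the sync read paths */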
enum {
	HAVE_RETRIED = 1,
	CHECK_EOF =    2,
	READ_INLINE =  3,
};

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.  (That's not
 * atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *retry_op)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	ssize_t ret;
	u64 off = iocb->ki_pos;
	u64 len = iov_iter_count(to);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping,
					   off, off + len - 1);
	if (ret < 0)
		return ret;

	ret = 0;
	while ((len = iov_iter_count(to)) > 0) {
		struct ceph_osd_request *req;
		struct page **pages;
		int num_pages;
		size_t page_off;
		u64 i_size;
		bool more;
		int idx;
		size_t left;

		req = ceph_osdc_new_request(osdc, &ci->i_layout,
					ci->i_vino, off, &len, 0, 1,
					CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
					NULL, ci->i_truncate_seq,
					ci->i_truncate_size, false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		more = len < iov_iter_count(to);

		num_pages = calc_pages_for(off, len);
		page_off = off & ~PAGE_MASK;
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
						 false, false);
		ret = ceph_osdc_start_request(osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(osdc, req);

		ceph_update_read_metrics(&fsc->mdsc->metric,
					 req->r_start_latency,
					 req->r_end_latency,
					 ret);

		ceph_osdc_put_request(req);

		i_size = i_size_read(inode);
		dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
		     off, len, ret, i_size, (more ? " MORE" : ""));

		if (ret == -ENOENT)
			ret = 0;
		if (ret >= 0 && ret < len && (off + ret < i_size)) {
			int zlen = min(len - ret, i_size - off - ret);
			int zoff = page_off + ret;
			dout("sync_read zero gap %llu~%llu\n",
			     off + ret, off + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		idx = 0;
		left = ret > 0 ? ret : 0;
		while (left > 0) {
			size_t len, copied;
			page_off = off & ~PAGE_MASK;
			len = min_t(size_t, left, PAGE_SIZE - page_off);
			SetPageUptodate(pages[idx]);
			copied = copy_page_to_iter(pages[idx++],
						   page_off, len, to);
			off += copied;
			left -= copied;
			if (copied < len) {
				ret = -EFAULT;
				break;
			}
		}
		ceph_release_page_vector(pages, num_pages);

		if (ret < 0) {
			if (ret == -EBLOCKLISTED)
				fsc->blocklisted = true;
			break;
		}

		if (off >= i_size || !more)
			break;
	}

	if (off > iocb->ki_pos) {
		if (ret >= 0 &&
		    iov_iter_count(to) > 0 && off >= i_size_read(inode))
			*retry_op = CHECK_EOF;
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
	return ret;
}

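/*
 * State shared by the OSD requests that make up one O_DIRECT aio:
 * the final completion runs when pending_reqs drops to zero.
 */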
struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec64 mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

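/*
 * Drop one pending request's reference on the compound aio.  The last
 * one to finish propagates size/dirty-cap state for writes and calls
 * ->ki_complete() on the original iocb.
 */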
static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
		inode_dio_end(inode);

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

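/*
 * Completion callback for one OSD request within a compound aio:
 * requeue -EOLDSNAPC writes, zero-fill short reads, record latency
 * metrics, then drop this request's reference on the aio.
 */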
static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;

	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
	BUG_ON(!osd_data->num_bvecs);

	dout("ceph_aio_complete_req %p rc %d bytes %u\n",
	     inode, rc, osd_data->bvec_pos.iter.bi_size);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->inode_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->bvec_pos.iter.bi_size > rc) {
			struct iov_iter i;
			int zlen = osd_data->bvec_pos.iter.bi_size - rc;

			/*
			 * If the read was satisfied by a single OSD
			 * request, it may extend past EOF; otherwise
			 * the read is guaranteed to be within i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
				      osd_data->num_bvecs,
				      osd_data->bvec_pos.iter.bi_size);
			iov_iter_advance(&i, rc);
			iov_iter_zero(zlen, &i);
		}
	}

	/* r_start_latency == 0 means the request was not submitted */
	if (req->r_start_latency) {
		if (aio_req->write)
			ceph_update_write_metrics(metric, req->r_start_latency,
						  req->r_end_latency, rc);
		else
			ceph_update_read_metrics(metric, req->r_start_latency,
						 req->r_end_latency, rc);
	}

	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
		  aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}

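/*
 * Workqueue handler for -EOLDSNAPC: rebuild the write request against
 * the most recent snap context and resubmit it.
 */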
static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
			false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}

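/*
 * Common O_DIRECT read/write path.  Synchronous iocbs are issued and
 * waited on one object at a time; genuine aio is batched into a
 * ceph_aio_request and the call returns -EIOCBQUEUED once all pieces
 * are in flight.
 */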
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_client_metric *metric = &fsc->mdsc->metric;
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct bio_vec *bvecs;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret = 0;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && iter_is_iovec(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc ? snapc->seq : 0);

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count - 1) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = iov_iter_count(iter);
		ssize_t len;

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
		if (len < 0) {
			ceph_osdc_put_request(req);
			ret = len;
			break;
		}
		if (len != size)
			osd_req_op_extent_update(req, 0, len);

		/*
		 * To simplify error handling, allow AIO only when the
		 * I/O is within i_size or can be satisfied by a single
		 * OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range. this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
						   PAGE_ALIGN(pos + len) - 1);

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_private_item, &aio_req->osd_reqs);

			pos += len;
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		if (write)
			ceph_update_write_metrics(metric, req->r_start_latency,
						  req->r_end_latency, ret);
		else
			ceph_update_read_metrics(metric, req->r_start_latency,
						 req->r_end_latency, ret);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				struct iov_iter i;
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);

				iov_iter_bvec(&i, READ, bvecs, num_pages, len);
				iov_iter_advance(&i, ret);
				iov_iter_zero(zlen, &i);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		put_bvecs(bvecs, num_pages, should_dirty);
		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		inode_dio_begin(inode);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_private_item);
			list_del_init(&req->r_private_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping,
					   pos, pos + count - 1);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count - 1) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
					  req->r_end_latency, ret);
out:
		ceph_osdc_put_request(req);
		if (ret != 0) {
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);
		pos += len;
		written += len;
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}

	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (direct_lock)
		ceph_start_io_direct(inode);
	else
		ceph_start_io_read(inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
	if (ret < 0) {
		if (iocb->ki_flags & IOCB_DIRECT)
			ceph_end_io_direct(inode);
		else
			ceph_end_io_read(inode);
		return ret;
	}

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
		ret = generic_file_read_iter(iocb, to);
		ceph_del_rw_context(fi, &rw_ctx);
	}

	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

	if (direct_lock)
		ceph_end_io_direct(inode);
	else
		ceph_end_io_read(inode);

	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	bool direct_lock = false;
	u32 map_flags;
	u64 pool_flags;
	loff_t pos;
	loff_t limit = max(i_size_read(inode), fsc->max_file_size);

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
		direct_lock = true;

retry_snap:
	if (direct_lock)
		ceph_start_io_direct(inode);
	else
		ceph_start_io_write(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	if (unlikely(pos >= limit)) {
		err = -EFBIG;
		goto out;
	} else {
		iov_iter_truncate(from, limit - pos);
	}

	count = iov_iter_count(from);
	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
		err = -EDQUOT;
		goto out;
	}

	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	inode_inc_iversion_raw(inode);

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

	down_read(&osdc->lock);
	map_flags = osdc->osdmap->flags;
	pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
	up_read(&osdc->lock);
	if ((map_flags & CEPH_OSDMAP_FULL) ||
	    (pool_flags & CEPH_POOL_FLAG_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (direct_lock)
			ceph_end_io_direct(inode);
		else
			ceph_end_io_write(inode);
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex, because the
		 * MDS revokes Fwb caps before sending a truncate message
		 * to us.  We can't get the Fwb cap while there is a
		 * pending vmtruncate, so a write and a vmtruncate cannot
		 * run at the same time.
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		ceph_end_io_write(inode);
	}

	if (written >= 0) {
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
			ceph_check_caps(ci, 0, NULL);
	}

	dout("aio_write %p %llx.%llx %llu~%u  dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
1829 		goto retry_snap;
1830 	}
1831 
1832 	if (written >= 0) {
1833 		if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
1834 		    (pool_flags & CEPH_POOL_FLAG_NEARFULL))
1835 			iocb->ki_flags |= IOCB_DSYNC;
1836 		written = generic_write_sync(iocb, written);
1837 	}
1838 
1839 	goto out_unlocked;
1840 out:
1841 	if (direct_lock)
1842 		ceph_end_io_direct(inode);
1843 	else
1844 		ceph_end_io_write(inode);
1845 out_unlocked:
1846 	ceph_free_cap_flush(prealloc_cf);
1847 	current->backing_dev_info = NULL;
1848 	return written ? written : err;
1849 }
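
/*
 * Editorial sketch: the "goto retry_snap" above re-drives the whole
 * write when the sync/direct path returns -EOLDSNAPC, i.e. the snap
 * context changed while the write was in flight.  A minimal model of
 * that control flow, with the ceph internals stubbed out (the helper
 * below is hypothetical, not kernel API):
 */
#if 0	/* illustrative only, not part of the build */
static ssize_t write_with_snapc_retry(struct kiocb *iocb, struct iov_iter *from)
{
	ssize_t written;

	do {
		/* take caps, pick the buffered/sync/direct path, ... */
		written = do_one_write_attempt(iocb, from);	/* hypothetical */
	} while (written == -EOLDSNAPC);

	return written;
}
#endif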
1850 
1851 /*
1852  * llseek.  Be sure to verify the file size on SEEK_END.
1853  */
1854 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1855 {
1856 	struct inode *inode = file->f_mapping->host;
1857 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1858 	loff_t i_size;
1859 	loff_t ret;
1860 
1861 	inode_lock(inode);
1862 
1863 	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1864 		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1865 		if (ret < 0)
1866 			goto out;
1867 	}
1868 
1869 	i_size = i_size_read(inode);
1870 	switch (whence) {
1871 	case SEEK_END:
1872 		offset += i_size;
1873 		break;
1874 	case SEEK_CUR:
1875 		/*
1876 		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
1877 		 * position-querying operation.  Avoid rewriting the "same"
1878 		 * f_pos value back to the file because a concurrent read(),
1879 		 * write() or lseek() might have altered it
1880 		 * write() or lseek() might have altered it.
1881 		if (offset == 0) {
1882 			ret = file->f_pos;
1883 			goto out;
1884 		}
1885 		offset += file->f_pos;
1886 		break;
1887 	case SEEK_DATA:
1888 		if (offset < 0 || offset >= i_size) {
1889 			ret = -ENXIO;
1890 			goto out;
1891 		}
1892 		break;
1893 	case SEEK_HOLE:
1894 		if (offset < 0 || offset >= i_size) {
1895 			ret = -ENXIO;
1896 			goto out;
1897 		}
1898 		offset = i_size;
1899 		break;
1900 	}
1901 
1902 	ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));
1903 
1904 out:
1905 	inode_unlock(inode);
1906 	return ret;
1907 }
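
/*
 * Editorial note: per the switch above, this client does not track
 * holes, so SEEK_DATA returns the requested offset unchanged and
 * SEEK_HOLE returns i_size.  A standalone userspace sketch (not part
 * of the kernel build; error handling trimmed):
 */
#if 0	/* illustrative only */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd;
	off_t data, hole;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	data = lseek(fd, 0, SEEK_DATA);	/* expect 0 on a non-empty file */
	hole = lseek(fd, 0, SEEK_HOLE);	/* expect the file size */
	printf("data=%lld hole=%lld\n", (long long)data, (long long)hole);
	close(fd);
	return 0;
}
#endif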
1908 
1909 static inline void ceph_zero_partial_page(
1910 	struct inode *inode, loff_t offset, unsigned size)
1911 {
1912 	struct page *page;
1913 	pgoff_t index = offset >> PAGE_SHIFT;
1914 
1915 	page = find_lock_page(inode->i_mapping, index);
1916 	if (page) {
1917 		wait_on_page_writeback(page);
1918 		zero_user(page, offset & (PAGE_SIZE - 1), size);
1919 		unlock_page(page);
1920 		put_page(page);
1921 	}
1922 }
1923 
1924 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1925 				      loff_t length)
1926 {
1927 	loff_t nearly = round_up(offset, PAGE_SIZE);
1928 	if (offset < nearly) {
1929 		loff_t size = nearly - offset;
1930 		if (length < size)
1931 			size = length;
1932 		ceph_zero_partial_page(inode, offset, size);
1933 		offset += size;
1934 		length -= size;
1935 	}
1936 	if (length >= PAGE_SIZE) {
1937 		loff_t size = round_down(length, PAGE_SIZE);
1938 		truncate_pagecache_range(inode, offset, offset + size - 1);
1939 		offset += size;
1940 		length -= size;
1941 	}
1942 	if (length)
1943 		ceph_zero_partial_page(inode, offset, length);
1944 }
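
/*
 * Editorial worked example for the range split above: with 4 KiB pages,
 * offset = 1000 and length = 10000, the head partial page is zeroed for
 * bytes 1000..4095, the single fully covered page is dropped via
 * truncate_pagecache_range(4096, 8191), and the tail partial page is
 * zeroed for bytes 8192..10999.
 */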
1945 
1946 static int ceph_zero_partial_object(struct inode *inode,
1947 				    loff_t offset, loff_t *length)
1948 {
1949 	struct ceph_inode_info *ci = ceph_inode(inode);
1950 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1951 	struct ceph_osd_request *req;
1952 	int ret = 0;
1953 	loff_t zero = 0;
1954 	int op;
1955 
1956 	if (!length) {
1957 		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
1958 		length = &zero;
1959 	} else {
1960 		op = CEPH_OSD_OP_ZERO;
1961 	}
1962 
1963 	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1964 					ceph_vino(inode),
1965 					offset, length,
1966 					0, 1, op,
1967 					CEPH_OSD_FLAG_WRITE,
1968 					NULL, 0, 0, false);
1969 	if (IS_ERR(req)) {
1970 		ret = PTR_ERR(req);
1971 		goto out;
1972 	}
1973 
1974 	req->r_mtime = inode->i_mtime;
1975 	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1976 	if (!ret) {
1977 		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1978 		if (ret == -ENOENT)
1979 			ret = 0;
1980 	}
1981 	ceph_osdc_put_request(req);
1982 
1983 out:
1984 	return ret;
1985 }
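
/*
 * Editorial note on the op selection above: a NULL length means a whole
 * object is being zeroed, so it is deleted outright, except at file
 * offset 0 where a truncate is issued instead.  An -ENOENT reply is
 * folded into success because zeroing an object that does not exist is
 * a no-op.
 */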
1986 
1987 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
1988 {
1989 	int ret = 0;
1990 	struct ceph_inode_info *ci = ceph_inode(inode);
1991 	s32 stripe_unit = ci->i_layout.stripe_unit;
1992 	s32 stripe_count = ci->i_layout.stripe_count;
1993 	s32 object_size = ci->i_layout.object_size;
1994 	u64 object_set_size = (u64)object_size * stripe_count;
1995 	u64 nearly, t;
1996 
1997 	/* round offset up to next period boundary */
1998 	nearly = offset + object_set_size - 1;
1999 	t = nearly;
2000 	nearly -= do_div(t, object_set_size);
2001 
2002 	while (length && offset < nearly) {
2003 		loff_t size = length;
2004 		ret = ceph_zero_partial_object(inode, offset, &size);
2005 		if (ret < 0)
2006 			return ret;
2007 		offset += size;
2008 		length -= size;
2009 	}
2010 	while (length >= object_set_size) {
2011 		int i;
2012 		loff_t pos = offset;
2013 		for (i = 0; i < stripe_count; ++i) {
2014 			ret = ceph_zero_partial_object(inode, pos, NULL);
2015 			if (ret < 0)
2016 				return ret;
2017 			pos += stripe_unit;
2018 		}
2019 		offset += object_set_size;
2020 		length -= object_set_size;
2021 	}
2022 	while (length) {
2023 		loff_t size = length;
2024 		ret = ceph_zero_partial_object(inode, offset, &size);
2025 		if (ret < 0)
2026 			return ret;
2027 		offset += size;
2028 		length -= size;
2029 	}
2030 	return ret;
2031 }
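
/*
 * Editorial worked example for the period math above: with
 * stripe_unit = 1 MiB, stripe_count = 2 and object_size = 4 MiB, the
 * object set (period) size is 8 MiB.  For offset = 5 MiB, "nearly"
 * rounds up to the next period boundary at 8 MiB, so the first loop
 * zeroes 5..8 MiB object by object, the second loop zeroes whole 8 MiB
 * periods by dropping each object in the set, and the last loop handles
 * any tail shorter than a period.
 */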
2032 
2033 static long ceph_fallocate(struct file *file, int mode,
2034 				loff_t offset, loff_t length)
2035 {
2036 	struct ceph_file_info *fi = file->private_data;
2037 	struct inode *inode = file_inode(file);
2038 	struct ceph_inode_info *ci = ceph_inode(inode);
2039 	struct ceph_cap_flush *prealloc_cf;
2040 	int want, got = 0;
2041 	int dirty;
2042 	int ret = 0;
2043 	loff_t endoff = 0;
2044 	loff_t size;
2045 
2046 	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2047 		return -EOPNOTSUPP;
2048 
2049 	if (!S_ISREG(inode->i_mode))
2050 		return -EOPNOTSUPP;
2051 
2052 	prealloc_cf = ceph_alloc_cap_flush();
2053 	if (!prealloc_cf)
2054 		return -ENOMEM;
2055 
2056 	inode_lock(inode);
2057 
2058 	if (ceph_snap(inode) != CEPH_NOSNAP) {
2059 		ret = -EROFS;
2060 		goto unlock;
2061 	}
2062 
2063 	if (ci->i_inline_version != CEPH_INLINE_NONE) {
2064 		ret = ceph_uninline_data(file, NULL);
2065 		if (ret < 0)
2066 			goto unlock;
2067 	}
2068 
2069 	size = i_size_read(inode);
2070 
2071 	/* Are we punching a hole beyond EOF? */
2072 	if (offset >= size)
2073 		goto unlock;
2074 	if ((offset + length) > size)
2075 		length = size - offset;
2076 
2077 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
2078 		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
2079 	else
2080 		want = CEPH_CAP_FILE_BUFFER;
2081 
2082 	ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got);
2083 	if (ret < 0)
2084 		goto unlock;
2085 
2086 	ceph_zero_pagecache_range(inode, offset, length);
2087 	ret = ceph_zero_objects(inode, offset, length);
2088 
2089 	if (!ret) {
2090 		spin_lock(&ci->i_ceph_lock);
2091 		ci->i_inline_version = CEPH_INLINE_NONE;
2092 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2093 					       &prealloc_cf);
2094 		spin_unlock(&ci->i_ceph_lock);
2095 		if (dirty)
2096 			__mark_inode_dirty(inode, dirty);
2097 	}
2098 
2099 	ceph_put_cap_refs(ci, got);
2100 unlock:
2101 	inode_unlock(inode);
2102 	ceph_free_cap_flush(prealloc_cf);
2103 	return ret;
2104 }
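
/*
 * Editorial note: as enforced at the top of ceph_fallocate(), the only
 * supported mode is FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE.  A
 * standalone userspace sketch (not part of the kernel build; the path
 * below is hypothetical):
 */
#if 0	/* illustrative only */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	int fd = open("file-on-cephfs", O_WRONLY);	/* hypothetical path */

	if (fd < 0)
		return 1;
	/* Punch an 8 KiB hole at offset 4 KiB; the file size is unchanged. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 8192) < 0)
		perror("fallocate");	/* EOPNOTSUPP for any other mode */
	return 0;
}
#endif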
2105 
2106 /*
2107  * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
2108  * src_ci.  Two attempts are made to obtain both caps, and an error is returned
2109  * if this fails; a non-negative value is returned on success.
2110  */
2111 static int get_rd_wr_caps(struct file *src_filp, int *src_got,
2112 			  struct file *dst_filp,
2113 			  loff_t dst_endoff, int *dst_got)
2114 {
2115 	int ret = 0;
2116 	bool retrying = false;
2117 
2118 retry_caps:
2119 	ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
2120 			    dst_endoff, dst_got);
2121 	if (ret < 0)
2122 		return ret;
2123 
2124 	/*
2125 	 * Since we're already holding the FILE_WR capability for the dst file,
2126 	 * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do a
2127 	 * retry dance instead to try to get both capabilities.
2128 	 */
2129 	ret = ceph_try_get_caps(file_inode(src_filp),
2130 				CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
2131 				false, src_got);
2132 	if (ret <= 0) {
2133 		/* Start by dropping dst_ci caps and getting src_ci caps */
2134 		ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
2135 		if (retrying) {
2136 			if (!ret)
2137 				/* ceph_try_get_caps masks EAGAIN */
2138 				ret = -EAGAIN;
2139 			return ret;
2140 		}
2141 		ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
2142 				    CEPH_CAP_FILE_SHARED, -1, src_got);
2143 		if (ret < 0)
2144 			return ret;
2145 		/* ... drop src_ci caps too, and retry */
2146 		ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
2147 		retrying = true;
2148 		goto retry_caps;
2149 	}
2150 	return ret;
2151 }
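
/*
 * Editorial summary of the retry dance above, in order:
 *  1. blocking get of FILE_WR/FILE_BUFFER on the destination;
 *  2. non-blocking try of FILE_RD/FILE_SHARED on the source;
 *  3. on failure, drop the destination caps, wait for the source caps
 *     with a blocking get (so we never sleep while holding dst caps),
 *     drop those too, and retry from step 1 exactly once; a second
 *     failure maps the masked 0 back to -EAGAIN.
 */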
2152 
2153 static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
2154 			   struct ceph_inode_info *dst_ci, int dst_got)
2155 {
2156 	ceph_put_cap_refs(src_ci, src_got);
2157 	ceph_put_cap_refs(dst_ci, dst_got);
2158 }
2159 
2160 /*
2161  * This function does several size-related checks, returning an error if:
2162  *  - source file is smaller than off+len
2163  *  - destination file size is not OK (inode_newsize_ok())
2164  *  - the max-bytes quota is exceeded
2165  */
2166 static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
2167 			   loff_t src_off, loff_t dst_off, size_t len)
2168 {
2169 	loff_t size, endoff;
2170 
2171 	size = i_size_read(src_inode);
2172 	/*
2173 	 * Don't copy beyond source file EOF.  Instead of simply setting length
2174 	 * to (size - src_off), just drop to VFS default implementation, as the
2175 	 * local i_size may be stale due to other clients writing to the source
2176 	 * inode.
2177 	 */
2178 	if (src_off + len > size) {
2179 		dout("Copy beyond EOF (%llu + %zu > %llu)\n",
2180 		     src_off, len, size);
2181 		return -EOPNOTSUPP;
2182 	}
2183 	size = i_size_read(dst_inode);
2184 
2185 	endoff = dst_off + len;
2186 	if (inode_newsize_ok(dst_inode, endoff))
2187 		return -EOPNOTSUPP;
2188 
2189 	if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
2190 		return -EDQUOT;
2191 
2192 	return 0;
2193 }
2194 
2195 static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
2196 				    struct ceph_inode_info *dst_ci, u64 *dst_off,
2197 				    struct ceph_fs_client *fsc,
2198 				    size_t len, unsigned int flags)
2199 {
2200 	struct ceph_object_locator src_oloc, dst_oloc;
2201 	struct ceph_object_id src_oid, dst_oid;
2202 	ssize_t bytes = 0;
2203 	u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
2204 	u32 src_objlen, dst_objlen;
2205 	u32 object_size = src_ci->i_layout.object_size;
2206 	int ret;
2207 
2208 	src_oloc.pool = src_ci->i_layout.pool_id;
2209 	src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
2210 	dst_oloc.pool = dst_ci->i_layout.pool_id;
2211 	dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
2212 
2213 	while (len >= object_size) {
2214 		ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
2215 					      object_size, &src_objnum,
2216 					      &src_objoff, &src_objlen);
2217 		ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
2218 					      object_size, &dst_objnum,
2219 					      &dst_objoff, &dst_objlen);
2220 		ceph_oid_init(&src_oid);
2221 		ceph_oid_printf(&src_oid, "%llx.%08llx",
2222 				src_ci->i_vino.ino, src_objnum);
2223 		ceph_oid_init(&dst_oid);
2224 		ceph_oid_printf(&dst_oid, "%llx.%08llx",
2225 				dst_ci->i_vino.ino, dst_objnum);
2226 		/* Do an object remote copy */
2227 		ret = ceph_osdc_copy_from(&fsc->client->osdc,
2228 					  src_ci->i_vino.snap, 0,
2229 					  &src_oid, &src_oloc,
2230 					  CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2231 					  CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
2232 					  &dst_oid, &dst_oloc,
2233 					  CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2234 					  CEPH_OSD_OP_FLAG_FADVISE_DONTNEED,
2235 					  dst_ci->i_truncate_seq,
2236 					  dst_ci->i_truncate_size,
2237 					  CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
2238 		if (ret) {
2239 			if (ret == -EOPNOTSUPP) {
2240 				fsc->have_copy_from2 = false;
2241 				pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
2242 			}
2243 			dout("ceph_osdc_copy_from returned %d\n", ret);
2244 			if (!bytes)
2245 				bytes = ret;
2246 			goto out;
2247 		}
2248 		len -= object_size;
2249 		bytes += object_size;
2250 		*src_off += object_size;
2251 		*dst_off += object_size;
2252 	}
2253 
2254 out:
2255 	ceph_oloc_destroy(&src_oloc);
2256 	ceph_oloc_destroy(&dst_oloc);
2257 	return bytes;
2258 }
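
/*
 * Editorial example of the object naming above: RADOS object names are
 * "<inode-in-hex>.<object-number-as-8-hex-digits>", so object 2 of
 * inode 0x10000000000 is "10000000000.00000002".
 */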
2259 
2260 static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
2261 				      struct file *dst_file, loff_t dst_off,
2262 				      size_t len, unsigned int flags)
2263 {
2264 	struct inode *src_inode = file_inode(src_file);
2265 	struct inode *dst_inode = file_inode(dst_file);
2266 	struct ceph_inode_info *src_ci = ceph_inode(src_inode);
2267 	struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
2268 	struct ceph_cap_flush *prealloc_cf;
2269 	struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
2270 	loff_t size;
2271 	ssize_t ret = -EIO, bytes;
2272 	u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
2273 	u32 src_objlen, dst_objlen;
2274 	int src_got = 0, dst_got = 0, err, dirty;
2275 
2276 	if (src_inode->i_sb != dst_inode->i_sb) {
2277 		struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
2278 
2279 		if (ceph_fsid_compare(&src_fsc->client->fsid,
2280 				      &dst_fsc->client->fsid)) {
2281 			dout("Copying files across clusters: src: %pU dst: %pU\n",
2282 			     &src_fsc->client->fsid, &dst_fsc->client->fsid);
2283 			return -EXDEV;
2284 		}
2285 	}
2286 	if (ceph_snap(dst_inode) != CEPH_NOSNAP)
2287 		return -EROFS;
2288 
2289 	/*
2290 	 * Some of the checks below will return -EOPNOTSUPP, which will force a
2291 	 * fallback to the default VFS copy_file_range implementation.  This is
2292 	 * desirable in several cases (e.g. when 'len' is smaller than the
2293 	 * object size, or in other cases where the fallback would be more
2294 	 * efficient).
2295 	 */
2296 
2297 	if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
2298 		return -EOPNOTSUPP;
2299 
2300 	if (!src_fsc->have_copy_from2)
2301 		return -EOPNOTSUPP;
2302 
2303 	/*
2304 	 * Striped file layouts require that we copy partial objects, but the
2305 	 * OSD copy-from operation only supports full-object copies.  Limit
2306 	 * this to non-striped file layouts for now.
2307 	 */
2308 	if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
2309 	    (src_ci->i_layout.stripe_count != 1) ||
2310 	    (dst_ci->i_layout.stripe_count != 1) ||
2311 	    (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
2312 		dout("Invalid src/dst files layout\n");
2313 		return -EOPNOTSUPP;
2314 	}
2315 
2316 	if (len < src_ci->i_layout.object_size)
2317 		return -EOPNOTSUPP; /* no remote copy will be done */
2318 
2319 	prealloc_cf = ceph_alloc_cap_flush();
2320 	if (!prealloc_cf)
2321 		return -ENOMEM;
2322 
2323 	/* Start by sync'ing the source and destination files */
2324 	ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
2325 	if (ret < 0) {
2326 		dout("failed to write src file (%zd)\n", ret);
2327 		goto out;
2328 	}
2329 	ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
2330 	if (ret < 0) {
2331 		dout("failed to write dst file (%zd)\n", ret);
2332 		goto out;
2333 	}
2334 
2335 	/*
2336 	 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
2337 	 * clients may have dirty data in their caches.  And OSDs know nothing
2338 	 * about caps, so they can't safely do the remote object copies.
2339 	 */
2340 	err = get_rd_wr_caps(src_file, &src_got,
2341 			     dst_file, (dst_off + len), &dst_got);
2342 	if (err < 0) {
2343 		dout("get_rd_wr_caps returned %d\n", err);
2344 		ret = -EOPNOTSUPP;
2345 		goto out;
2346 	}
2347 
2348 	ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
2349 	if (ret < 0)
2350 		goto out_caps;
2351 
2352 	/* Drop dst file cached pages */
2353 	ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
2354 					    dst_off >> PAGE_SHIFT,
2355 					    (dst_off + len) >> PAGE_SHIFT);
2356 	if (ret < 0) {
2357 		dout("Failed to invalidate inode pages (%zd)\n", ret);
2358 		ret = 0; /* XXX */
2359 	}
2360 	ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2361 				      src_ci->i_layout.object_size,
2362 				      &src_objnum, &src_objoff, &src_objlen);
2363 	ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2364 				      dst_ci->i_layout.object_size,
2365 				      &dst_objnum, &dst_objoff, &dst_objlen);
2366 	/* object-level offsets need to be the same */
2367 	if (src_objoff != dst_objoff) {
2368 		ret = -EOPNOTSUPP;
2369 		goto out_caps;
2370 	}
2371 
2372 	/*
2373 	 * Do a manual copy if the object offset isn't object aligned.
2374 	 * 'src_objlen' contains the bytes left until the end of the object,
2375 	 * starting at src_off.
2376 	 */
2377 	if (src_objoff) {
2378 		dout("Initial partial copy of %u bytes\n", src_objlen);
2379 
2380 		/*
2381 		 * we need to temporarily drop all caps as we'll be calling
2382 		 * {read,write}_iter, which will get caps again.
2383 		 */
2384 		put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2385 		ret = do_splice_direct(src_file, &src_off, dst_file,
2386 				       &dst_off, src_objlen, flags);
2387 		/* Abort on short copies or on error */
2388 		if (ret < src_objlen) {
2389 			dout("Failed partial copy (%zd)\n", ret);
2390 			goto out;
2391 		}
2392 		len -= ret;
2393 		err = get_rd_wr_caps(src_file, &src_got,
2394 				     dst_file, (dst_off + len), &dst_got);
2395 		if (err < 0)
2396 			goto out;
2397 		err = is_file_size_ok(src_inode, dst_inode,
2398 				      src_off, dst_off, len);
2399 		if (err < 0)
2400 			goto out_caps;
2401 	}
2402 
2403 	size = i_size_read(dst_inode);
2404 	bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
2405 				     src_fsc, len, flags);
2406 	if (bytes <= 0) {
2407 		if (!ret)
2408 			ret = bytes;
2409 		goto out_caps;
2410 	}
2411 	dout("Copied %zd bytes out of %zu\n", bytes, len);
2412 	len -= bytes;
2413 	ret += bytes;
2414 
2415 	file_update_time(dst_file);
2416 	inode_inc_iversion_raw(dst_inode);
2417 
2418 	if (dst_off > size) {
2419 		/* Let the MDS know about dst file size change */
2420 		if (ceph_inode_set_size(dst_inode, dst_off) ||
2421 		    ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
2422 			ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY, NULL);
2423 	}
2424 	/* Mark Fw dirty */
2425 	spin_lock(&dst_ci->i_ceph_lock);
2426 	dst_ci->i_inline_version = CEPH_INLINE_NONE;
2427 	dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2428 	spin_unlock(&dst_ci->i_ceph_lock);
2429 	if (dirty)
2430 		__mark_inode_dirty(dst_inode, dirty);
2431 
2432 out_caps:
2433 	put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2434 
2435 	/*
2436 	 * Do the final manual copy if we still have some bytes left, unless
2437 	 * the remote object copies failed (in which case len >= object_size).
2438 	 */
2439 	if (len && (len < src_ci->i_layout.object_size)) {
2440 		dout("Final partial copy of %zu bytes\n", len);
2441 		bytes = do_splice_direct(src_file, &src_off, dst_file,
2442 					 &dst_off, len, flags);
2443 		if (bytes > 0)
2444 			ret += bytes;
2445 		else
2446 			dout("Failed partial copy (%zd)\n", bytes);
2447 	}
2448 
2449 out:
2450 	ceph_free_cap_flush(prealloc_cf);
2451 
2452 	return ret;
2453 }
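
/*
 * Editorial worked example of the three-phase copy above, assuming
 * 4 MiB objects and src_off == dst_off == 1 MiB with len == 10 MiB:
 *  1. do_splice_direct() copies the 3 MiB up to the first object
 *     boundary (src_objlen);
 *  2. ceph_do_objects_copy() offloads one full 4 MiB object to the
 *     OSDs via copy-from2;
 *  3. the final do_splice_direct() copies the remaining 3 MiB tail.
 */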
2454 
2455 static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
2456 				    struct file *dst_file, loff_t dst_off,
2457 				    size_t len, unsigned int flags)
2458 {
2459 	ssize_t ret;
2460 
2461 	ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
2462 				     len, flags);
2463 
2464 	if (ret == -EOPNOTSUPP || ret == -EXDEV)
2465 		ret = generic_copy_file_range(src_file, src_off, dst_file,
2466 					      dst_off, len, flags);
2467 	return ret;
2468 }
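
/*
 * Editorial sketch: userspace reaches the offload above through plain
 * copy_file_range(2); -EOPNOTSUPP and -EXDEV fall back to the generic
 * page-cache copy.  Standalone sketch (not part of the kernel build;
 * paths are hypothetical):
 */
#if 0	/* illustrative only */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int in = open("src", O_RDONLY);			/* hypothetical paths */
	int out = open("dst", O_WRONLY | O_CREAT, 0644);
	ssize_t n;

	if (in < 0 || out < 0)
		return 1;
	/* NULL offsets: use and advance each fd's file position. */
	n = copy_file_range(in, NULL, out, NULL, 8 << 20, 0);
	printf("copied %zd bytes\n", n);
	return n < 0;
}
#endif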
2469 
2470 const struct file_operations ceph_file_fops = {
2471 	.open = ceph_open,
2472 	.release = ceph_release,
2473 	.llseek = ceph_llseek,
2474 	.read_iter = ceph_read_iter,
2475 	.write_iter = ceph_write_iter,
2476 	.mmap = ceph_mmap,
2477 	.fsync = ceph_fsync,
2478 	.lock = ceph_lock,
2479 	.setlease = simple_nosetlease,
2480 	.flock = ceph_flock,
2481 	.splice_read = generic_file_splice_read,
2482 	.splice_write = iter_file_splice_write,
2483 	.unlocked_ioctl = ceph_ioctl,
2484 	.compat_ioctl = compat_ptr_ioctl,
2485 	.fallocate	= ceph_fallocate,
2486 	.copy_file_range = ceph_copy_file_range,
2487 };
2488