/* fs/ceph/file.c (from /openbmc/linux, revision de6da33e) */
// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>
#include <linux/ktime.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"
#include "metric.h"

static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

	flags &= ~O_ACCMODE;

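/*
 * Map each remaining flag to its CEPH_* wire counterpart; this relies
 * on the CEPH_O_* constants sharing names with the O_* flags, so the
 * token pasting below picks the matching wire bit.
 */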
#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64

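/*
 * Fill @bvecs by pinning batches of up to ITER_GET_BVECS_PAGES pages
 * from @iter until @maxsize bytes are covered.  Returns the number of
 * bytes pinned, or an error if no pages could be pinned at all.
 */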
static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
				struct bio_vec *bvecs)
{
	size_t size = 0;
	int bvec_idx = 0;

	if (maxsize > iov_iter_count(iter))
		maxsize = iov_iter_count(iter);

	while (size < maxsize) {
		struct page *pages[ITER_GET_BVECS_PAGES];
		ssize_t bytes;
		size_t start;
		int idx = 0;

		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
					   ITER_GET_BVECS_PAGES, &start);
		if (bytes < 0)
			return size ?: bytes;

		iov_iter_advance(iter, bytes);
		size += bytes;

		for ( ; bytes; idx++, bvec_idx++) {
			struct bio_vec bv = {
				.bv_page = pages[idx],
				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
				.bv_offset = start,
			};

			bvecs[bvec_idx] = bv;
			bytes -= bv.bv_len;
			start = 0;
		}
	}

	return size;
}

/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
				    struct bio_vec **bvecs, int *num_bvecs)
{
	struct bio_vec *bv;
	size_t orig_count = iov_iter_count(iter);
	ssize_t bytes;
	int npages;

	iov_iter_truncate(iter, maxsize);
	npages = iov_iter_npages(iter, INT_MAX);
	iov_iter_reexpand(iter, orig_count);

	/*
	 * __iter_get_bvecs() may populate only part of the array -- zero it
	 * out.
	 */
	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
	if (!bv)
		return -ENOMEM;

	bytes = __iter_get_bvecs(iter, maxsize, bv);
	if (bytes < 0) {
		/*
		 * No pages were pinned -- just free the array.
		 */
		kvfree(bv);
		return bytes;
	}

	*bvecs = bv;
	*num_bvecs = npages;
	return bytes;
}

static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
	int i;

	for (i = 0; i < num_bvecs; i++) {
		if (bvecs[i].bv_page) {
			if (should_dirty)
				set_page_dirty_lock(bvecs[i].bv_page);
			put_page(bvecs[i].bv_page);
		}
	}
	kvfree(bvecs);
}

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

static int ceph_init_file_info(struct inode *inode, struct file *file,
					int fmode, bool isdir)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *fi;

	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
			inode->i_mode, isdir ? "dir" : "regular");
	BUG_ON(inode->i_fop->release != ceph_release);

	if (isdir) {
		struct ceph_dir_file_info *dfi =
			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
		if (!dfi)
			return -ENOMEM;

		file->private_data = dfi;
		fi = &dfi->file_info;
		dfi->next_offset = 2;
		dfi->readdir_cache_idx = -1;
	} else {
		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!fi)
			return -ENOMEM;

		file->private_data = fi;
	}

	ceph_get_fmode(ci, fmode, 1);
	fi->fmode = fmode;

	spin_lock_init(&fi->rw_contexts_lock);
	INIT_LIST_HEAD(&fi->rw_contexts);
	fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

	return 0;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
		fallthrough;
	case S_IFDIR:
		ret = ceph_init_file_info(inode, file, fmode,
						S_ISDIR(inode->i_mode));
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * Try to renew caps after the session gets killed.
 */
int ceph_renew_caps(struct inode *inode, int fmode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	__ceph_touch_fmode(ci, mdsc, fmode);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *fi = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (fi) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}

/* Clone the layout from a synchronous create, if the dir now has Dc caps */
static void
cache_file_layout(struct inode *dst, struct inode *src)
{
	struct ceph_inode_info *cdst = ceph_inode(dst);
	struct ceph_inode_info *csrc = ceph_inode(src);

	spin_lock(&cdst->i_ceph_lock);
	if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
	    !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
		memcpy(&cdst->i_cached_layout, &csrc->i_layout,
			sizeof(cdst->i_cached_layout));
		rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
				   ceph_try_get_string(csrc->i_layout.pool_ns));
	}
	spin_unlock(&cdst->i_ceph_lock);
}

/*
 * Try to set up an async create.  We need caps, a file layout, and an
 * inode number, and either a lease on the dentry or complete dir info.
 * If any of those criteria are not satisfied, then return false and the
 * caller can go synchronous.
 */
static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
				 struct ceph_file_layout *lo, u64 *pino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
	u64 ino;

	spin_lock(&ci->i_ceph_lock);
	/* No auth cap means no chance for Dc caps */
	if (!ci->i_auth_cap)
		goto no_async;

	/* Any delegated inos? */
	if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
		goto no_async;

	if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
		goto no_async;

	if ((__ceph_caps_issued(ci, NULL) & want) != want)
		goto no_async;

	if (d_in_lookup(dentry)) {
		if (!__ceph_dir_is_complete(ci))
			goto no_async;
		spin_lock(&dentry->d_lock);
		di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
		spin_unlock(&dentry->d_lock);
	} else if (atomic_read(&ci->i_shared_gen) !=
		   READ_ONCE(di->lease_shared_gen)) {
		goto no_async;
	}

	ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
	if (!ino)
		goto no_async;

	*pino = ino;
	ceph_take_cap_refs(ci, want, false);
	memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
	rcu_assign_pointer(lo->pool_ns,
			   ceph_try_get_string(ci->i_cached_layout.pool_ns));
	got = want;
no_async:
	spin_unlock(&ci->i_ceph_lock);
	return got;
}

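/*
 * Hand an unused delegated inode number back to the auth MDS session,
 * e.g. when an async create could not be submitted.
 */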
static void restore_deleg_ino(struct inode *dir, u64 ino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_session *s = NULL;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		s = ceph_get_mds_session(ci->i_auth_cap->session);
	spin_unlock(&ci->i_ceph_lock);
	if (s) {
		int err = ceph_restore_deleg_ino(s, ino);
		if (err)
			pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
				ino, err);
		ceph_put_mds_session(s);
	}
}

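/*
 * Completion callback for an async create.  On failure the new inode is
 * shut down and the dentry dropped; on success the reply is checked
 * against the delegated inode number.  Either way, the dir caps taken
 * in try_prep_async_create() are released.
 */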
static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_request *req)
{
	int result = req->r_err ? req->r_err :
			le32_to_cpu(req->r_reply_info.head->result);

	if (result == -EJUKEBOX)
		goto out;

	mapping_set_error(req->r_parent->i_mapping, result);

	if (result) {
		struct dentry *dentry = req->r_dentry;
		struct inode *inode = d_inode(dentry);
		int pathlen = 0;
		u64 base = 0;
		char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
						  &base, 0);

		ceph_dir_clear_complete(req->r_parent);
		if (!d_unhashed(dentry))
			d_drop(dentry);

		ceph_inode_shutdown(inode);

		pr_warn("ceph: async create failure path=(%llx)%s result=%d!\n",
			base, IS_ERR(path) ? "<<bad>>" : path, result);
		ceph_mdsc_free_path(path, pathlen);
	}

	if (req->r_target_inode) {
		struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
		u64 ino = ceph_vino(req->r_target_inode).ino;

		if (req->r_deleg_ino != ino)
			pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
				__func__, req->r_err, req->r_deleg_ino, ino);
		mapping_set_error(req->r_target_inode->i_mapping, result);

		spin_lock(&ci->i_ceph_lock);
		if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
			ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
			wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
		}
		ceph_kick_flushing_inode_caps(req->r_session, ci);
		spin_unlock(&ci->i_ceph_lock);
	} else if (!result) {
		pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
			req->r_deleg_ino);
	}
out:
	ceph_mdsc_release_dir_caps(req);
}

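/*
 * Complete an async create locally: synthesize the attributes of the
 * new inode (using the delegated inode number), fill it in, splice the
 * dentry, and finish the open without waiting for the MDS reply.
 */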
static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
				    struct file *file, umode_t mode,
				    struct ceph_mds_request *req,
				    struct ceph_acl_sec_ctx *as_ctx,
				    struct ceph_file_layout *lo)
{
	int ret;
	char xattr_buf[4];
	struct ceph_mds_reply_inode in = { };
	struct ceph_mds_reply_info_in iinfo = { .in = &in };
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct inode *inode;
	struct timespec64 now;
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
	struct ceph_vino vino = { .ino = req->r_deleg_ino,
				  .snap = CEPH_NOSNAP };

	ktime_get_real_ts64(&now);

	inode = ceph_get_inode(dentry->d_sb, vino);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	iinfo.inline_version = CEPH_INLINE_NONE;
	iinfo.change_attr = 1;
	ceph_encode_timespec64(&iinfo.btime, &now);

	iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
	iinfo.xattr_data = xattr_buf;
	memset(iinfo.xattr_data, 0, iinfo.xattr_len);

	in.ino = cpu_to_le64(vino.ino);
	in.snapid = cpu_to_le64(CEPH_NOSNAP);
	in.version = cpu_to_le64(1);	// ???
	in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
	in.cap.cap_id = cpu_to_le64(1);
	in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
	in.cap.flags = CEPH_CAP_FLAG_AUTH;
	in.ctime = in.mtime = in.atime = iinfo.btime;
	in.mode = cpu_to_le32((u32)mode);
	in.truncate_seq = cpu_to_le32(1);
	in.truncate_size = cpu_to_le64(-1ULL);
	in.xattr_version = cpu_to_le64(1);
	in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
	in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_mode & S_ISGID ?
				dir->i_gid : current_fsgid()));
	in.nlink = cpu_to_le32(1);
	in.max_size = cpu_to_le64(lo->stripe_unit);

	ceph_file_layout_to_legacy(lo, &in.layout);

	down_read(&mdsc->snap_rwsem);
	ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
			      req->r_fmode, NULL);
	up_read(&mdsc->snap_rwsem);
	if (ret) {
		dout("%s failed to fill inode: %d\n", __func__, ret);
		ceph_dir_clear_complete(dir);
		if (!d_unhashed(dentry))
			d_drop(dentry);
		if (inode->i_state & I_NEW)
			discard_new_inode(inode);
	} else {
		struct dentry *dn;

		dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
			vino.ino, ceph_ino(dir), dentry->d_name.name);
		ceph_dir_clear_ordered(dir);
		ceph_init_inode_acls(inode, as_ctx);
		if (inode->i_state & I_NEW) {
			/*
			 * If it's not I_NEW, then someone created this before
			 * we got here. Assume the server is aware of it at
			 * that point and don't worry about setting
			 * CEPH_I_ASYNC_CREATE.
			 */
			ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
			unlock_new_inode(inode);
		}
		if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
			if (!d_unhashed(dentry))
				d_drop(dentry);
			dn = d_splice_alias(inode, dentry);
			WARN_ON_ONCE(dn && dn != dentry);
		}
		file->f_mode |= FMODE_CREATED;
		ret = finish_open(file, dentry, ceph_open);
	}
	return ret;
}

/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acl_sec_ctx as_ctx = {};
	bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	if (flags & O_CREAT) {
		if (ceph_quota_is_max_files_exceeded(dir))
			return -EDQUOT;
		err = ceph_pre_init_acls(dir, &mode, &as_ctx);
		if (err < 0)
			return err;
		err = ceph_security_init_secctx(dentry, mode, &as_ctx);
		if (err < 0)
			goto out_ctx;
	} else if (!d_in_lookup(dentry)) {
		/* If it's not being looked up, it's negative */
		return -ENOENT;
	}
retry:
	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_ctx;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);
	req->r_parent = dir;
	ihold(dir);

	if (flags & O_CREAT) {
		struct ceph_file_layout lo;

		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (as_ctx.pagelist) {
			req->r_pagelist = as_ctx.pagelist;
			as_ctx.pagelist = NULL;
		}
		if (try_async &&
		    (req->r_dir_caps =
		      try_prep_async_create(dir, dentry, &lo,
					    &req->r_deleg_ino))) {
			set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
			req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
			req->r_callback = ceph_async_create_cb;
			err = ceph_mdsc_submit_request(mdsc, dir, req);
			if (!err) {
				err = ceph_finish_async_create(dir, dentry,
							file, mode, req,
							&as_ctx, &lo);
			} else if (err == -EJUKEBOX) {
				restore_deleg_ino(dir, req->r_deleg_ino);
				ceph_mdsc_put_request(req);
				try_async = false;
				goto retry;
			}
			goto out_req;
		}
	}

	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	if (err == -ENOENT) {
		dentry = ceph_handle_snapdir(req, dentry);
		if (IS_ERR(dentry)) {
			err = PTR_ERR(dentry);
			goto out_req;
		}
		err = 0;
	}

	if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			struct inode *newino = d_inode(dentry);

			cache_file_layout(dir, newino);
			ceph_init_inode_acls(newino, &as_ctx);
			file->f_mode |= FMODE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open);
	}
out_req:
	ceph_mdsc_put_request(req);
out_ctx:
	ceph_release_acl_sec_ctx(&as_ctx);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (S_ISDIR(inode->i_mode)) {
		struct ceph_dir_file_info *dfi = file->private_data;
		dout("release inode %p dir file %p\n", inode, file);
		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

		ceph_put_fmode(ci, dfi->file_info.fmode, 1);

		if (dfi->last_readdir)
			ceph_mdsc_put_request(dfi->last_readdir);
		kfree(dfi->last_name);
		kfree(dfi->dir_info);
		kmem_cache_free(ceph_dir_file_cachep, dfi);
	} else {
		struct ceph_file_info *fi = file->private_data;
		dout("release inode %p regular file %p\n", inode, file);
		WARN_ON(!list_empty(&fi->rw_contexts));

		ceph_put_fmode(ci, fi->fmode, 1);

		kmem_cache_free(ceph_file_cachep, fi);
	}

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

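/*
 * retry_op values for ceph_read_iter(): HAVE_RETRIED marks a retry that
 * has already happened, CHECK_EOF means a short read must be re-checked
 * against i_size, and READ_INLINE sends the read through getattr for
 * inlined file data.
 */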
enum {
	HAVE_RETRIED = 1,
	CHECK_EOF =    2,
	READ_INLINE =  3,
};

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans an object boundary, just do multiple reads.  (That's
 * not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we should
 * only return a short read to the caller if we hit EOF.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *retry_op)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	ssize_t ret;
	u64 off = iocb->ki_pos;
	u64 len = iov_iter_count(to);
	u64 i_size;

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping,
					   off, off + len - 1);
	if (ret < 0)
		return ret;

	ret = 0;
	while ((len = iov_iter_count(to)) > 0) {
		struct ceph_osd_request *req;
		struct page **pages;
		int num_pages;
		size_t page_off;
		bool more;
		int idx;
		size_t left;

		req = ceph_osdc_new_request(osdc, &ci->i_layout,
					ci->i_vino, off, &len, 0, 1,
					CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
					NULL, ci->i_truncate_seq,
					ci->i_truncate_size, false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		more = len < iov_iter_count(to);

		num_pages = calc_pages_for(off, len);
		page_off = off & ~PAGE_MASK;
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
						 false, false);
		ret = ceph_osdc_start_request(osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(osdc, req);

		ceph_update_read_metrics(&fsc->mdsc->metric,
					 req->r_start_latency,
					 req->r_end_latency,
					 len, ret);

		ceph_osdc_put_request(req);

		i_size = i_size_read(inode);
		dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
		     off, len, ret, i_size, (more ? " MORE" : ""));

		if (ret == -ENOENT)
			ret = 0;
		if (ret >= 0 && ret < len && (off + ret < i_size)) {
			int zlen = min(len - ret, i_size - off - ret);
			int zoff = page_off + ret;
			dout("sync_read zero gap %llu~%llu\n",
			     off + ret, off + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		idx = 0;
		left = ret > 0 ? ret : 0;
		while (left > 0) {
			size_t len, copied;
			page_off = off & ~PAGE_MASK;
			len = min_t(size_t, left, PAGE_SIZE - page_off);
			SetPageUptodate(pages[idx]);
			copied = copy_page_to_iter(pages[idx++],
						   page_off, len, to);
			off += copied;
			left -= copied;
			if (copied < len) {
				ret = -EFAULT;
				break;
			}
		}
		ceph_release_page_vector(pages, num_pages);

		if (ret < 0) {
			if (ret == -EBLOCKLISTED)
				fsc->blocklisted = true;
			break;
		}

		if (off >= i_size || !more)
			break;
	}

	if (off > iocb->ki_pos) {
		if (off >= i_size) {
			*retry_op = CHECK_EOF;
			ret = i_size - iocb->ki_pos;
			iocb->ki_pos = i_size;
		} else {
			ret = off - iocb->ki_pos;
			iocb->ki_pos = off;
		}
	}

	dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
	return ret;
}

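/*
 * Tracks a direct-I/O AIO that may span several OSD requests; the iocb
 * is completed from ceph_aio_complete() once pending_reqs reaches zero.
 */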
struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec64 mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
		inode_dio_end(inode);

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
	unsigned int len = osd_data->bvec_pos.iter.bi_size;

	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
	BUG_ON(!osd_data->num_bvecs);

	dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->inode_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && len > rc) {
			struct iov_iter i;
			int zlen = len - rc;

			/*
			 * If read is satisfied by single OSD request,
			 * it can pass EOF. Otherwise read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
				      osd_data->num_bvecs, len);
			iov_iter_advance(&i, rc);
			iov_iter_zero(zlen, &i);
		}
	}

	/* r_start_latency == 0 means the request was not submitted */
	if (req->r_start_latency) {
		if (aio_req->write)
			ceph_update_write_metrics(metric, req->r_start_latency,
						  req->r_end_latency, len, rc);
		else
			ceph_update_read_metrics(metric, req->r_start_latency,
						 req->r_end_latency, len, rc);
	}

	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
		  aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}

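/*
 * A write came back -EOLDSNAPC (raced with a snapshot): rebuild the OSD
 * request against the most recent snap context and resubmit it from
 * workqueue context.
 */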
static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
			false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}

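/*
 * Direct I/O to or from the OSDs, bypassing the page cache.  The iter
 * is consumed in per-object chunks, one OSD request each; for AIO the
 * requests are queued up first and submitted together, completing via
 * ceph_aio_complete_req().
 */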
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_client_metric *metric = &fsc->mdsc->metric;
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct bio_vec *bvecs;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret = 0;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && iter_is_iovec(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc ? snapc->seq : 0);

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count - 1) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = iov_iter_count(iter);
		ssize_t len;

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
		if (len < 0) {
			ceph_osdc_put_request(req);
			ret = len;
			break;
		}
		if (len != size)
			osd_req_op_extent_update(req, 0, len);

		/*
		 * To simplify error handling, allow AIO when IO within i_size
		 * or IO can be satisfied by single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range. this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
						   PAGE_ALIGN(pos + len) - 1);

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_private_item, &aio_req->osd_reqs);

			pos += len;
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		if (write)
			ceph_update_write_metrics(metric, req->r_start_latency,
						  req->r_end_latency, len, ret);
		else
			ceph_update_read_metrics(metric, req->r_start_latency,
						 req->r_end_latency, len, ret);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				struct iov_iter i;
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);

				iov_iter_bvec(&i, READ, bvecs, num_pages, len);
				iov_iter_advance(&i, ret);
				iov_iter_zero(zlen, &i);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		put_bvecs(bvecs, num_pages, should_dirty);
		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		inode_dio_begin(inode);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_private_item);
			list_del_init(&req->r_private_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If the write spans an object boundary, just do multiple writes.  (For
 * a correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping,
					   pos, pos + count - 1);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count - 1) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
					  req->r_end_latency, len, ret);
out:
		ceph_osdc_put_request(req);
		if (ret != 0) {
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);
		pos += len;
		written += len;
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}

	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Wrap generic_file_read_iter with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (ceph_inode_is_shutdown(inode))
		return -ESTALE;

	if (direct_lock)
		ceph_start_io_direct(inode);
	else
		ceph_start_io_read(inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
	if (ret < 0) {
		if (iocb->ki_flags & IOCB_DIRECT)
			ceph_end_io_direct(inode);
		else
			ceph_end_io_read(inode);
		return ret;
	}

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
		ret = generic_file_read_iter(iocb, to);
		ceph_del_rw_context(fi, &rw_ctx);
	}

	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

	if (direct_lock)
		ceph_end_io_direct(inode);
	else
		ceph_end_io_read(inode);

	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	bool direct_lock = false;
	u32 map_flags;
	u64 pool_flags;
	loff_t pos;
	loff_t limit = max(i_size_read(inode), fsc->max_file_size);

	if (ceph_inode_is_shutdown(inode))
		return -ESTALE;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
		direct_lock = true;

retry_snap:
	if (direct_lock)
		ceph_start_io_direct(inode);
	else
		ceph_start_io_write(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	if (unlikely(pos >= limit)) {
		err = -EFBIG;
		goto out;
	} else {
		iov_iter_truncate(from, limit - pos);
	}

	count = iov_iter_count(from);
	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
		err = -EDQUOT;
		goto out;
	}

	down_read(&osdc->lock);
	map_flags = osdc->osdmap->flags;
	pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
	up_read(&osdc->lock);
	if ((map_flags & CEPH_OSDMAP_FULL) ||
	    (pool_flags & CEPH_POOL_FLAG_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	err = file_remove_privs(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
	if (err < 0)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out_caps;

	inode_inc_iversion_raw(inode);

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (direct_lock)
			ceph_end_io_direct(inode);
		else
			ceph_end_io_write(inode);
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex: the MDS
		 * revokes Fwb caps before sending a truncate message
		 * to us, and we can't get the Fwb cap while a
		 * vmtruncate is pending.  So write and vmtruncate
		 * cannot run at the same time.
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		ceph_end_io_write(inode);
	}

	if (written >= 0) {
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
			ceph_check_caps(ci, 0, NULL);
	}

	dout("aio_write %p %llx.%llx %llu~%u  dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)count);
		goto retry_snap;
	}

1847 	if (written >= 0) {
1848 		if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
1849 		    (pool_flags & CEPH_POOL_FLAG_NEARFULL))
1850 			iocb->ki_flags |= IOCB_DSYNC;
1851 		written = generic_write_sync(iocb, written);
1852 	}
1853 
1854 	goto out_unlocked;
1855 out_caps:
1856 	ceph_put_cap_refs(ci, got);
1857 out:
1858 	if (direct_lock)
1859 		ceph_end_io_direct(inode);
1860 	else
1861 		ceph_end_io_write(inode);
1862 out_unlocked:
1863 	ceph_free_cap_flush(prealloc_cf);
1864 	current->backing_dev_info = NULL;
1865 	return written ? written : err;
1866 }
1867 
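/*
 * Illustration only (not part of this file): a user-space sketch showing
 * how the branch above is selected.  Opening with O_DIRECT steers the
 * write into ceph_direct_read_write(); the CEPH_F_SYNC file flag (set via
 * the SYNCIO ioctl) forces the sync path too, while a plain open with
 * exclusive caps takes buffered generic_perform_write().  The mount path
 * is an assumption.
 */
#if 0	/* example, build separately in user space */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* O_DIRECT requires aligned buffer, offset and length. */
	int fd = open("/mnt/cephfs/file", O_WRONLY | O_CREAT | O_DIRECT, 0644);
	void *buf;

	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;
	memset(buf, 0xab, 4096);
	if (write(fd, buf, 4096) < 0)	/* direct path, bypasses page cache */
		perror("write");
	free(buf);
	close(fd);
	return 0;
}
#endif
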
1868 /*
1869  * llseek.  Be sure to verify file size on SEEK_END.
1870  */
1871 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1872 {
1873 	struct inode *inode = file->f_mapping->host;
1874 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1875 	loff_t i_size;
1876 	loff_t ret;
1877 
1878 	inode_lock(inode);
1879 
1880 	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1881 		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1882 		if (ret < 0)
1883 			goto out;
1884 	}
1885 
1886 	i_size = i_size_read(inode);
1887 	switch (whence) {
1888 	case SEEK_END:
1889 		offset += i_size;
1890 		break;
1891 	case SEEK_CUR:
1892 		/*
1893 		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
1894 		 * position-querying operation.  Avoid rewriting the "same"
1895 		 * f_pos value back to the file because a concurrent read(),
1896 		 * write() or lseek() might have altered it.
1897 		 */
1898 		if (offset == 0) {
1899 			ret = file->f_pos;
1900 			goto out;
1901 		}
1902 		offset += file->f_pos;
1903 		break;
1904 	case SEEK_DATA:
1905 		if (offset < 0 || offset >= i_size) {
1906 			ret = -ENXIO;
1907 			goto out;
1908 		}
1909 		break;
1910 	case SEEK_HOLE:
1911 		if (offset < 0 || offset >= i_size) {
1912 			ret = -ENXIO;
1913 			goto out;
1914 		}
1915 		offset = i_size;
1916 		break;
1917 	}
1918 
1919 	ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));
1920 
1921 out:
1922 	inode_unlock(inode);
1923 	return ret;
1924 }
1925 
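/*
 * Illustration only (not part of this file): a minimal user-space sketch
 * of the SEEK_DATA/SEEK_HOLE behaviour implemented above.  ceph_llseek()
 * reports the whole file as data, so for a valid offset SEEK_DATA echoes
 * the offset back and SEEK_HOLE returns i_size.  The mount path is an
 * assumption.
 */
#if 0	/* example, build separately in user space */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/cephfs/file", O_RDONLY);	/* hypothetical path */

	if (fd < 0)
		return 1;
	/* With a valid offset, SEEK_DATA returns the offset itself... */
	printf("data: %lld\n", (long long)lseek(fd, 0, SEEK_DATA));
	/* ...and the only reported hole is the implicit one at EOF. */
	printf("hole: %lld\n", (long long)lseek(fd, 0, SEEK_HOLE));
	close(fd);
	return 0;
}
#endif
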
1926 static inline void ceph_zero_partial_page(
1927 	struct inode *inode, loff_t offset, unsigned size)
1928 {
1929 	struct page *page;
1930 	pgoff_t index = offset >> PAGE_SHIFT;
1931 
1932 	page = find_lock_page(inode->i_mapping, index);
1933 	if (page) {
1934 		wait_on_page_writeback(page);
1935 		zero_user(page, offset & (PAGE_SIZE - 1), size);
1936 		unlock_page(page);
1937 		put_page(page);
1938 	}
1939 }
1940 
1941 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1942 				      loff_t length)
1943 {
1944 	loff_t nearly = round_up(offset, PAGE_SIZE);
1945 	if (offset < nearly) {
1946 		loff_t size = nearly - offset;
1947 		if (length < size)
1948 			size = length;
1949 		ceph_zero_partial_page(inode, offset, size);
1950 		offset += size;
1951 		length -= size;
1952 	}
1953 	if (length >= PAGE_SIZE) {
1954 		loff_t size = round_down(length, PAGE_SIZE);
1955 		truncate_pagecache_range(inode, offset, offset + size - 1);
1956 		offset += size;
1957 		length -= size;
1958 	}
1959 	if (length)
1960 		ceph_zero_partial_page(inode, offset, length);
1961 }
1962 
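/*
 * Worked example (illustration, assuming PAGE_SIZE == 4096): for
 * offset = 1000 and length = 10000, ceph_zero_pagecache_range() zeroes
 * bytes 1000..4095 of the first page (3096 bytes), truncates the fully
 * covered page cache range 4096..8191 (4096 bytes), and finally zeroes
 * bytes 8192..10999 (2808 bytes) in the last partial page.
 */
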
1963 static int ceph_zero_partial_object(struct inode *inode,
1964 				    loff_t offset, loff_t *length)
1965 {
1966 	struct ceph_inode_info *ci = ceph_inode(inode);
1967 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1968 	struct ceph_osd_request *req;
1969 	int ret = 0;
1970 	loff_t zero = 0;
1971 	int op;
1972 
1973 	if (!length) {
1974 		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
1975 		length = &zero;
1976 	} else {
1977 		op = CEPH_OSD_OP_ZERO;
1978 	}
1979 
1980 	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1981 					ceph_vino(inode),
1982 					offset, length,
1983 					0, 1, op,
1984 					CEPH_OSD_FLAG_WRITE,
1985 					NULL, 0, 0, false);
1986 	if (IS_ERR(req)) {
1987 		ret = PTR_ERR(req);
1988 		goto out;
1989 	}
1990 
1991 	req->r_mtime = inode->i_mtime;
1992 	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
1993 	if (!ret) {
1994 		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1995 		if (ret == -ENOENT)
1996 			ret = 0;
1997 	}
1998 	ceph_osdc_put_request(req);
1999 
2000 out:
2001 	return ret;
2002 }
2003 
2004 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
2005 {
2006 	int ret = 0;
2007 	struct ceph_inode_info *ci = ceph_inode(inode);
2008 	s32 stripe_unit = ci->i_layout.stripe_unit;
2009 	s32 stripe_count = ci->i_layout.stripe_count;
2010 	s32 object_size = ci->i_layout.object_size;
2011 	u64 object_set_size = (u64)object_size * stripe_count; /* avoid s32 overflow */
2012 	u64 nearly, t;
2013 
2014 	/* round offset up to next period boundary */
2015 	nearly = offset + object_set_size - 1;
2016 	t = nearly;
2017 	nearly -= do_div(t, object_set_size);
2018 
2019 	while (length && offset < nearly) {
2020 		loff_t size = length;
2021 		ret = ceph_zero_partial_object(inode, offset, &size);
2022 		if (ret < 0)
2023 			return ret;
2024 		offset += size;
2025 		length -= size;
2026 	}
2027 	while (length >= object_set_size) {
2028 		int i;
2029 		loff_t pos = offset;
2030 		for (i = 0; i < stripe_count; ++i) {
2031 			ret = ceph_zero_partial_object(inode, pos, NULL);
2032 			if (ret < 0)
2033 				return ret;
2034 			pos += stripe_unit;
2035 		}
2036 		offset += object_set_size;
2037 		length -= object_set_size;
2038 	}
2039 	while (length) {
2040 		loff_t size = length;
2041 		ret = ceph_zero_partial_object(inode, offset, &size);
2042 		if (ret < 0)
2043 			return ret;
2044 		offset += size;
2045 		length -= size;
2046 	}
2047 	return ret;
2048 }
2049 
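/*
 * Worked example (illustration): with object_size = 4 MiB and
 * stripe_count = 2, object_set_size is 8 MiB.  For offset = 5 MiB,
 * nearly = offset + object_set_size - 1 = 13 MiB - 1, and subtracting
 * the do_div() remainder ((13 MiB - 1) % 8 MiB = 5 MiB - 1) rounds
 * nearly up to 8 MiB, the next period boundary.  Partial objects are
 * zeroed up to that boundary, whole object sets are deleted or
 * truncated, and any trailing partial range is zeroed at the end.
 */
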
2050 static long ceph_fallocate(struct file *file, int mode,
2051 				loff_t offset, loff_t length)
2052 {
2053 	struct ceph_file_info *fi = file->private_data;
2054 	struct inode *inode = file_inode(file);
2055 	struct ceph_inode_info *ci = ceph_inode(inode);
2056 	struct ceph_cap_flush *prealloc_cf;
2057 	int want, got = 0;
2058 	int dirty;
2059 	int ret = 0;
2060 	loff_t endoff = 0;
2061 	loff_t size;
2062 
2063 	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2064 		return -EOPNOTSUPP;
2065 
2066 	if (!S_ISREG(inode->i_mode))
2067 		return -EOPNOTSUPP;
2068 
2069 	prealloc_cf = ceph_alloc_cap_flush();
2070 	if (!prealloc_cf)
2071 		return -ENOMEM;
2072 
2073 	inode_lock(inode);
2074 
2075 	if (ceph_snap(inode) != CEPH_NOSNAP) {
2076 		ret = -EROFS;
2077 		goto unlock;
2078 	}
2079 
2080 	if (ci->i_inline_version != CEPH_INLINE_NONE) {
2081 		ret = ceph_uninline_data(file, NULL);
2082 		if (ret < 0)
2083 			goto unlock;
2084 	}
2085 
2086 	size = i_size_read(inode);
2087 
2088 	/* Are we punching a hole beyond EOF? */
2089 	if (offset >= size)
2090 		goto unlock;
2091 	if ((offset + length) > size)
2092 		length = size - offset;
2093 
2094 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
2095 		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
2096 	else
2097 		want = CEPH_CAP_FILE_BUFFER;
2098 
2099 	ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got);
2100 	if (ret < 0)
2101 		goto unlock;
2102 
2103 	filemap_invalidate_lock(inode->i_mapping);
2104 	ceph_zero_pagecache_range(inode, offset, length);
2105 	ret = ceph_zero_objects(inode, offset, length);
2106 
2107 	if (!ret) {
2108 		spin_lock(&ci->i_ceph_lock);
2109 		ci->i_inline_version = CEPH_INLINE_NONE;
2110 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2111 					       &prealloc_cf);
2112 		spin_unlock(&ci->i_ceph_lock);
2113 		if (dirty)
2114 			__mark_inode_dirty(inode, dirty);
2115 	}
2116 	filemap_invalidate_unlock(inode->i_mapping);
2117 
2118 	ceph_put_cap_refs(ci, got);
2119 unlock:
2120 	inode_unlock(inode);
2121 	ceph_free_cap_flush(prealloc_cf);
2122 	return ret;
2123 }
2124 
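/*
 * Illustration only (not part of this file): a user-space sketch that
 * exercises the punch-hole path above.  CephFS accepts only
 * FALLOC_FL_PUNCH_HOLE together with FALLOC_FL_KEEP_SIZE on regular
 * files; any other mode fails with EOPNOTSUPP.  The path is an
 * assumption.
 */
#if 0	/* example, build separately in user space */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/mnt/cephfs/file", O_RDWR);	/* hypothetical path */

	if (fd < 0)
		return 1;
	/* Punch a 1 MiB hole at offset 4096; i_size is left unchanged. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      4096, 1024 * 1024) < 0)
		perror("fallocate");
	close(fd);
	return 0;
}
#endif
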
2125 /*
2126  * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
2127  * src_ci.  Two attempts are made to obtain both caps; an error is returned if
2128  * this fails; zero is returned on success.
2129  */
2130 static int get_rd_wr_caps(struct file *src_filp, int *src_got,
2131 			  struct file *dst_filp,
2132 			  loff_t dst_endoff, int *dst_got)
2133 {
2134 	int ret = 0;
2135 	bool retrying = false;
2136 
2137 retry_caps:
2138 	ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
2139 			    dst_endoff, dst_got);
2140 	if (ret < 0)
2141 		return ret;
2142 
2143 	/*
2144 	 * Since we're already holding the FILE_WR capability for the dst file,
2145 	 * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
2146 	 * retry dance instead to try to get both capabilities.
2147 	 */
2148 	ret = ceph_try_get_caps(file_inode(src_filp),
2149 				CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
2150 				false, src_got);
2151 	if (ret <= 0) {
2152 		/* Start by dropping dst_ci caps and getting src_ci caps */
2153 		ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
2154 		if (retrying) {
2155 			if (!ret)
2156 				/* ceph_try_get_caps masks EAGAIN */
2157 				ret = -EAGAIN;
2158 			return ret;
2159 		}
2160 		ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
2161 				    CEPH_CAP_FILE_SHARED, -1, src_got);
2162 		if (ret < 0)
2163 			return ret;
2164 		/* ... drop src_ci caps too, and retry */
2165 		ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
2166 		retrying = true;
2167 		goto retry_caps;
2168 	}
2169 	return ret;
2170 }
2171 
2172 static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
2173 			   struct ceph_inode_info *dst_ci, int dst_got)
2174 {
2175 	ceph_put_cap_refs(src_ci, src_got);
2176 	ceph_put_cap_refs(dst_ci, dst_got);
2177 }
2178 
2179 /*
2180  * This function does several size-related checks, returning an error if:
2181  *  - source file is smaller than off+len
2182  *  - destination file size is not OK (inode_newsize_ok())
2183  *  - max bytes quota is exceeded
2184  */
2185 static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
2186 			   loff_t src_off, loff_t dst_off, size_t len)
2187 {
2188 	loff_t size, endoff;
2189 
2190 	size = i_size_read(src_inode);
2191 	/*
2192 	 * Don't copy beyond source file EOF.  Instead of simply setting length
2193 	 * to (size - src_off), just fall back to the default VFS implementation,
2194 	 * as the local i_size may be stale due to other clients writing to the
2195 	 * source inode.
2196 	 */
2197 	if (src_off + len > size) {
2198 		dout("Copy beyond EOF (%llu + %zu > %llu)\n",
2199 		     src_off, len, size);
2200 		return -EOPNOTSUPP;
2201 	}
2202 	size = i_size_read(dst_inode);
2203 
2204 	endoff = dst_off + len;
2205 	if (inode_newsize_ok(dst_inode, endoff))
2206 		return -EOPNOTSUPP;
2207 
2208 	if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
2209 		return -EDQUOT;
2210 
2211 	return 0;
2212 }
2213 
2214 static struct ceph_osd_request *
2215 ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc,
2216 			    u64 src_snapid,
2217 			    struct ceph_object_id *src_oid,
2218 			    struct ceph_object_locator *src_oloc,
2219 			    struct ceph_object_id *dst_oid,
2220 			    struct ceph_object_locator *dst_oloc,
2221 			    u32 truncate_seq, u64 truncate_size)
2222 {
2223 	struct ceph_osd_request *req;
2224 	int ret;
2225 	u32 src_fadvise_flags =
2226 		CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2227 		CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
2228 	u32 dst_fadvise_flags =
2229 		CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2230 		CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
2231 
2232 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
2233 	if (!req)
2234 		return ERR_PTR(-ENOMEM);
2235 
2236 	req->r_flags = CEPH_OSD_FLAG_WRITE;
2237 
2238 	ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
2239 	ceph_oid_copy(&req->r_t.base_oid, dst_oid);
2240 
2241 	ret = osd_req_op_copy_from_init(req, src_snapid, 0,
2242 					src_oid, src_oloc,
2243 					src_fadvise_flags,
2244 					dst_fadvise_flags,
2245 					truncate_seq,
2246 					truncate_size,
2247 					CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
2248 	if (ret)
2249 		goto out;
2250 
2251 	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
2252 	if (ret)
2253 		goto out;
2254 
2255 	return req;
2256 
2257 out:
2258 	ceph_osdc_put_request(req);
2259 	return ERR_PTR(ret);
2260 }
2261 
2262 static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
2263 				    struct ceph_inode_info *dst_ci, u64 *dst_off,
2264 				    struct ceph_fs_client *fsc,
2265 				    size_t len, unsigned int flags)
2266 {
2267 	struct ceph_object_locator src_oloc, dst_oloc;
2268 	struct ceph_object_id src_oid, dst_oid;
2269 	struct ceph_osd_client *osdc;
2270 	struct ceph_osd_request *req;
2271 	ssize_t bytes = 0;
2272 	u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
2273 	u32 src_objlen, dst_objlen;
2274 	u32 object_size = src_ci->i_layout.object_size;
2275 	int ret;
2276 
2277 	src_oloc.pool = src_ci->i_layout.pool_id;
2278 	src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
2279 	dst_oloc.pool = dst_ci->i_layout.pool_id;
2280 	dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
2281 	osdc = &fsc->client->osdc;
2282 
2283 	while (len >= object_size) {
2284 		ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
2285 					      object_size, &src_objnum,
2286 					      &src_objoff, &src_objlen);
2287 		ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
2288 					      object_size, &dst_objnum,
2289 					      &dst_objoff, &dst_objlen);
2290 		ceph_oid_init(&src_oid);
2291 		ceph_oid_printf(&src_oid, "%llx.%08llx",
2292 				src_ci->i_vino.ino, src_objnum);
2293 		ceph_oid_init(&dst_oid);
2294 		ceph_oid_printf(&dst_oid, "%llx.%08llx",
2295 				dst_ci->i_vino.ino, dst_objnum);
2296 		/* Do a remote object copy */
2297 		req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap,
2298 						  &src_oid, &src_oloc,
2299 						  &dst_oid, &dst_oloc,
2300 						  dst_ci->i_truncate_seq,
2301 						  dst_ci->i_truncate_size);
2302 		if (IS_ERR(req))
2303 			ret = PTR_ERR(req);
2304 		else {
2305 			ceph_osdc_start_request(osdc, req, false);
2306 			ret = ceph_osdc_wait_request(osdc, req);
2307 			ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
2308 						     req->r_start_latency,
2309 						     req->r_end_latency,
2310 						     object_size, ret);
2311 			ceph_osdc_put_request(req);
2312 		}
2313 		if (ret) {
2314 			if (ret == -EOPNOTSUPP) {
2315 				fsc->have_copy_from2 = false;
2316 				pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
2317 			}
2318 			dout("ceph_osdc_copy_from returned %d\n", ret);
2319 			if (!bytes)
2320 				bytes = ret;
2321 			goto out;
2322 		}
2323 		len -= object_size;
2324 		bytes += object_size;
2325 		*src_off += object_size;
2326 		*dst_off += object_size;
2327 	}
2328 
2329 out:
2330 	ceph_oloc_destroy(&src_oloc);
2331 	ceph_oloc_destroy(&dst_oloc);
2332 	return bytes;
2333 }
2334 
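/*
 * Illustration: ceph_oid_printf() above builds RADOS object names in the
 * usual "<ino-hex>.<object-number>" form, e.g. inode 0x10000000000 and
 * object 1 yield "10000000000.00000001", so the copy-from request targets
 * the same backing objects the file's data is striped over.
 */
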
2335 static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
2336 				      struct file *dst_file, loff_t dst_off,
2337 				      size_t len, unsigned int flags)
2338 {
2339 	struct inode *src_inode = file_inode(src_file);
2340 	struct inode *dst_inode = file_inode(dst_file);
2341 	struct ceph_inode_info *src_ci = ceph_inode(src_inode);
2342 	struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
2343 	struct ceph_cap_flush *prealloc_cf;
2344 	struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
2345 	loff_t size;
2346 	ssize_t ret = -EIO, bytes;
2347 	u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
2348 	u32 src_objlen, dst_objlen;
2349 	int src_got = 0, dst_got = 0, err, dirty;
2350 
2351 	if (src_inode->i_sb != dst_inode->i_sb) {
2352 		struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
2353 
2354 		if (ceph_fsid_compare(&src_fsc->client->fsid,
2355 				      &dst_fsc->client->fsid)) {
2356 			dout("Copying files across clusters: src: %pU dst: %pU\n",
2357 			     &src_fsc->client->fsid, &dst_fsc->client->fsid);
2358 			return -EXDEV;
2359 		}
2360 	}
2361 	if (ceph_snap(dst_inode) != CEPH_NOSNAP)
2362 		return -EROFS;
2363 
2364 	/*
2365 	 * Some of the checks below will return -EOPNOTSUPP, which will force a
2366 	 * fallback to the default VFS copy_file_range implementation.  This is
2367 	 * desirable in several cases (for example, when 'len' is smaller
2368 	 * than the object size, or when the fallback would be more
2369 	 * efficient).
2370 	 */
2371 
2372 	if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
2373 		return -EOPNOTSUPP;
2374 
2375 	if (!src_fsc->have_copy_from2)
2376 		return -EOPNOTSUPP;
2377 
2378 	/*
2379 	 * Striped file layouts require that we copy partial objects, but the
2380 	 * OSD copy-from operation only supports full-object copies.  Limit
2381 	 * this to non-striped file layouts for now.
2382 	 */
2383 	if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
2384 	    (src_ci->i_layout.stripe_count != 1) ||
2385 	    (dst_ci->i_layout.stripe_count != 1) ||
2386 	    (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
2387 		dout("Invalid src/dst files layout\n");
2388 		return -EOPNOTSUPP;
2389 	}
2390 
2391 	if (len < src_ci->i_layout.object_size)
2392 		return -EOPNOTSUPP; /* no remote copy will be done */
2393 
2394 	prealloc_cf = ceph_alloc_cap_flush();
2395 	if (!prealloc_cf)
2396 		return -ENOMEM;
2397 
2398 	/* Start by sync'ing the source and destination files */
2399 	ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
2400 	if (ret < 0) {
2401 		dout("failed to write src file (%zd)\n", ret);
2402 		goto out;
2403 	}
2404 	ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
2405 	if (ret < 0) {
2406 		dout("failed to write dst file (%zd)\n", ret);
2407 		goto out;
2408 	}
2409 
2410 	/*
2411 	 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
2412 	 * clients may have dirty data in their caches.  And OSDs know nothing
2413 	 * about caps, so they can't safely do the remote object copies.
2414 	 */
2415 	err = get_rd_wr_caps(src_file, &src_got,
2416 			     dst_file, (dst_off + len), &dst_got);
2417 	if (err < 0) {
2418 		dout("get_rd_wr_caps returned %d\n", err);
2419 		ret = -EOPNOTSUPP;
2420 		goto out;
2421 	}
2422 
2423 	ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
2424 	if (ret < 0)
2425 		goto out_caps;
2426 
2427 	/* Drop dst file cached pages */
2428 	ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
2429 					    dst_off >> PAGE_SHIFT,
2430 					    (dst_off + len) >> PAGE_SHIFT);
2431 	if (ret < 0) {
2432 		dout("Failed to invalidate inode pages (%zd)\n", ret);
2433 		ret = 0; /* XXX */
2434 	}
2435 	ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2436 				      src_ci->i_layout.object_size,
2437 				      &src_objnum, &src_objoff, &src_objlen);
2438 	ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2439 				      dst_ci->i_layout.object_size,
2440 				      &dst_objnum, &dst_objoff, &dst_objlen);
2441 	/* object-level offsets need to be the same */
2442 	if (src_objoff != dst_objoff) {
2443 		ret = -EOPNOTSUPP;
2444 		goto out_caps;
2445 	}
2446 
2447 	/*
2448 	 * Do a manual copy if the object offset isn't object aligned.
2449 	 * 'src_objlen' contains the bytes left until the end of the object,
2450 	 * starting at the src_off
2451 	 * starting at src_off.
2452 	if (src_objoff) {
2453 		dout("Initial partial copy of %u bytes\n", src_objlen);
2454 
2455 		/*
2456 		 * we need to temporarily drop all caps as we'll be calling
2457 		 * {read,write}_iter, which will get caps again.
2458 		 */
2459 		put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2460 		ret = do_splice_direct(src_file, &src_off, dst_file,
2461 				       &dst_off, src_objlen, flags);
2462 		/* Abort on short copies or on error */
2463 		if (ret < src_objlen) {
2464 			dout("Failed partial copy (%zd)\n", ret);
2465 			goto out;
2466 		}
2467 		len -= ret;
2468 		err = get_rd_wr_caps(src_file, &src_got,
2469 				     dst_file, (dst_off + len), &dst_got);
2470 		if (err < 0)
2471 			goto out;
2472 		err = is_file_size_ok(src_inode, dst_inode,
2473 				      src_off, dst_off, len);
2474 		if (err < 0)
2475 			goto out_caps;
2476 	}
2477 
2478 	size = i_size_read(dst_inode);
2479 	bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
2480 				     src_fsc, len, flags);
2481 	if (bytes <= 0) {
2482 		if (!ret)
2483 			ret = bytes;
2484 		goto out_caps;
2485 	}
2486 	dout("Copied %zd bytes out of %zu\n", bytes, len);
2487 	len -= bytes;
2488 	ret += bytes;
2489 
2490 	file_update_time(dst_file);
2491 	inode_inc_iversion_raw(dst_inode);
2492 
2493 	if (dst_off > size) {
2494 		/* Let the MDS know about dst file size change */
2495 		if (ceph_inode_set_size(dst_inode, dst_off) ||
2496 		    ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
2497 			ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY, NULL);
2498 	}
2499 	/* Mark Fw dirty */
2500 	spin_lock(&dst_ci->i_ceph_lock);
2501 	dst_ci->i_inline_version = CEPH_INLINE_NONE;
2502 	dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2503 	spin_unlock(&dst_ci->i_ceph_lock);
2504 	if (dirty)
2505 		__mark_inode_dirty(dst_inode, dirty);
2506 
2507 out_caps:
2508 	put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2509 
2510 	/*
2511 	 * Do the final manual copy if we still have some bytes left, unless
2512 	 * there were errors in the remote object copies (then len >= object_size).
2513 	 */
2514 	if (len && (len < src_ci->i_layout.object_size)) {
2515 		dout("Final partial copy of %zu bytes\n", len);
2516 		bytes = do_splice_direct(src_file, &src_off, dst_file,
2517 					 &dst_off, len, flags);
2518 		if (bytes > 0)
2519 			ret += bytes;
2520 		else
2521 			dout("Failed partial copy (%zd)\n", bytes);
2522 	}
2523 
2524 out:
2525 	ceph_free_cap_flush(prealloc_cf);
2526 
2527 	return ret;
2528 }
2529 
2530 static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
2531 				    struct file *dst_file, loff_t dst_off,
2532 				    size_t len, unsigned int flags)
2533 {
2534 	ssize_t ret;
2535 
2536 	ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
2537 				     len, flags);
2538 
2539 	if (ret == -EOPNOTSUPP || ret == -EXDEV)
2540 		ret = generic_copy_file_range(src_file, src_off, dst_file,
2541 					      dst_off, len, flags);
2542 	return ret;
2543 }
2544 
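/*
 * Illustration only (not part of this file): a user-space copy_file_range()
 * call that can take the remote object-copy path above when both files are
 * on the same CephFS cluster, the layouts match, and the length covers at
 * least one full object.  Paths and sizes are assumptions (4 MiB is the
 * default object size).
 */
#if 0	/* example, build separately in user space */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int src = open("/mnt/cephfs/src", O_RDONLY);	/* hypothetical paths */
	int dst = open("/mnt/cephfs/dst", O_WRONLY | O_CREAT, 0644);
	ssize_t n;

	if (src < 0 || dst < 0)
		return 1;
	/* 8 MiB spans two default-sized objects, eligible for offload. */
	n = copy_file_range(src, NULL, dst, NULL, 8 * 1024 * 1024, 0);
	if (n < 0)
		perror("copy_file_range");
	else
		printf("copied %zd bytes\n", n);
	close(src);
	close(dst);
	return 0;
}
#endif
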
2545 const struct file_operations ceph_file_fops = {
2546 	.open = ceph_open,
2547 	.release = ceph_release,
2548 	.llseek = ceph_llseek,
2549 	.read_iter = ceph_read_iter,
2550 	.write_iter = ceph_write_iter,
2551 	.mmap = ceph_mmap,
2552 	.fsync = ceph_fsync,
2553 	.lock = ceph_lock,
2554 	.setlease = simple_nosetlease,
2555 	.flock = ceph_flock,
2556 	.splice_read = generic_file_splice_read,
2557 	.splice_write = iter_file_splice_write,
2558 	.unlocked_ioctl = ceph_ioctl,
2559 	.compat_ioctl = compat_ptr_ioctl,
2560 	.fallocate	= ceph_fallocate,
2561 	.copy_file_range = ceph_copy_file_range,
2562 };
2563