xref: /openbmc/linux/fs/ceph/file.c (revision 45cc35e8)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3 #include <linux/ceph/striper.h>
4 
5 #include <linux/module.h>
6 #include <linux/sched.h>
7 #include <linux/slab.h>
8 #include <linux/file.h>
9 #include <linux/mount.h>
10 #include <linux/namei.h>
11 #include <linux/writeback.h>
12 #include <linux/falloc.h>
13 #include <linux/iversion.h>
14 #include <linux/ktime.h>
15 
16 #include "super.h"
17 #include "mds_client.h"
18 #include "cache.h"
19 #include "io.h"
20 #include "metric.h"
21 
22 static __le32 ceph_flags_sys2wire(u32 flags)
23 {
24 	u32 wire_flags = 0;
25 
26 	switch (flags & O_ACCMODE) {
27 	case O_RDONLY:
28 		wire_flags |= CEPH_O_RDONLY;
29 		break;
30 	case O_WRONLY:
31 		wire_flags |= CEPH_O_WRONLY;
32 		break;
33 	case O_RDWR:
34 		wire_flags |= CEPH_O_RDWR;
35 		break;
36 	}
37 
38 	flags &= ~O_ACCMODE;
39 
40 #define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }
41 
42 	ceph_sys2wire(O_CREAT);
43 	ceph_sys2wire(O_EXCL);
44 	ceph_sys2wire(O_TRUNC);
45 	ceph_sys2wire(O_DIRECTORY);
46 	ceph_sys2wire(O_NOFOLLOW);
47 
48 #undef ceph_sys2wire
49 
50 	if (flags)
51 		dout("unused open flags: %x\n", flags);
52 
53 	return cpu_to_le32(wire_flags);
54 }
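
/*
 * Example (derivable from the switch and macro above): a typical
 * create-for-write open translates as
 *
 *	ceph_flags_sys2wire(O_WRONLY | O_CREAT | O_TRUNC)
 *		== cpu_to_le32(CEPH_O_WRONLY | CEPH_O_CREAT | CEPH_O_TRUNC)
 *
 * Flags without a wire equivalent (e.g. O_APPEND, O_NONBLOCK) are
 * left in @flags, reported by the dout() above, and omitted from the
 * wire encoding.
 */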
55 
56 /*
57  * Ceph file operations
58  *
59  * Implement basic open/close functionality, and implement
60  * read/write.
61  *
62  * We implement three modes of file I/O:
63  *  - buffered uses the generic_file_read_iter and generic_perform_write helpers
64  *
65  *  - synchronous is used when there is multi-client read/write
66  *    sharing, avoids the page cache, and synchronously waits for an
67  *    ack from the OSD.
68  *
69  *  - direct io takes the variant of the sync path that references
70  *    user pages directly.
71  *
72  * fsync() flushes and waits on dirty pages, but just queues metadata
73  * for writeback: since the MDS can recover size and mtime there is no
74  * need to wait for MDS acknowledgement.
75  */
76 
77 /*
78  * How many pages to get in one call to iov_iter_get_pages2().  This
79  * determines the size of the on-stack array used as a buffer.
80  */
81 #define ITER_GET_BVECS_PAGES	64
82 
83 static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
84 				struct bio_vec *bvecs)
85 {
86 	size_t size = 0;
87 	int bvec_idx = 0;
88 
89 	if (maxsize > iov_iter_count(iter))
90 		maxsize = iov_iter_count(iter);
91 
92 	while (size < maxsize) {
93 		struct page *pages[ITER_GET_BVECS_PAGES];
94 		ssize_t bytes;
95 		size_t start;
96 		int idx = 0;
97 
98 		bytes = iov_iter_get_pages2(iter, pages, maxsize - size,
99 					   ITER_GET_BVECS_PAGES, &start);
100 		if (bytes < 0)
101 			return size ?: bytes;
102 
103 		size += bytes;
104 
105 		for ( ; bytes; idx++, bvec_idx++) {
106 			int len = min_t(int, bytes, PAGE_SIZE - start);
107 
108 			bvec_set_page(&bvecs[bvec_idx], pages[idx], len, start);
109 			bytes -= len;
110 			start = 0;
111 		}
112 	}
113 
114 	return size;
115 }
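
/*
 * Worked example, assuming PAGE_SIZE == 4096: a user buffer that
 * starts 0x100 bytes into a page, with maxsize == 8192, yields three
 * bvecs:
 *
 *	{ .bv_page = p0, .bv_offset = 0x100, .bv_len = 0xf00  }
 *	{ .bv_page = p1, .bv_offset = 0,     .bv_len = 0x1000 }
 *	{ .bv_page = p2, .bv_offset = 0,     .bv_len = 0x100  }
 *
 * Only the first bvec of each iov_iter_get_pages2() batch can carry a
 * nonzero offset; @start is reset to 0 after the first page.
 */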
116 
117 /*
118  * iov_iter_get_pages2() only considers one iov_iter segment, no matter
119  * what maxsize or maxpages are given.  For ITER_BVEC that is a single
120  * page.
121  *
122  * Attempt to get up to @maxsize bytes worth of pages from @iter.
123  * Return the number of bytes in the created bio_vec array, or an error.
124  */
125 static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
126 				    struct bio_vec **bvecs, int *num_bvecs)
127 {
128 	struct bio_vec *bv;
129 	size_t orig_count = iov_iter_count(iter);
130 	ssize_t bytes;
131 	int npages;
132 
133 	iov_iter_truncate(iter, maxsize);
134 	npages = iov_iter_npages(iter, INT_MAX);
135 	iov_iter_reexpand(iter, orig_count);
136 
137 	/*
138 	 * __iter_get_bvecs() may populate only part of the array -- zero it
139 	 * out.
140 	 */
141 	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
142 	if (!bv)
143 		return -ENOMEM;
144 
145 	bytes = __iter_get_bvecs(iter, maxsize, bv);
146 	if (bytes < 0) {
147 		/*
148 		 * No pages were pinned -- just free the array.
149 		 */
150 		kvfree(bv);
151 		return bytes;
152 	}
153 
154 	*bvecs = bv;
155 	*num_bvecs = npages;
156 	return bytes;
157 }
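
/*
 * Note that *num_bvecs reports the allocated array size (npages), not
 * the number of entries actually populated: __iter_get_bvecs() may
 * stop short if page pinning fails part-way.  The __GFP_ZERO
 * allocation above guarantees that any unused tail entries have a
 * NULL bv_page, which put_bvecs() relies on below.
 */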
158 
159 static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
160 {
161 	int i;
162 
163 	for (i = 0; i < num_bvecs; i++) {
164 		if (bvecs[i].bv_page) {
165 			if (should_dirty)
166 				set_page_dirty_lock(bvecs[i].bv_page);
167 			put_page(bvecs[i].bv_page);
168 		}
169 	}
170 	kvfree(bvecs);
171 }
172 
173 /*
174  * Prepare an open request.  Preallocate ceph_cap to avoid an
175  * inopportune ENOMEM later.
176  */
177 static struct ceph_mds_request *
178 prepare_open_request(struct super_block *sb, int flags, int create_mode)
179 {
180 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
181 	struct ceph_mds_request *req;
182 	int want_auth = USE_ANY_MDS;
183 	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;
184 
185 	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
186 		want_auth = USE_AUTH_MDS;
187 
188 	req = ceph_mdsc_create_request(mdsc, op, want_auth);
189 	if (IS_ERR(req))
190 		goto out;
191 	req->r_fmode = ceph_flags_to_mode(flags);
192 	req->r_args.open.flags = ceph_flags_sys2wire(flags);
193 	req->r_args.open.mode = cpu_to_le32(create_mode);
194 out:
195 	return req;
196 }
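
/*
 * Example: an open(2) with O_RDWR | O_CREAT becomes a
 * CEPH_MDS_OP_CREATE request targeted at the auth MDS (USE_AUTH_MDS),
 * with r_fmode derived via ceph_flags_to_mode() and the wire
 * flags/mode encoded as above.  A plain O_RDONLY open stays
 * CEPH_MDS_OP_OPEN and may be serviced by any MDS (USE_ANY_MDS).
 */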
197 
198 static int ceph_init_file_info(struct inode *inode, struct file *file,
199 					int fmode, bool isdir)
200 {
201 	struct ceph_inode_info *ci = ceph_inode(inode);
202 	struct ceph_mount_options *opt =
203 		ceph_inode_to_client(&ci->netfs.inode)->mount_options;
204 	struct ceph_file_info *fi;
205 	int ret;
206 
207 	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
208 			inode->i_mode, isdir ? "dir" : "regular");
209 	BUG_ON(inode->i_fop->release != ceph_release);
210 
211 	if (isdir) {
212 		struct ceph_dir_file_info *dfi =
213 			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
214 		if (!dfi)
215 			return -ENOMEM;
216 
217 		file->private_data = dfi;
218 		fi = &dfi->file_info;
219 		dfi->next_offset = 2;
220 		dfi->readdir_cache_idx = -1;
221 	} else {
222 		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
223 		if (!fi)
224 			return -ENOMEM;
225 
226 		if (opt->flags & CEPH_MOUNT_OPT_NOPAGECACHE)
227 			fi->flags |= CEPH_F_SYNC;
228 
229 		file->private_data = fi;
230 	}
231 
232 	ceph_get_fmode(ci, fmode, 1);
233 	fi->fmode = fmode;
234 
235 	spin_lock_init(&fi->rw_contexts_lock);
236 	INIT_LIST_HEAD(&fi->rw_contexts);
237 	fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
238 
239 	if ((file->f_mode & FMODE_WRITE) && ceph_has_inline_data(ci)) {
240 		ret = ceph_uninline_data(file);
241 		if (ret < 0)
242 			goto error;
243 	}
244 
245 	return 0;
246 
247 error:
248 	ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
249 	ceph_put_fmode(ci, fi->fmode, 1);
250 	kmem_cache_free(ceph_file_cachep, fi);
251 	/* wake up anyone waiting for caps on this inode */
252 	wake_up_all(&ci->i_cap_wq);
253 	return ret;
254 }
255 
256 /*
257  * initialize private struct file data.
258  * if we fail, clean up by dropping fmode reference on the ceph_inode
259  */
260 static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
261 {
262 	int ret = 0;
263 
264 	switch (inode->i_mode & S_IFMT) {
265 	case S_IFREG:
266 		ceph_fscache_use_cookie(inode, file->f_mode & FMODE_WRITE);
267 		fallthrough;
268 	case S_IFDIR:
269 		ret = ceph_init_file_info(inode, file, fmode,
270 						S_ISDIR(inode->i_mode));
271 		break;
272 
273 	case S_IFLNK:
274 		dout("init_file %p %p 0%o (symlink)\n", inode, file,
275 		     inode->i_mode);
276 		break;
277 
278 	default:
279 		dout("init_file %p %p 0%o (special)\n", inode, file,
280 		     inode->i_mode);
281 		/*
282 		 * we need to drop the open ref now, since we don't
283 		 * have .release set to ceph_release.
284 		 */
285 		BUG_ON(inode->i_fop->release == ceph_release);
286 
287 		/* call the proper open fop */
288 		ret = inode->i_fop->open(inode, file);
289 	}
290 	return ret;
291 }
292 
293 /*
294  * Try to renew caps after the session gets killed.
295  */
296 int ceph_renew_caps(struct inode *inode, int fmode)
297 {
298 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
299 	struct ceph_inode_info *ci = ceph_inode(inode);
300 	struct ceph_mds_request *req;
301 	int err, flags, wanted;
302 
303 	spin_lock(&ci->i_ceph_lock);
304 	__ceph_touch_fmode(ci, mdsc, fmode);
305 	wanted = __ceph_caps_file_wanted(ci);
306 	if (__ceph_is_any_real_caps(ci) &&
307 	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
308 		int issued = __ceph_caps_issued(ci, NULL);
309 		spin_unlock(&ci->i_ceph_lock);
310 		dout("renew caps %p want %s issued %s updating mds_wanted\n",
311 		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
312 		ceph_check_caps(ci, 0);
313 		return 0;
314 	}
315 	spin_unlock(&ci->i_ceph_lock);
316 
317 	flags = 0;
318 	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
319 		flags = O_RDWR;
320 	else if (wanted & CEPH_CAP_FILE_RD)
321 		flags = O_RDONLY;
322 	else if (wanted & CEPH_CAP_FILE_WR)
323 		flags = O_WRONLY;
324 #ifdef O_LAZY
325 	if (wanted & CEPH_CAP_FILE_LAZYIO)
326 		flags |= O_LAZY;
327 #endif
328 
329 	req = prepare_open_request(inode->i_sb, flags, 0);
330 	if (IS_ERR(req)) {
331 		err = PTR_ERR(req);
332 		goto out;
333 	}
334 
335 	req->r_inode = inode;
336 	ihold(inode);
337 	req->r_num_caps = 1;
338 
339 	err = ceph_mdsc_do_request(mdsc, NULL, req);
340 	ceph_mdsc_put_request(req);
341 out:
342 	dout("renew caps %p open result=%d\n", inode, err);
343 	return err < 0 ? err : 0;
344 }
345 
346 /*
347  * If we already have the requisite capabilities, we can satisfy
348  * the open request locally (no need to request new caps from the
349  * MDS).  We do, however, need to inform the MDS (asynchronously)
350  * if our wanted caps set expands.
351  */
352 int ceph_open(struct inode *inode, struct file *file)
353 {
354 	struct ceph_inode_info *ci = ceph_inode(inode);
355 	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
356 	struct ceph_mds_client *mdsc = fsc->mdsc;
357 	struct ceph_mds_request *req;
358 	struct ceph_file_info *fi = file->private_data;
359 	int err;
360 	int flags, fmode, wanted;
361 
362 	if (fi) {
363 		dout("open file %p is already opened\n", file);
364 		return 0;
365 	}
366 
367 	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
368 	flags = file->f_flags & ~(O_CREAT|O_EXCL);
369 	if (S_ISDIR(inode->i_mode))
370 		flags = O_DIRECTORY;  /* mds likes to know */
371 
372 	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
373 	     ceph_vinop(inode), file, flags, file->f_flags);
374 	fmode = ceph_flags_to_mode(flags);
375 	wanted = ceph_caps_for_mode(fmode);
376 
377 	/* snapped files are read-only */
378 	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
379 		return -EROFS;
380 
381 	/* trivially open snapdir */
382 	if (ceph_snap(inode) == CEPH_SNAPDIR) {
383 		return ceph_init_file(inode, file, fmode);
384 	}
385 
386 	/*
387 	 * No need to block if we have caps on the auth MDS (for
388 	 * write) or any MDS (for read).  Update wanted set
389 	 * asynchronously.
390 	 */
391 	spin_lock(&ci->i_ceph_lock);
392 	if (__ceph_is_any_real_caps(ci) &&
393 	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
394 		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
395 		int issued = __ceph_caps_issued(ci, NULL);
396 
397 		dout("open %p fmode %d want %s issued %s using existing\n",
398 		     inode, fmode, ceph_cap_string(wanted),
399 		     ceph_cap_string(issued));
400 		__ceph_touch_fmode(ci, mdsc, fmode);
401 		spin_unlock(&ci->i_ceph_lock);
402 
403 		/* adjust wanted? */
404 		if ((issued & wanted) != wanted &&
405 		    (mds_wanted & wanted) != wanted &&
406 		    ceph_snap(inode) != CEPH_SNAPDIR)
407 			ceph_check_caps(ci, 0);
408 
409 		return ceph_init_file(inode, file, fmode);
410 	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
411 		   (ci->i_snap_caps & wanted) == wanted) {
412 		__ceph_touch_fmode(ci, mdsc, fmode);
413 		spin_unlock(&ci->i_ceph_lock);
414 		return ceph_init_file(inode, file, fmode);
415 	}
416 
417 	spin_unlock(&ci->i_ceph_lock);
418 
419 	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
420 	req = prepare_open_request(inode->i_sb, flags, 0);
421 	if (IS_ERR(req)) {
422 		err = PTR_ERR(req);
423 		goto out;
424 	}
425 	req->r_inode = inode;
426 	ihold(inode);
427 
428 	req->r_num_caps = 1;
429 	err = ceph_mdsc_do_request(mdsc, NULL, req);
430 	if (!err)
431 		err = ceph_init_file(inode, file, req->r_fmode);
432 	ceph_mdsc_put_request(req);
433 	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
434 out:
435 	return err;
436 }
437 
438 /* Clone the layout from a synchronous create, if the dir now has Dc caps */
439 static void
440 cache_file_layout(struct inode *dst, struct inode *src)
441 {
442 	struct ceph_inode_info *cdst = ceph_inode(dst);
443 	struct ceph_inode_info *csrc = ceph_inode(src);
444 
445 	spin_lock(&cdst->i_ceph_lock);
446 	if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
447 	    !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
448 		memcpy(&cdst->i_cached_layout, &csrc->i_layout,
449 			sizeof(cdst->i_cached_layout));
450 		rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
451 				   ceph_try_get_string(csrc->i_layout.pool_ns));
452 	}
453 	spin_unlock(&cdst->i_ceph_lock);
454 }
455 
456 /*
457  * Try to set up an async create. We need caps, a file layout, and a
458  * delegated inode number, and either a lease on the dentry or complete
459  * dir info. If any of those criteria are not satisfied, return 0 (no
460  * caps taken) so the caller can fall back to a synchronous create.
461  */
462 static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
463 				 struct ceph_file_layout *lo, u64 *pino)
464 {
465 	struct ceph_inode_info *ci = ceph_inode(dir);
466 	struct ceph_dentry_info *di = ceph_dentry(dentry);
467 	int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
468 	u64 ino;
469 
470 	spin_lock(&ci->i_ceph_lock);
471 	/* No auth cap means no chance for Dc caps */
472 	if (!ci->i_auth_cap)
473 		goto no_async;
474 
475 	/* Any delegated inos? */
476 	if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
477 		goto no_async;
478 
479 	if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
480 		goto no_async;
481 
482 	if ((__ceph_caps_issued(ci, NULL) & want) != want)
483 		goto no_async;
484 
485 	if (d_in_lookup(dentry)) {
486 		if (!__ceph_dir_is_complete(ci))
487 			goto no_async;
488 		spin_lock(&dentry->d_lock);
489 		di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
490 		spin_unlock(&dentry->d_lock);
491 	} else if (atomic_read(&ci->i_shared_gen) !=
492 		   READ_ONCE(di->lease_shared_gen)) {
493 		goto no_async;
494 	}
495 
496 	ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
497 	if (!ino)
498 		goto no_async;
499 
500 	*pino = ino;
501 	ceph_take_cap_refs(ci, want, false);
502 	memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
503 	rcu_assign_pointer(lo->pool_ns,
504 			   ceph_try_get_string(ci->i_cached_layout.pool_ns));
505 	got = want;
506 no_async:
507 	spin_unlock(&ci->i_ceph_lock);
508 	return got;
509 }
510 
511 static void restore_deleg_ino(struct inode *dir, u64 ino)
512 {
513 	struct ceph_inode_info *ci = ceph_inode(dir);
514 	struct ceph_mds_session *s = NULL;
515 
516 	spin_lock(&ci->i_ceph_lock);
517 	if (ci->i_auth_cap)
518 		s = ceph_get_mds_session(ci->i_auth_cap->session);
519 	spin_unlock(&ci->i_ceph_lock);
520 	if (s) {
521 		int err = ceph_restore_deleg_ino(s, ino);
522 		if (err)
523 			pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
524 				ino, err);
525 		ceph_put_mds_session(s);
526 	}
527 }
528 
529 static void wake_async_create_waiters(struct inode *inode,
530 				      struct ceph_mds_session *session)
531 {
532 	struct ceph_inode_info *ci = ceph_inode(inode);
533 	bool check_cap = false;
534 
535 	spin_lock(&ci->i_ceph_lock);
536 	if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
537 		ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
538 		wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
539 
540 		if (ci->i_ceph_flags & CEPH_I_ASYNC_CHECK_CAPS) {
541 			ci->i_ceph_flags &= ~CEPH_I_ASYNC_CHECK_CAPS;
542 			check_cap = true;
543 		}
544 	}
545 	ceph_kick_flushing_inode_caps(session, ci);
546 	spin_unlock(&ci->i_ceph_lock);
547 
548 	if (check_cap)
549 		ceph_check_caps(ci, CHECK_CAPS_FLUSH);
550 }
551 
552 static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
553                                  struct ceph_mds_request *req)
554 {
555 	struct dentry *dentry = req->r_dentry;
556 	struct inode *dinode = d_inode(dentry);
557 	struct inode *tinode = req->r_target_inode;
558 	int result = req->r_err ? req->r_err :
559 			le32_to_cpu(req->r_reply_info.head->result);
560 
561 	WARN_ON_ONCE(dinode && tinode && dinode != tinode);
562 
563 	/* MDS changed -- caller must resubmit */
564 	if (result == -EJUKEBOX)
565 		goto out;
566 
567 	mapping_set_error(req->r_parent->i_mapping, result);
568 
569 	if (result) {
570 		int pathlen = 0;
571 		u64 base = 0;
572 		char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
573 						  &base, 0);
574 
575 		pr_warn("async create failure path=(%llx)%s result=%d!\n",
576 			base, IS_ERR(path) ? "<<bad>>" : path, result);
577 		ceph_mdsc_free_path(path, pathlen);
578 
579 		ceph_dir_clear_complete(req->r_parent);
580 		if (!d_unhashed(dentry))
581 			d_drop(dentry);
582 
583 		if (dinode) {
584 			mapping_set_error(dinode->i_mapping, result);
585 			ceph_inode_shutdown(dinode);
586 			wake_async_create_waiters(dinode, req->r_session);
587 		}
588 	}
589 
590 	if (tinode) {
591 		u64 ino = ceph_vino(tinode).ino;
592 
593 		if (req->r_deleg_ino != ino)
594 			pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
595 				__func__, req->r_err, req->r_deleg_ino, ino);
596 
597 		mapping_set_error(tinode->i_mapping, result);
598 		wake_async_create_waiters(tinode, req->r_session);
599 	} else if (!result) {
600 		pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
601 			req->r_deleg_ino);
602 	}
603 out:
604 	ceph_mdsc_release_dir_caps(req);
605 }
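
/*
 * Note on the -EJUKEBOX path above: the MDS returns it when it can no
 * longer honor the async create (typically because the auth MDS
 * changed).  The callback deliberately skips cleanup in that case;
 * ceph_atomic_open() sees the same error from
 * ceph_mdsc_submit_request(), restores the delegated ino, and retries
 * with a synchronous create.
 */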
606 
607 static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
608 				    struct file *file, umode_t mode,
609 				    struct ceph_mds_request *req,
610 				    struct ceph_acl_sec_ctx *as_ctx,
611 				    struct ceph_file_layout *lo)
612 {
613 	int ret;
614 	char xattr_buf[4];
615 	struct ceph_mds_reply_inode in = { };
616 	struct ceph_mds_reply_info_in iinfo = { .in = &in };
617 	struct ceph_inode_info *ci = ceph_inode(dir);
618 	struct ceph_dentry_info *di = ceph_dentry(dentry);
619 	struct inode *inode;
620 	struct timespec64 now;
621 	struct ceph_string *pool_ns;
622 	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
623 	struct ceph_vino vino = { .ino = req->r_deleg_ino,
624 				  .snap = CEPH_NOSNAP };
625 
626 	ktime_get_real_ts64(&now);
627 
628 	inode = ceph_get_inode(dentry->d_sb, vino);
629 	if (IS_ERR(inode))
630 		return PTR_ERR(inode);
631 
632 	iinfo.inline_version = CEPH_INLINE_NONE;
633 	iinfo.change_attr = 1;
634 	ceph_encode_timespec64(&iinfo.btime, &now);
635 
636 	if (req->r_pagelist) {
637 		iinfo.xattr_len = req->r_pagelist->length;
638 		iinfo.xattr_data = req->r_pagelist->mapped_tail;
639 	} else {
640 		/* fake it */
641 		iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
642 		iinfo.xattr_data = xattr_buf;
643 		memset(iinfo.xattr_data, 0, iinfo.xattr_len);
644 	}
645 
646 	in.ino = cpu_to_le64(vino.ino);
647 	in.snapid = cpu_to_le64(CEPH_NOSNAP);
648 	in.version = cpu_to_le64(1);	// ???
649 	in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
650 	in.cap.cap_id = cpu_to_le64(1);
651 	in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
652 	in.cap.flags = CEPH_CAP_FLAG_AUTH;
653 	in.ctime = in.mtime = in.atime = iinfo.btime;
654 	in.truncate_seq = cpu_to_le32(1);
655 	in.truncate_size = cpu_to_le64(-1ULL);
656 	in.xattr_version = cpu_to_le64(1);
657 	in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
658 	if (dir->i_mode & S_ISGID) {
659 		in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));
660 
661 		/* Directories always inherit the setgid bit. */
662 		if (S_ISDIR(mode))
663 			mode |= S_ISGID;
664 	} else {
665 		in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
666 	}
667 	in.mode = cpu_to_le32((u32)mode);
668 
669 	in.nlink = cpu_to_le32(1);
670 	in.max_size = cpu_to_le64(lo->stripe_unit);
671 
672 	ceph_file_layout_to_legacy(lo, &in.layout);
673 	/* lo is private, so pool_ns can't change */
674 	pool_ns = rcu_dereference_raw(lo->pool_ns);
675 	if (pool_ns) {
676 		iinfo.pool_ns_len = pool_ns->len;
677 		iinfo.pool_ns_data = pool_ns->str;
678 	}
679 
680 	down_read(&mdsc->snap_rwsem);
681 	ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
682 			      req->r_fmode, NULL);
683 	up_read(&mdsc->snap_rwsem);
684 	if (ret) {
685 		dout("%s failed to fill inode: %d\n", __func__, ret);
686 		ceph_dir_clear_complete(dir);
687 		if (!d_unhashed(dentry))
688 			d_drop(dentry);
689 		if (inode->i_state & I_NEW)
690 			discard_new_inode(inode);
691 	} else {
692 		struct dentry *dn;
693 
694 		dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
695 			vino.ino, ceph_ino(dir), dentry->d_name.name);
696 		ceph_dir_clear_ordered(dir);
697 		ceph_init_inode_acls(inode, as_ctx);
698 		if (inode->i_state & I_NEW) {
699 			/*
700 			 * If it's not I_NEW, then someone created this before
701 			 * we got here. Assume the server is aware of it at
702 			 * that point and don't worry about setting
703 			 * CEPH_I_ASYNC_CREATE.
704 			 */
705 			ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
706 			unlock_new_inode(inode);
707 		}
708 		if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
709 			if (!d_unhashed(dentry))
710 				d_drop(dentry);
711 			dn = d_splice_alias(inode, dentry);
712 			WARN_ON_ONCE(dn && dn != dentry);
713 		}
714 		file->f_mode |= FMODE_CREATED;
715 		ret = finish_open(file, dentry, ceph_open);
716 	}
717 
718 	spin_lock(&dentry->d_lock);
719 	di->flags &= ~CEPH_DENTRY_ASYNC_CREATE;
720 	wake_up_bit(&di->flags, CEPH_DENTRY_ASYNC_CREATE_BIT);
721 	spin_unlock(&dentry->d_lock);
722 
723 	return ret;
724 }
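
/*
 * Note that the ceph_mds_reply_inode populated above is fabricated
 * locally -- the MDS has not replied yet.  ceph_fill_inode() consumes
 * it exactly as it would a real trace, so the new inode is usable
 * immediately; ceph_async_create_cb() later checks the actual reply
 * against it (see the deleg_ino mismatch warning there).
 */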
725 
726 /*
727  * Do a lookup + open with a single request.  If we get a non-existent
728  * file or symlink, return 1 so the VFS can retry.
729  */
730 int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
731 		     struct file *file, unsigned flags, umode_t mode)
732 {
733 	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
734 	struct ceph_mds_client *mdsc = fsc->mdsc;
735 	struct ceph_mds_request *req;
736 	struct dentry *dn;
737 	struct ceph_acl_sec_ctx as_ctx = {};
738 	bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
739 	int mask;
740 	int err;
741 
742 	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
743 	     dir, dentry, dentry,
744 	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
745 
746 	if (dentry->d_name.len > NAME_MAX)
747 		return -ENAMETOOLONG;
748 
749 	err = ceph_wait_on_conflict_unlink(dentry);
750 	if (err)
751 		return err;
752 	/*
753 	 * Do not truncate the file, since atomic_open is called before the
754 	 * permission check. The caller will do the truncation afterward.
755 	 */
756 	flags &= ~O_TRUNC;
757 
758 	if (flags & O_CREAT) {
759 		if (ceph_quota_is_max_files_exceeded(dir))
760 			return -EDQUOT;
761 		err = ceph_pre_init_acls(dir, &mode, &as_ctx);
762 		if (err < 0)
763 			return err;
764 		err = ceph_security_init_secctx(dentry, mode, &as_ctx);
765 		if (err < 0)
766 			goto out_ctx;
767 		/* Async create can't handle more than a page of xattrs */
768 		if (as_ctx.pagelist &&
769 		    !list_is_singular(&as_ctx.pagelist->head))
770 			try_async = false;
771 	} else if (!d_in_lookup(dentry)) {
772 		/* If it's not being looked up, it's negative */
773 		return -ENOENT;
774 	}
775 retry:
776 	/* do the open */
777 	req = prepare_open_request(dir->i_sb, flags, mode);
778 	if (IS_ERR(req)) {
779 		err = PTR_ERR(req);
780 		goto out_ctx;
781 	}
782 	req->r_dentry = dget(dentry);
783 	req->r_num_caps = 2;
784 	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
785 	if (ceph_security_xattr_wanted(dir))
786 		mask |= CEPH_CAP_XATTR_SHARED;
787 	req->r_args.open.mask = cpu_to_le32(mask);
788 	req->r_parent = dir;
789 	ihold(dir);
790 
791 	if (flags & O_CREAT) {
792 		struct ceph_file_layout lo;
793 
794 		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
795 		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
796 		if (as_ctx.pagelist) {
797 			req->r_pagelist = as_ctx.pagelist;
798 			as_ctx.pagelist = NULL;
799 		}
800 		if (try_async &&
801 		    (req->r_dir_caps =
802 		      try_prep_async_create(dir, dentry, &lo,
803 					    &req->r_deleg_ino))) {
804 			struct ceph_dentry_info *di = ceph_dentry(dentry);
805 
806 			set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
807 			req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
808 			req->r_callback = ceph_async_create_cb;
809 
810 			spin_lock(&dentry->d_lock);
811 			di->flags |= CEPH_DENTRY_ASYNC_CREATE;
812 			spin_unlock(&dentry->d_lock);
813 
814 			err = ceph_mdsc_submit_request(mdsc, dir, req);
815 			if (!err) {
816 				err = ceph_finish_async_create(dir, dentry,
817 							file, mode, req,
818 							&as_ctx, &lo);
819 			} else if (err == -EJUKEBOX) {
820 				restore_deleg_ino(dir, req->r_deleg_ino);
821 				ceph_mdsc_put_request(req);
822 				try_async = false;
823 				ceph_put_string(rcu_dereference_raw(lo.pool_ns));
824 				goto retry;
825 			}
826 			ceph_put_string(rcu_dereference_raw(lo.pool_ns));
827 			goto out_req;
828 		}
829 	}
830 
831 	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
832 	err = ceph_mdsc_do_request(mdsc, (flags & O_CREAT) ? dir : NULL, req);
833 	if (err == -ENOENT) {
834 		dentry = ceph_handle_snapdir(req, dentry);
835 		if (IS_ERR(dentry)) {
836 			err = PTR_ERR(dentry);
837 			goto out_req;
838 		}
839 		err = 0;
840 	}
841 
842 	if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
843 		err = ceph_handle_notrace_create(dir, dentry);
844 
845 	if (d_in_lookup(dentry)) {
846 		dn = ceph_finish_lookup(req, dentry, err);
847 		if (IS_ERR(dn))
848 			err = PTR_ERR(dn);
849 	} else {
850 		/* we were given a hashed negative dentry */
851 		dn = NULL;
852 	}
853 	if (err)
854 		goto out_req;
855 	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
856 		/* make vfs retry on splice, ENOENT, or symlink */
857 		dout("atomic_open finish_no_open on dn %p\n", dn);
858 		err = finish_no_open(file, dn);
859 	} else {
860 		dout("atomic_open finish_open on dn %p\n", dn);
861 		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
862 			struct inode *newino = d_inode(dentry);
863 
864 			cache_file_layout(dir, newino);
865 			ceph_init_inode_acls(newino, &as_ctx);
866 			file->f_mode |= FMODE_CREATED;
867 		}
868 		err = finish_open(file, dentry, ceph_open);
869 	}
870 out_req:
871 	ceph_mdsc_put_request(req);
872 out_ctx:
873 	ceph_release_acl_sec_ctx(&as_ctx);
874 	dout("atomic_open result=%d\n", err);
875 	return err;
876 }
877 
878 int ceph_release(struct inode *inode, struct file *file)
879 {
880 	struct ceph_inode_info *ci = ceph_inode(inode);
881 
882 	if (S_ISDIR(inode->i_mode)) {
883 		struct ceph_dir_file_info *dfi = file->private_data;
884 		dout("release inode %p dir file %p\n", inode, file);
885 		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
886 
887 		ceph_put_fmode(ci, dfi->file_info.fmode, 1);
888 
889 		if (dfi->last_readdir)
890 			ceph_mdsc_put_request(dfi->last_readdir);
891 		kfree(dfi->last_name);
892 		kfree(dfi->dir_info);
893 		kmem_cache_free(ceph_dir_file_cachep, dfi);
894 	} else {
895 		struct ceph_file_info *fi = file->private_data;
896 		dout("release inode %p regular file %p\n", inode, file);
897 		WARN_ON(!list_empty(&fi->rw_contexts));
898 
899 		ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
900 		ceph_put_fmode(ci, fi->fmode, 1);
901 
902 		kmem_cache_free(ceph_file_cachep, fi);
903 	}
904 
905 	/* wake up anyone waiting for caps on this inode */
906 	wake_up_all(&ci->i_cap_wq);
907 	return 0;
908 }
909 
910 enum {
911 	HAVE_RETRIED = 1,
912 	CHECK_EOF =    2,
913 	READ_INLINE =  3,
914 };
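
/*
 * retry_op protocol used by ceph_read_iter() below: HAVE_RETRIED
 * marks a read that has already been retried once after a short
 * result; CHECK_EOF requests an i_size recheck (EOF vs. hole) before
 * possibly retrying; READ_INLINE diverts the read to inline data
 * fetched via __ceph_do_getattr(CEPH_STAT_CAP_INLINE_DATA).
 */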
915 
916 /*
917  * Completely synchronous read and write methods.  Direct from __user
918  * buffer to osd, or directly to user pages (if O_DIRECT).
919  *
920  * If the read spans object boundary, just do multiple reads.  (That's not
921  * atomic, but good enough for now.)
922  *
923  * If we get a short result from the OSD, check against i_size; we need to
924  * only return a short read to the caller if we hit EOF.
925  */
926 static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
927 			      int *retry_op)
928 {
929 	struct file *file = iocb->ki_filp;
930 	struct inode *inode = file_inode(file);
931 	struct ceph_inode_info *ci = ceph_inode(inode);
932 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
933 	struct ceph_osd_client *osdc = &fsc->client->osdc;
934 	ssize_t ret;
935 	u64 off = iocb->ki_pos;
936 	u64 len = iov_iter_count(to);
937 	u64 i_size = i_size_read(inode);
938 
939 	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
940 	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
941 
942 	if (!len)
943 		return 0;
944 	/*
945 	 * flush any page cache pages in this range.  this
946 	 * will make concurrent normal and sync io slow,
947 	 * but it will at least behave sensibly when they are
948 	 * in sequence.
949 	 */
950 	ret = filemap_write_and_wait_range(inode->i_mapping,
951 					   off, off + len - 1);
952 	if (ret < 0)
953 		return ret;
954 
955 	ret = 0;
956 	while ((len = iov_iter_count(to)) > 0) {
957 		struct ceph_osd_request *req;
958 		struct page **pages;
959 		int num_pages;
960 		size_t page_off;
961 		bool more;
962 		int idx;
963 		size_t left;
964 
965 		req = ceph_osdc_new_request(osdc, &ci->i_layout,
966 					ci->i_vino, off, &len, 0, 1,
967 					CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
968 					NULL, ci->i_truncate_seq,
969 					ci->i_truncate_size, false);
970 		if (IS_ERR(req)) {
971 			ret = PTR_ERR(req);
972 			break;
973 		}
974 
975 		more = len < iov_iter_count(to);
976 
977 		num_pages = calc_pages_for(off, len);
978 		page_off = off & ~PAGE_MASK;
979 		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
980 		if (IS_ERR(pages)) {
981 			ceph_osdc_put_request(req);
982 			ret = PTR_ERR(pages);
983 			break;
984 		}
985 
986 		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
987 						 false, false);
988 		ceph_osdc_start_request(osdc, req);
989 		ret = ceph_osdc_wait_request(osdc, req);
990 
991 		ceph_update_read_metrics(&fsc->mdsc->metric,
992 					 req->r_start_latency,
993 					 req->r_end_latency,
994 					 len, ret);
995 
996 		ceph_osdc_put_request(req);
997 
998 		i_size = i_size_read(inode);
999 		dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
1000 		     off, len, ret, i_size, (more ? " MORE" : ""));
1001 
1002 		if (ret == -ENOENT)
1003 			ret = 0;
1004 		if (ret >= 0 && ret < len && (off + ret < i_size)) {
1005 			int zlen = min(len - ret, i_size - off - ret);
1006 			int zoff = page_off + ret;
1007 			dout("sync_read zero gap %llu~%llu\n",
1008 			     off + ret, off + ret + zlen);
1009 			ceph_zero_page_vector_range(zoff, zlen, pages);
1010 			ret += zlen;
1011 		}
1012 
1013 		idx = 0;
1014 		left = ret > 0 ? ret : 0;
1015 		while (left > 0) {
1016 			size_t len, copied;
1017 			page_off = off & ~PAGE_MASK;
1018 			len = min_t(size_t, left, PAGE_SIZE - page_off);
1019 			SetPageUptodate(pages[idx]);
1020 			copied = copy_page_to_iter(pages[idx++],
1021 						   page_off, len, to);
1022 			off += copied;
1023 			left -= copied;
1024 			if (copied < len) {
1025 				ret = -EFAULT;
1026 				break;
1027 			}
1028 		}
1029 		ceph_release_page_vector(pages, num_pages);
1030 
1031 		if (ret < 0) {
1032 			if (ret == -EBLOCKLISTED)
1033 				fsc->blocklisted = true;
1034 			break;
1035 		}
1036 
1037 		if (off >= i_size || !more)
1038 			break;
1039 	}
1040 
1041 	if (off > iocb->ki_pos) {
1042 		if (off >= i_size) {
1043 			*retry_op = CHECK_EOF;
1044 			ret = i_size - iocb->ki_pos;
1045 			iocb->ki_pos = i_size;
1046 		} else {
1047 			ret = off - iocb->ki_pos;
1048 			iocb->ki_pos = off;
1049 		}
1050 	}
1051 
1052 	dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
1053 	return ret;
1054 }
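
/*
 * Worked example of the zero-fill logic above: a read of len == 8192
 * at off == 0 where the OSD returns ret == 4096 and i_size == 6144
 * gives
 *
 *	zlen = min(len - ret, i_size - off - ret) = min(4096, 2048)
 *
 * so 2048 zero bytes are appended and 6144 bytes reach the caller --
 * a short read is only ever returned at EOF, per the comment at the
 * top of this function.
 */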
1055 
1056 struct ceph_aio_request {
1057 	struct kiocb *iocb;
1058 	size_t total_len;
1059 	bool write;
1060 	bool should_dirty;
1061 	int error;
1062 	struct list_head osd_reqs;
1063 	unsigned num_reqs;
1064 	atomic_t pending_reqs;
1065 	struct timespec64 mtime;
1066 	struct ceph_cap_flush *prealloc_cf;
1067 };
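
/*
 * One ceph_aio_request fans out into num_reqs OSD requests (one per
 * object range).  pending_reqs counts those still in flight; each
 * ceph_aio_complete_req() decrements it, and the last one to finish
 * runs ceph_aio_complete(), which drops the cap refs and invokes
 * ki_complete() on the original iocb.
 */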
1068 
1069 struct ceph_aio_work {
1070 	struct work_struct work;
1071 	struct ceph_osd_request *req;
1072 };
1073 
1074 static void ceph_aio_retry_work(struct work_struct *work);
1075 
1076 static void ceph_aio_complete(struct inode *inode,
1077 			      struct ceph_aio_request *aio_req)
1078 {
1079 	struct ceph_inode_info *ci = ceph_inode(inode);
1080 	int ret;
1081 
1082 	if (!atomic_dec_and_test(&aio_req->pending_reqs))
1083 		return;
1084 
1085 	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
1086 		inode_dio_end(inode);
1087 
1088 	ret = aio_req->error;
1089 	if (!ret)
1090 		ret = aio_req->total_len;
1091 
1092 	dout("ceph_aio_complete %p rc %d\n", inode, ret);
1093 
1094 	if (ret >= 0 && aio_req->write) {
1095 		int dirty;
1096 
1097 		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
1098 		if (endoff > i_size_read(inode)) {
1099 			if (ceph_inode_set_size(inode, endoff))
1100 				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY);
1101 		}
1102 
1103 		spin_lock(&ci->i_ceph_lock);
1104 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1105 					       &aio_req->prealloc_cf);
1106 		spin_unlock(&ci->i_ceph_lock);
1107 		if (dirty)
1108 			__mark_inode_dirty(inode, dirty);
1109 
1110 	}
1111 
1112 	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
1113 						CEPH_CAP_FILE_RD));
1114 
1115 	aio_req->iocb->ki_complete(aio_req->iocb, ret);
1116 
1117 	ceph_free_cap_flush(aio_req->prealloc_cf);
1118 	kfree(aio_req);
1119 }
1120 
1121 static void ceph_aio_complete_req(struct ceph_osd_request *req)
1122 {
1123 	int rc = req->r_result;
1124 	struct inode *inode = req->r_inode;
1125 	struct ceph_aio_request *aio_req = req->r_priv;
1126 	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
1127 	struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
1128 	unsigned int len = osd_data->bvec_pos.iter.bi_size;
1129 
1130 	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
1131 	BUG_ON(!osd_data->num_bvecs);
1132 
1133 	dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);
1134 
1135 	if (rc == -EOLDSNAPC) {
1136 		struct ceph_aio_work *aio_work;
1137 		BUG_ON(!aio_req->write);
1138 
1139 		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
1140 		if (aio_work) {
1141 			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
1142 			aio_work->req = req;
1143 			queue_work(ceph_inode_to_client(inode)->inode_wq,
1144 				   &aio_work->work);
1145 			return;
1146 		}
1147 		rc = -ENOMEM;
1148 	} else if (!aio_req->write) {
1149 		if (rc == -ENOENT)
1150 			rc = 0;
1151 		if (rc >= 0 && len > rc) {
1152 			struct iov_iter i;
1153 			int zlen = len - rc;
1154 
1155 			/*
1156 			 * If the read was satisfied by a single OSD
1157 			 * request, it may extend past EOF. Otherwise
1158 			 * the read is within i_size.
1159 			 */
1160 			if (aio_req->num_reqs == 1) {
1161 				loff_t i_size = i_size_read(inode);
1162 				loff_t endoff = aio_req->iocb->ki_pos + rc;
1163 				if (endoff < i_size)
1164 					zlen = min_t(size_t, zlen,
1165 						     i_size - endoff);
1166 				aio_req->total_len = rc + zlen;
1167 			}
1168 
1169 			iov_iter_bvec(&i, ITER_DEST, osd_data->bvec_pos.bvecs,
1170 				      osd_data->num_bvecs, len);
1171 			iov_iter_advance(&i, rc);
1172 			iov_iter_zero(zlen, &i);
1173 		}
1174 	}
1175 
1176 	/* r_start_latency == 0 means the request was not submitted */
1177 	if (req->r_start_latency) {
1178 		if (aio_req->write)
1179 			ceph_update_write_metrics(metric, req->r_start_latency,
1180 						  req->r_end_latency, len, rc);
1181 		else
1182 			ceph_update_read_metrics(metric, req->r_start_latency,
1183 						 req->r_end_latency, len, rc);
1184 	}
1185 
1186 	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
1187 		  aio_req->should_dirty);
1188 	ceph_osdc_put_request(req);
1189 
1190 	if (rc < 0)
1191 		cmpxchg(&aio_req->error, 0, rc);
1192 
1193 	ceph_aio_complete(inode, aio_req);
1194 	return;
1195 }
1196 
1197 static void ceph_aio_retry_work(struct work_struct *work)
1198 {
1199 	struct ceph_aio_work *aio_work =
1200 		container_of(work, struct ceph_aio_work, work);
1201 	struct ceph_osd_request *orig_req = aio_work->req;
1202 	struct ceph_aio_request *aio_req = orig_req->r_priv;
1203 	struct inode *inode = orig_req->r_inode;
1204 	struct ceph_inode_info *ci = ceph_inode(inode);
1205 	struct ceph_snap_context *snapc;
1206 	struct ceph_osd_request *req;
1207 	int ret;
1208 
1209 	spin_lock(&ci->i_ceph_lock);
1210 	if (__ceph_have_pending_cap_snap(ci)) {
1211 		struct ceph_cap_snap *capsnap =
1212 			list_last_entry(&ci->i_cap_snaps,
1213 					struct ceph_cap_snap,
1214 					ci_item);
1215 		snapc = ceph_get_snap_context(capsnap->context);
1216 	} else {
1217 		BUG_ON(!ci->i_head_snapc);
1218 		snapc = ceph_get_snap_context(ci->i_head_snapc);
1219 	}
1220 	spin_unlock(&ci->i_ceph_lock);
1221 
1222 	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
1223 			false, GFP_NOFS);
1224 	if (!req) {
1225 		ret = -ENOMEM;
1226 		req = orig_req;
1227 		goto out;
1228 	}
1229 
1230 	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1231 	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
1232 	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);
1233 
1234 	req->r_ops[0] = orig_req->r_ops[0];
1235 
1236 	req->r_mtime = aio_req->mtime;
1237 	req->r_data_offset = req->r_ops[0].extent.offset;
1238 
1239 	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
1240 	if (ret) {
1241 		ceph_osdc_put_request(req);
1242 		req = orig_req;
1243 		goto out;
1244 	}
1245 
1246 	ceph_osdc_put_request(orig_req);
1247 
1248 	req->r_callback = ceph_aio_complete_req;
1249 	req->r_inode = inode;
1250 	req->r_priv = aio_req;
1251 
1252 	ceph_osdc_start_request(req->r_osdc, req);
1253 out:
1254 	if (ret < 0) {
1255 		req->r_result = ret;
1256 		ceph_aio_complete_req(req);
1257 	}
1258 
1259 	ceph_put_snap_context(snapc);
1260 	kfree(aio_work);
1261 }
1262 
1263 static ssize_t
1264 ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
1265 		       struct ceph_snap_context *snapc,
1266 		       struct ceph_cap_flush **pcf)
1267 {
1268 	struct file *file = iocb->ki_filp;
1269 	struct inode *inode = file_inode(file);
1270 	struct ceph_inode_info *ci = ceph_inode(inode);
1271 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1272 	struct ceph_client_metric *metric = &fsc->mdsc->metric;
1273 	struct ceph_vino vino;
1274 	struct ceph_osd_request *req;
1275 	struct bio_vec *bvecs;
1276 	struct ceph_aio_request *aio_req = NULL;
1277 	int num_pages = 0;
1278 	int flags;
1279 	int ret = 0;
1280 	struct timespec64 mtime = current_time(inode);
1281 	size_t count = iov_iter_count(iter);
1282 	loff_t pos = iocb->ki_pos;
1283 	bool write = iov_iter_rw(iter) == WRITE;
1284 	bool should_dirty = !write && user_backed_iter(iter);
1285 
1286 	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1287 		return -EROFS;
1288 
1289 	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
1290 	     (write ? "write" : "read"), file, pos, (unsigned)count,
1291 	     snapc, snapc ? snapc->seq : 0);
1292 
1293 	if (write) {
1294 		int ret2;
1295 
1296 		ceph_fscache_invalidate(inode, true);
1297 
1298 		ret2 = invalidate_inode_pages2_range(inode->i_mapping,
1299 					pos >> PAGE_SHIFT,
1300 					(pos + count - 1) >> PAGE_SHIFT);
1301 		if (ret2 < 0)
1302 			dout("invalidate_inode_pages2_range returned %d\n", ret2);
1303 
1304 		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1305 	} else {
1306 		flags = CEPH_OSD_FLAG_READ;
1307 	}
1308 
1309 	while (iov_iter_count(iter) > 0) {
1310 		u64 size = iov_iter_count(iter);
1311 		ssize_t len;
1312 
1313 		if (write)
1314 			size = min_t(u64, size, fsc->mount_options->wsize);
1315 		else
1316 			size = min_t(u64, size, fsc->mount_options->rsize);
1317 
1318 		vino = ceph_vino(inode);
1319 		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1320 					    vino, pos, &size, 0,
1321 					    1,
1322 					    write ? CEPH_OSD_OP_WRITE :
1323 						    CEPH_OSD_OP_READ,
1324 					    flags, snapc,
1325 					    ci->i_truncate_seq,
1326 					    ci->i_truncate_size,
1327 					    false);
1328 		if (IS_ERR(req)) {
1329 			ret = PTR_ERR(req);
1330 			break;
1331 		}
1332 
1333 		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
1334 		if (len < 0) {
1335 			ceph_osdc_put_request(req);
1336 			ret = len;
1337 			break;
1338 		}
1339 		if (len != size)
1340 			osd_req_op_extent_update(req, 0, len);
1341 
1342 		/*
1343 		 * To simplify error handling, allow AIO only when the IO is
1344 		 * within i_size or can be satisfied by a single OSD request.
1345 		 */
1346 		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
1347 		    (len == count || pos + count <= i_size_read(inode))) {
1348 			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
1349 			if (aio_req) {
1350 				aio_req->iocb = iocb;
1351 				aio_req->write = write;
1352 				aio_req->should_dirty = should_dirty;
1353 				INIT_LIST_HEAD(&aio_req->osd_reqs);
1354 				if (write) {
1355 					aio_req->mtime = mtime;
1356 					swap(aio_req->prealloc_cf, *pcf);
1357 				}
1358 			}
1359 			/* ignore error */
1360 		}
1361 
1362 		if (write) {
1363 			/*
1364 			 * throw out any page cache pages in this range. this
1365 			 * may block.
1366 			 */
1367 			truncate_inode_pages_range(inode->i_mapping, pos,
1368 						   PAGE_ALIGN(pos + len) - 1);
1369 
1370 			req->r_mtime = mtime;
1371 		}
1372 
1373 		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
1374 
1375 		if (aio_req) {
1376 			aio_req->total_len += len;
1377 			aio_req->num_reqs++;
1378 			atomic_inc(&aio_req->pending_reqs);
1379 
1380 			req->r_callback = ceph_aio_complete_req;
1381 			req->r_inode = inode;
1382 			req->r_priv = aio_req;
1383 			list_add_tail(&req->r_private_item, &aio_req->osd_reqs);
1384 
1385 			pos += len;
1386 			continue;
1387 		}
1388 
1389 		ceph_osdc_start_request(req->r_osdc, req);
1390 		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1391 
1392 		if (write)
1393 			ceph_update_write_metrics(metric, req->r_start_latency,
1394 						  req->r_end_latency, len, ret);
1395 		else
1396 			ceph_update_read_metrics(metric, req->r_start_latency,
1397 						 req->r_end_latency, len, ret);
1398 
1399 		size = i_size_read(inode);
1400 		if (!write) {
1401 			if (ret == -ENOENT)
1402 				ret = 0;
1403 			if (ret >= 0 && ret < len && pos + ret < size) {
1404 				struct iov_iter i;
1405 				int zlen = min_t(size_t, len - ret,
1406 						 size - pos - ret);
1407 
1408 				iov_iter_bvec(&i, ITER_DEST, bvecs, num_pages, len);
1409 				iov_iter_advance(&i, ret);
1410 				iov_iter_zero(zlen, &i);
1411 				ret += zlen;
1412 			}
1413 			if (ret >= 0)
1414 				len = ret;
1415 		}
1416 
1417 		put_bvecs(bvecs, num_pages, should_dirty);
1418 		ceph_osdc_put_request(req);
1419 		if (ret < 0)
1420 			break;
1421 
1422 		pos += len;
1423 		if (!write && pos >= size)
1424 			break;
1425 
1426 		if (write && pos > size) {
1427 			if (ceph_inode_set_size(inode, pos))
1428 				ceph_check_caps(ceph_inode(inode),
1429 						CHECK_CAPS_AUTHONLY);
1430 		}
1431 	}
1432 
1433 	if (aio_req) {
1434 		LIST_HEAD(osd_reqs);
1435 
1436 		if (aio_req->num_reqs == 0) {
1437 			kfree(aio_req);
1438 			return ret;
1439 		}
1440 
1441 		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
1442 					      CEPH_CAP_FILE_RD);
1443 
1444 		list_splice(&aio_req->osd_reqs, &osd_reqs);
1445 		inode_dio_begin(inode);
1446 		while (!list_empty(&osd_reqs)) {
1447 			req = list_first_entry(&osd_reqs,
1448 					       struct ceph_osd_request,
1449 					       r_private_item);
1450 			list_del_init(&req->r_private_item);
1451 			if (ret >= 0)
1452 				ceph_osdc_start_request(req->r_osdc, req);
1453 			if (ret < 0) {
1454 				req->r_result = ret;
1455 				ceph_aio_complete_req(req);
1456 			}
1457 		}
1458 		return -EIOCBQUEUED;
1459 	}
1460 
1461 	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
1462 		ret = pos - iocb->ki_pos;
1463 		iocb->ki_pos = pos;
1464 	}
1465 	return ret;
1466 }
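
/*
 * Note the two-phase submission in the AIO case above: all OSD
 * requests are first built and queued on aio_req->osd_reqs, and only
 * then started, so a setup failure part-way through still fails the
 * whole batch via ceph_aio_complete_req().  The caller gets
 * -EIOCBQUEUED and the result is delivered through ki_complete().
 */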
1467 
1468 /*
1469  * Synchronous write, straight from __user pointer or user pages.
1470  *
1471  * If the write spans an object boundary, just do multiple writes.  (For a
1472  * correct atomic write, we should e.g. take write locks on all
1473  * objects, rollback on failure, etc.)
1474  */
1475 static ssize_t
1476 ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
1477 		struct ceph_snap_context *snapc)
1478 {
1479 	struct file *file = iocb->ki_filp;
1480 	struct inode *inode = file_inode(file);
1481 	struct ceph_inode_info *ci = ceph_inode(inode);
1482 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1483 	struct ceph_vino vino;
1484 	struct ceph_osd_request *req;
1485 	struct page **pages;
1486 	u64 len;
1487 	int num_pages;
1488 	int written = 0;
1489 	int flags;
1490 	int ret;
1491 	bool check_caps = false;
1492 	struct timespec64 mtime = current_time(inode);
1493 	size_t count = iov_iter_count(from);
1494 
1495 	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
1496 		return -EROFS;
1497 
1498 	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
1499 	     file, pos, (unsigned)count, snapc, snapc->seq);
1500 
1501 	ret = filemap_write_and_wait_range(inode->i_mapping,
1502 					   pos, pos + count - 1);
1503 	if (ret < 0)
1504 		return ret;
1505 
1506 	ceph_fscache_invalidate(inode, false);
1507 	ret = invalidate_inode_pages2_range(inode->i_mapping,
1508 					    pos >> PAGE_SHIFT,
1509 					    (pos + count - 1) >> PAGE_SHIFT);
1510 	if (ret < 0)
1511 		dout("invalidate_inode_pages2_range returned %d\n", ret);
1512 
1513 	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
1514 
1515 	while ((len = iov_iter_count(from)) > 0) {
1516 		size_t left;
1517 		int n;
1518 
1519 		vino = ceph_vino(inode);
1520 		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
1521 					    vino, pos, &len, 0, 1,
1522 					    CEPH_OSD_OP_WRITE, flags, snapc,
1523 					    ci->i_truncate_seq,
1524 					    ci->i_truncate_size,
1525 					    false);
1526 		if (IS_ERR(req)) {
1527 			ret = PTR_ERR(req);
1528 			break;
1529 		}
1530 
1531 		/*
1532 		 * write from beginning of first page,
1533 		 * regardless of io alignment
1534 		 */
1535 		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1536 
1537 		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1538 		if (IS_ERR(pages)) {
1539 			ret = PTR_ERR(pages);
1540 			goto out;
1541 		}
1542 
1543 		left = len;
1544 		for (n = 0; n < num_pages; n++) {
1545 			size_t plen = min_t(size_t, left, PAGE_SIZE);
1546 			ret = copy_page_from_iter(pages[n], 0, plen, from);
1547 			if (ret != plen) {
1548 				ret = -EFAULT;
1549 				break;
1550 			}
1551 			left -= ret;
1552 		}
1553 
1554 		if (ret < 0) {
1555 			ceph_release_page_vector(pages, num_pages);
1556 			goto out;
1557 		}
1558 
1559 		req->r_inode = inode;
1560 
1561 		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
1562 						false, true);
1563 
1564 		req->r_mtime = mtime;
1565 		ceph_osdc_start_request(&fsc->client->osdc, req);
1566 		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
1567 
1568 		ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
1569 					  req->r_end_latency, len, ret);
1570 out:
1571 		ceph_osdc_put_request(req);
1572 		if (ret != 0) {
1573 			ceph_set_error_write(ci);
1574 			break;
1575 		}
1576 
1577 		ceph_clear_error_write(ci);
1578 		pos += len;
1579 		written += len;
1580 		if (pos > i_size_read(inode)) {
1581 			check_caps = ceph_inode_set_size(inode, pos);
1582 			if (check_caps)
1583 				ceph_check_caps(ceph_inode(inode),
1584 						CHECK_CAPS_AUTHONLY);
1585 		}
1586 
1587 	}
1588 
1589 	if (ret != -EOLDSNAPC && written > 0) {
1590 		ret = written;
1591 		iocb->ki_pos = pos;
1592 	}
1593 	return ret;
1594 }
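
/*
 * Example of the page-vector sizing above, assuming PAGE_SIZE ==
 * 4096: a 10000-byte write copies into num_pages == (10000 + 4095)
 * >> 12 == 3 freshly allocated pages, starting at offset 0 of the
 * first page regardless of the file offset.  The object offset is
 * carried separately by the OSD extent op, and ceph_osdc_new_request()
 * has already clamped @len to a single object.
 */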
1595 
1596 /*
1597  * Wrap generic_file_read_iter with checks for cap bits on the inode.
1598  * Atomically grab references, so that those bits are not released
1599  * back to the MDS mid-read.
1600  *
1601  * Hmm, the sync read case isn't actually async... should it be?
1602  */
1603 static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
1604 {
1605 	struct file *filp = iocb->ki_filp;
1606 	struct ceph_file_info *fi = filp->private_data;
1607 	size_t len = iov_iter_count(to);
1608 	struct inode *inode = file_inode(filp);
1609 	struct ceph_inode_info *ci = ceph_inode(inode);
1610 	bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
1611 	ssize_t ret;
1612 	int want = 0, got = 0;
1613 	int retry_op = 0, read = 0;
1614 
1615 again:
1616 	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
1617 	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
1618 
1619 	if (ceph_inode_is_shutdown(inode))
1620 		return -ESTALE;
1621 
1622 	if (direct_lock)
1623 		ceph_start_io_direct(inode);
1624 	else
1625 		ceph_start_io_read(inode);
1626 
1627 	if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
1628 		want |= CEPH_CAP_FILE_CACHE;
1629 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1630 		want |= CEPH_CAP_FILE_LAZYIO;
1631 
1632 	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
1633 	if (ret < 0) {
1634 		if (direct_lock)
1635 			ceph_end_io_direct(inode);
1636 		else
1637 			ceph_end_io_read(inode);
1638 		return ret;
1639 	}
1640 
1641 	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1642 	    (iocb->ki_flags & IOCB_DIRECT) ||
1643 	    (fi->flags & CEPH_F_SYNC)) {
1644 
1645 		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1646 		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1647 		     ceph_cap_string(got));
1648 
1649 		if (!ceph_has_inline_data(ci)) {
1650 			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
1651 				ret = ceph_direct_read_write(iocb, to,
1652 							     NULL, NULL);
1653 				if (ret >= 0 && ret < len)
1654 					retry_op = CHECK_EOF;
1655 			} else {
1656 				ret = ceph_sync_read(iocb, to, &retry_op);
1657 			}
1658 		} else {
1659 			retry_op = READ_INLINE;
1660 		}
1661 	} else {
1662 		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
1663 		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
1664 		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
1665 		     ceph_cap_string(got));
1666 		ceph_add_rw_context(fi, &rw_ctx);
1667 		ret = generic_file_read_iter(iocb, to);
1668 		ceph_del_rw_context(fi, &rw_ctx);
1669 	}
1670 
1671 	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
1672 	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
1673 	ceph_put_cap_refs(ci, got);
1674 
1675 	if (direct_lock)
1676 		ceph_end_io_direct(inode);
1677 	else
1678 		ceph_end_io_read(inode);
1679 
1680 	if (retry_op > HAVE_RETRIED && ret >= 0) {
1681 		int statret;
1682 		struct page *page = NULL;
1683 		loff_t i_size;
1684 		if (retry_op == READ_INLINE) {
1685 			page = __page_cache_alloc(GFP_KERNEL);
1686 			if (!page)
1687 				return -ENOMEM;
1688 		}
1689 
1690 		statret = __ceph_do_getattr(inode, page,
1691 					    CEPH_STAT_CAP_INLINE_DATA, !!page);
1692 		if (statret < 0) {
1693 			if (page)
1694 				__free_page(page);
1695 			if (statret == -ENODATA) {
1696 				BUG_ON(retry_op != READ_INLINE);
1697 				goto again;
1698 			}
1699 			return statret;
1700 		}
1701 
1702 		i_size = i_size_read(inode);
1703 		if (retry_op == READ_INLINE) {
1704 			BUG_ON(ret > 0 || read > 0);
1705 			if (iocb->ki_pos < i_size &&
1706 			    iocb->ki_pos < PAGE_SIZE) {
1707 				loff_t end = min_t(loff_t, i_size,
1708 						   iocb->ki_pos + len);
1709 				end = min_t(loff_t, end, PAGE_SIZE);
1710 				if (statret < end)
1711 					zero_user_segment(page, statret, end);
1712 				ret = copy_page_to_iter(page,
1713 						iocb->ki_pos & ~PAGE_MASK,
1714 						end - iocb->ki_pos, to);
1715 				iocb->ki_pos += ret;
1716 				read += ret;
1717 			}
1718 			if (iocb->ki_pos < i_size && read < len) {
1719 				size_t zlen = min_t(size_t, len - read,
1720 						    i_size - iocb->ki_pos);
1721 				ret = iov_iter_zero(zlen, to);
1722 				iocb->ki_pos += ret;
1723 				read += ret;
1724 			}
1725 			__free_pages(page, 0);
1726 			return read;
1727 		}
1728 
1729 		/* hit EOF or hole? */
1730 		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
1731 		    ret < len) {
1732 			dout("sync_read hit hole, ppos %lld < size %lld"
1733 			     ", reading more\n", iocb->ki_pos, i_size);
1734 
1735 			read += ret;
1736 			len -= ret;
1737 			retry_op = HAVE_RETRIED;
1738 			goto again;
1739 		}
1740 	}
1741 
1742 	if (ret >= 0)
1743 		ret += read;
1744 
1745 	return ret;
1746 }
1747 
1748 /*
1749  * Take cap references to avoid releasing caps to MDS mid-write.
1750  *
1751  * If we are synchronous, and write with an old snap context, the OSD
1752  * may return EOLDSNAPC.  In that case, retry the write _after_
1753  * dropping our cap refs and allowing the pending snap to logically
1754  * complete _before_ this write occurs.
1755  *
1756  * If we are near ENOSPC, write synchronously.
1757  */
1758 static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
1759 {
1760 	struct file *file = iocb->ki_filp;
1761 	struct ceph_file_info *fi = file->private_data;
1762 	struct inode *inode = file_inode(file);
1763 	struct ceph_inode_info *ci = ceph_inode(inode);
1764 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
1765 	struct ceph_osd_client *osdc = &fsc->client->osdc;
1766 	struct ceph_cap_flush *prealloc_cf;
1767 	ssize_t count, written = 0;
1768 	int err, want = 0, got;
1769 	bool direct_lock = false;
1770 	u32 map_flags;
1771 	u64 pool_flags;
1772 	loff_t pos;
1773 	loff_t limit = max(i_size_read(inode), fsc->max_file_size);
1774 
1775 	if (ceph_inode_is_shutdown(inode))
1776 		return -ESTALE;
1777 
1778 	if (ceph_snap(inode) != CEPH_NOSNAP)
1779 		return -EROFS;
1780 
1781 	prealloc_cf = ceph_alloc_cap_flush();
1782 	if (!prealloc_cf)
1783 		return -ENOMEM;
1784 
1785 	if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
1786 		direct_lock = true;
1787 
1788 retry_snap:
1789 	if (direct_lock)
1790 		ceph_start_io_direct(inode);
1791 	else
1792 		ceph_start_io_write(inode);
1793 
1794 	/* We can write back this queue in page reclaim */
1795 	current->backing_dev_info = inode_to_bdi(inode);
1796 
1797 	if (iocb->ki_flags & IOCB_APPEND) {
1798 		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1799 		if (err < 0)
1800 			goto out;
1801 	}
1802 
1803 	err = generic_write_checks(iocb, from);
1804 	if (err <= 0)
1805 		goto out;
1806 
1807 	pos = iocb->ki_pos;
1808 	if (unlikely(pos >= limit)) {
1809 		err = -EFBIG;
1810 		goto out;
1811 	} else {
1812 		iov_iter_truncate(from, limit - pos);
1813 	}
1814 
1815 	count = iov_iter_count(from);
1816 	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
1817 		err = -EDQUOT;
1818 		goto out;
1819 	}
1820 
1821 	down_read(&osdc->lock);
1822 	map_flags = osdc->osdmap->flags;
1823 	pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
1824 	up_read(&osdc->lock);
1825 	if ((map_flags & CEPH_OSDMAP_FULL) ||
1826 	    (pool_flags & CEPH_POOL_FLAG_FULL)) {
1827 		err = -ENOSPC;
1828 		goto out;
1829 	}
1830 
1831 	err = file_remove_privs(file);
1832 	if (err)
1833 		goto out;
1834 
1835 	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
1836 	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
1837 	if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
1838 		want |= CEPH_CAP_FILE_BUFFER;
1839 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
1840 		want |= CEPH_CAP_FILE_LAZYIO;
1841 	got = 0;
1842 	err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
1843 	if (err < 0)
1844 		goto out;
1845 
1846 	err = file_update_time(file);
1847 	if (err)
1848 		goto out_caps;
1849 
1850 	inode_inc_iversion_raw(inode);
1851 
1852 	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
1853 	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
1854 
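	/*
	 * Fall back to an uncached, synchronous write when we can't (or
	 * shouldn't) go through the page cache: no Fb/LAZYIO caps were
	 * granted, O_DIRECT was requested, the CEPH_F_SYNC file flag is
	 * set, or a previous write on this inode failed.
	 */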
1855 	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
1856 	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
1857 	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
1858 		struct ceph_snap_context *snapc;
1859 		struct iov_iter data;
1860 
1861 		spin_lock(&ci->i_ceph_lock);
1862 		if (__ceph_have_pending_cap_snap(ci)) {
1863 			struct ceph_cap_snap *capsnap =
1864 					list_last_entry(&ci->i_cap_snaps,
1865 							struct ceph_cap_snap,
1866 							ci_item);
1867 			snapc = ceph_get_snap_context(capsnap->context);
1868 		} else {
1869 			BUG_ON(!ci->i_head_snapc);
1870 			snapc = ceph_get_snap_context(ci->i_head_snapc);
1871 		}
1872 		spin_unlock(&ci->i_ceph_lock);
1873 
1874 		/* we might need to revert to this point */
1875 		data = *from;
1876 		if (iocb->ki_flags & IOCB_DIRECT)
1877 			written = ceph_direct_read_write(iocb, &data, snapc,
1878 							 &prealloc_cf);
1879 		else
1880 			written = ceph_sync_write(iocb, &data, pos, snapc);
1881 		if (direct_lock)
1882 			ceph_end_io_direct(inode);
1883 		else
1884 			ceph_end_io_write(inode);
1885 		if (written > 0)
1886 			iov_iter_advance(from, written);
1887 		ceph_put_snap_context(snapc);
1888 	} else {
1889 		/*
1890 		 * No need to acquire the i_truncate_mutex: the MDS
1891 		 * revokes Fwb caps before sending a truncate message
1892 		 * to us, and we can't hold the Fwb cap while a
1893 		 * vmtruncate is pending. So write and vmtruncate
1894 		 * cannot run at the same time.
1895 		 */
1896 		written = generic_perform_write(iocb, from);
1897 		if (likely(written >= 0))
1898 			iocb->ki_pos = pos + written;
1899 		ceph_end_io_write(inode);
1900 	}
1901 
1902 	if (written >= 0) {
1903 		int dirty;
1904 
1905 		spin_lock(&ci->i_ceph_lock);
1906 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
1907 					       &prealloc_cf);
1908 		spin_unlock(&ci->i_ceph_lock);
1909 		if (dirty)
1910 			__mark_inode_dirty(inode, dirty);
1911 		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
1912 			ceph_check_caps(ci, CHECK_CAPS_FLUSH);
1913 	}
1914 
1915 	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
1916 	     inode, ceph_vinop(inode), pos, (unsigned)count,
1917 	     ceph_cap_string(got));
1918 	ceph_put_cap_refs(ci, got);
1919 
1920 	if (written == -EOLDSNAPC) {
1921 		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
1922 		     inode, ceph_vinop(inode), pos, (unsigned)count);
1923 		goto retry_snap;
1924 	}
1925 
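	/*
	 * Per the "near ENOSPC" rule in the comment atop this function:
	 * if the cluster or pool is nearly full, force this write out
	 * synchronously by setting IOCB_DSYNC before the sync below.
	 */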
1926 	if (written >= 0) {
1927 		if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
1928 		    (pool_flags & CEPH_POOL_FLAG_NEARFULL))
1929 			iocb->ki_flags |= IOCB_DSYNC;
1930 		written = generic_write_sync(iocb, written);
1931 	}
1932 
1933 	goto out_unlocked;
1934 out_caps:
1935 	ceph_put_cap_refs(ci, got);
1936 out:
1937 	if (direct_lock)
1938 		ceph_end_io_direct(inode);
1939 	else
1940 		ceph_end_io_write(inode);
1941 out_unlocked:
1942 	ceph_free_cap_flush(prealloc_cf);
1943 	current->backing_dev_info = NULL;
1944 	return written ? written : err;
1945 }
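
/*
 * Illustrative EOLDSNAPC retry sequence for the sync path above
 * (hypothetical timing): the write starts against ci->i_head_snapc, a
 * snapshot is taken before the OSD applies the write, the OSD returns
 * -EOLDSNAPC, we drop our cap refs so the pending cap snap can be
 * flushed, and retry_snap redoes the write under the new snap context.
 */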
1946 
1947 /*
1948  * llseek.  Be sure to verify the file size on SEEK_END.
1949  */
1950 static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
1951 {
1952 	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
1953 		struct inode *inode = file_inode(file);
1954 		int ret;
1955 
1956 		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
1957 		if (ret < 0)
1958 			return ret;
1959 	}
1960 	return generic_file_llseek(file, offset, whence);
1961 }
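
/*
 * Illustrative (hypothetical) two-client sequence showing why the
 * getattr above is needed:
 *
 *	client A: pwrite(fd, buf, 4096, 1048576)	(extends the file)
 *	client B: lseek(fd, 0, SEEK_END)		(must see 1052672)
 *
 * Without refreshing CEPH_STAT_CAP_SIZE from the MDS, client B could
 * compute SEEK_END from a stale local i_size.
 */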
1962 
1963 static inline void ceph_zero_partial_page(
1964 	struct inode *inode, loff_t offset, unsigned size)
1965 {
1966 	struct page *page;
1967 	pgoff_t index = offset >> PAGE_SHIFT;
1968 
1969 	page = find_lock_page(inode->i_mapping, index);
1970 	if (page) {
1971 		wait_on_page_writeback(page);
1972 		zero_user(page, offset & (PAGE_SIZE - 1), size);
1973 		unlock_page(page);
1974 		put_page(page);
1975 	}
1976 }
1977 
1978 static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
1979 				      loff_t length)
1980 {
1981 	loff_t nearly = round_up(offset, PAGE_SIZE);
1982 	if (offset < nearly) {
1983 		loff_t size = nearly - offset;
1984 		if (length < size)
1985 			size = length;
1986 		ceph_zero_partial_page(inode, offset, size);
1987 		offset += size;
1988 		length -= size;
1989 	}
1990 	if (length >= PAGE_SIZE) {
1991 		loff_t size = round_down(length, PAGE_SIZE);
1992 		truncate_pagecache_range(inode, offset, offset + size - 1);
1993 		offset += size;
1994 		length -= size;
1995 	}
1996 	if (length)
1997 		ceph_zero_partial_page(inode, offset, length);
1998 }
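
/*
 * Worked example for ceph_zero_pagecache_range() with PAGE_SIZE 4096,
 * offset 1000, length 10000 (hypothetical values):
 *
 *	head:	zero bytes [1000, 4096) in the cached page	(3096)
 *	middle:	truncate whole pages covering [4096, 8192)	(4096)
 *	tail:	zero bytes [8192, 11000) in the cached page	(2808)
 *
 * Partial pages are zeroed rather than truncated so that the
 * surrounding cached bytes in those pages remain valid.
 */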
1999 
2000 static int ceph_zero_partial_object(struct inode *inode,
2001 				    loff_t offset, loff_t *length)
2002 {
2003 	struct ceph_inode_info *ci = ceph_inode(inode);
2004 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
2005 	struct ceph_osd_request *req;
2006 	int ret = 0;
2007 	loff_t zero = 0;
2008 	int op;
2009 
2010 	if (ceph_inode_is_shutdown(inode))
2011 		return -EIO;
2012 
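	/*
	 * A NULL @length means "whole object": zap the object entirely,
	 * except at file offset 0, where the first object is truncated
	 * rather than deleted so the object itself is preserved.  A
	 * non-NULL @length zeroes the given byte range instead.
	 */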
2013 	if (!length) {
2014 		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
2015 		length = &zero;
2016 	} else {
2017 		op = CEPH_OSD_OP_ZERO;
2018 	}
2019 
2020 	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
2021 					ceph_vino(inode),
2022 					offset, length,
2023 					0, 1, op,
2024 					CEPH_OSD_FLAG_WRITE,
2025 					NULL, 0, 0, false);
2026 	if (IS_ERR(req)) {
2027 		ret = PTR_ERR(req);
2028 		goto out;
2029 	}
2030 
2031 	req->r_mtime = inode->i_mtime;
2032 	ceph_osdc_start_request(&fsc->client->osdc, req);
2033 	ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
2034 	if (ret == -ENOENT)
2035 		ret = 0;
2036 	ceph_osdc_put_request(req);
2037 
2038 out:
2039 	return ret;
2040 }
2041 
2042 static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
2043 {
2044 	int ret = 0;
2045 	struct ceph_inode_info *ci = ceph_inode(inode);
2046 	s32 stripe_unit = ci->i_layout.stripe_unit;
2047 	s32 stripe_count = ci->i_layout.stripe_count;
2048 	s32 object_size = ci->i_layout.object_size;
2049 	u64 object_set_size = (u64)object_size * stripe_count;
2050 	u64 nearly, t;
2051 
2052 	/* round offset up to next period boundary */
2053 	nearly = offset + object_set_size - 1;
2054 	t = nearly;
2055 	nearly -= do_div(t, object_set_size);
2056 
2057 	while (length && offset < nearly) {
2058 		loff_t size = length;
2059 		ret = ceph_zero_partial_object(inode, offset, &size);
2060 		if (ret < 0)
2061 			return ret;
2062 		offset += size;
2063 		length -= size;
2064 	}
2065 	while (length >= object_set_size) {
2066 		int i;
2067 		loff_t pos = offset;
2068 		for (i = 0; i < stripe_count; ++i) {
2069 			ret = ceph_zero_partial_object(inode, pos, NULL);
2070 			if (ret < 0)
2071 				return ret;
2072 			pos += stripe_unit;
2073 		}
2074 		offset += object_set_size;
2075 		length -= object_set_size;
2076 	}
2077 	while (length) {
2078 		loff_t size = length;
2079 		ret = ceph_zero_partial_object(inode, offset, &size);
2080 		if (ret < 0)
2081 			return ret;
2082 		offset += size;
2083 		length -= size;
2084 	}
2085 	return ret;
2086 }
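
/*
 * Worked example for ceph_zero_objects() with a hypothetical layout of
 * stripe_unit 1M, stripe_count 4, object_size 4M, so one object set
 * (period) spans object_set_size = 4M * 4 = 16M.  Punching 40M starting
 * at file offset 10M proceeds as:
 *
 *	1. zero partial objects in [10M, 16M)	(up to the period boundary)
 *	2. zap two full object sets, [16M, 48M)	(32M)
 *	3. zero partial objects in [48M, 50M)	(the remainder)
 */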
2087 
2088 static long ceph_fallocate(struct file *file, int mode,
2089 				loff_t offset, loff_t length)
2090 {
2091 	struct ceph_file_info *fi = file->private_data;
2092 	struct inode *inode = file_inode(file);
2093 	struct ceph_inode_info *ci = ceph_inode(inode);
2094 	struct ceph_cap_flush *prealloc_cf;
2095 	int want, got = 0;
2096 	int dirty;
2097 	int ret = 0;
2098 	loff_t endoff = 0;
2099 	loff_t size;
2100 
2101 	dout("%s %p %llx.%llx mode %x, offset %llu length %llu\n", __func__,
2102 	     inode, ceph_vinop(inode), mode, offset, length);
2103 
2104 	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
2105 		return -EOPNOTSUPP;
2106 
2107 	if (!S_ISREG(inode->i_mode))
2108 		return -EOPNOTSUPP;
2109 
2110 	prealloc_cf = ceph_alloc_cap_flush();
2111 	if (!prealloc_cf)
2112 		return -ENOMEM;
2113 
2114 	inode_lock(inode);
2115 
2116 	if (ceph_snap(inode) != CEPH_NOSNAP) {
2117 		ret = -EROFS;
2118 		goto unlock;
2119 	}
2120 
2121 	size = i_size_read(inode);
2122 
2123 	/* Are we punching a hole beyond EOF? */
2124 	if (offset >= size)
2125 		goto unlock;
2126 	if ((offset + length) > size)
2127 		length = size - offset;
2128 
2129 	if (fi->fmode & CEPH_FILE_MODE_LAZY)
2130 		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
2131 	else
2132 		want = CEPH_CAP_FILE_BUFFER;
2133 
2134 	ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got);
2135 	if (ret < 0)
2136 		goto unlock;
2137 
2138 	ret = file_modified(file);
2139 	if (ret)
2140 		goto put_caps;
2141 
2142 	filemap_invalidate_lock(inode->i_mapping);
2143 	ceph_fscache_invalidate(inode, false);
2144 	ceph_zero_pagecache_range(inode, offset, length);
2145 	ret = ceph_zero_objects(inode, offset, length);
2146 
2147 	if (!ret) {
2148 		spin_lock(&ci->i_ceph_lock);
2149 		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
2150 					       &prealloc_cf);
2151 		spin_unlock(&ci->i_ceph_lock);
2152 		if (dirty)
2153 			__mark_inode_dirty(inode, dirty);
2154 	}
2155 	filemap_invalidate_unlock(inode->i_mapping);
2156 
2157 put_caps:
2158 	ceph_put_cap_refs(ci, got);
2159 unlock:
2160 	inode_unlock(inode);
2161 	ceph_free_cap_flush(prealloc_cf);
2162 	return ret;
2163 }
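
/*
 * Hypothetical userspace call reaching ceph_fallocate() above; only
 * hole punching with KEEP_SIZE is supported:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  4096, 65536);
 *
 * Any other mode combination (including plain preallocation) fails
 * with -EOPNOTSUPP.
 */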
2164 
2165 /*
2166  * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
2167  * src_ci.  Two attempts are made to obtain both caps, and an error is
2168  * returned if this fails; zero is returned on success.
2169  */
2170 static int get_rd_wr_caps(struct file *src_filp, int *src_got,
2171 			  struct file *dst_filp,
2172 			  loff_t dst_endoff, int *dst_got)
2173 {
2174 	int ret = 0;
2175 	bool retrying = false;
2176 
2177 retry_caps:
2178 	ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
2179 			    dst_endoff, dst_got);
2180 	if (ret < 0)
2181 		return ret;
2182 
2183 	/*
2184 	 * Since we're already holding the FILE_WR capability for the dst file,
2185 	 * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
2186 	 * retry dance instead to try to get both capabilities.
2187 	 */
2188 	ret = ceph_try_get_caps(file_inode(src_filp),
2189 				CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
2190 				false, src_got);
2191 	if (ret <= 0) {
2192 		/* Start by dropping dst_ci caps and getting src_ci caps */
2193 		ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
2194 		if (retrying) {
2195 			if (!ret)
2196 				/* ceph_try_get_caps masks EAGAIN */
2197 				ret = -EAGAIN;
2198 			return ret;
2199 		}
2200 		ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
2201 				    CEPH_CAP_FILE_SHARED, -1, src_got);
2202 		if (ret < 0)
2203 			return ret;
2204 		/* ... drop src_ci caps too, and retry */
2205 		ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
2206 		retrying = true;
2207 		goto retry_caps;
2208 	}
2209 	return ret;
2210 }
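
/*
 * The retry dance in get_rd_wr_caps(), spelled out:
 *
 *	1. take Fw cap refs on the dst file, then try (non-blocking)
 *	   to take Fr cap refs on the src file
 *	2. on failure, drop dst's refs to avoid deadlock, block until
 *	   src's Fr caps can be taken, then drop those again
 *	3. retry both acquisitions; after one full retry, give up and
 *	   return -EAGAIN
 */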
2211 
2212 static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
2213 			   struct ceph_inode_info *dst_ci, int dst_got)
2214 {
2215 	ceph_put_cap_refs(src_ci, src_got);
2216 	ceph_put_cap_refs(dst_ci, dst_got);
2217 }
2218 
2219 /*
2220  * This function does several size-related checks, returning an error if:
2221  *  - source file is smaller than off+len
2222  *  - destination file size is not OK (inode_newsize_ok())
2223  *  - the max bytes quota is exceeded
2224  */
2225 static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
2226 			   loff_t src_off, loff_t dst_off, size_t len)
2227 {
2228 	loff_t size, endoff;
2229 
2230 	size = i_size_read(src_inode);
2231 	/*
2232 	 * Don't copy beyond source file EOF.  Instead of simply setting length
2233  * to (size - src_off), just fall back to the default VFS implementation, as the
2234 	 * local i_size may be stale due to other clients writing to the source
2235 	 * inode.
2236 	 */
2237 	if (src_off + len > size) {
2238 		dout("Copy beyond EOF (%llu + %zu > %llu)\n",
2239 		     src_off, len, size);
2240 		return -EOPNOTSUPP;
2241 	}
2242 	size = i_size_read(dst_inode);
2243 
2244 	endoff = dst_off + len;
2245 	if (inode_newsize_ok(dst_inode, endoff))
2246 		return -EOPNOTSUPP;
2247 
2248 	if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
2249 		return -EDQUOT;
2250 
2251 	return 0;
2252 }
2253 
2254 static struct ceph_osd_request *
2255 ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc,
2256 			    u64 src_snapid,
2257 			    struct ceph_object_id *src_oid,
2258 			    struct ceph_object_locator *src_oloc,
2259 			    struct ceph_object_id *dst_oid,
2260 			    struct ceph_object_locator *dst_oloc,
2261 			    u32 truncate_seq, u64 truncate_size)
2262 {
2263 	struct ceph_osd_request *req;
2264 	int ret;
2265 	u32 src_fadvise_flags =
2266 		CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2267 		CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
2268 	u32 dst_fadvise_flags =
2269 		CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
2270 		CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
2271 
2272 	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
2273 	if (!req)
2274 		return ERR_PTR(-ENOMEM);
2275 
2276 	req->r_flags = CEPH_OSD_FLAG_WRITE;
2277 
2278 	ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
2279 	ceph_oid_copy(&req->r_t.base_oid, dst_oid);
2280 
2281 	ret = osd_req_op_copy_from_init(req, src_snapid, 0,
2282 					src_oid, src_oloc,
2283 					src_fadvise_flags,
2284 					dst_fadvise_flags,
2285 					truncate_seq,
2286 					truncate_size,
2287 					CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
2288 	if (ret)
2289 		goto out;
2290 
2291 	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
2292 	if (ret)
2293 		goto out;
2294 
2295 	return req;
2296 
2297 out:
2298 	ceph_osdc_put_request(req);
2299 	return ERR_PTR(ret);
2300 }
2301 
2302 static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
2303 				    struct ceph_inode_info *dst_ci, u64 *dst_off,
2304 				    struct ceph_fs_client *fsc,
2305 				    size_t len, unsigned int flags)
2306 {
2307 	struct ceph_object_locator src_oloc, dst_oloc;
2308 	struct ceph_object_id src_oid, dst_oid;
2309 	struct ceph_osd_client *osdc;
2310 	struct ceph_osd_request *req;
2311 	size_t bytes = 0;
2312 	u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
2313 	u32 src_objlen, dst_objlen;
2314 	u32 object_size = src_ci->i_layout.object_size;
2315 	int ret;
2316 
2317 	src_oloc.pool = src_ci->i_layout.pool_id;
2318 	src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
2319 	dst_oloc.pool = dst_ci->i_layout.pool_id;
2320 	dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
2321 	osdc = &fsc->client->osdc;
2322 
2323 	while (len >= object_size) {
2324 		ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
2325 					      object_size, &src_objnum,
2326 					      &src_objoff, &src_objlen);
2327 		ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
2328 					      object_size, &dst_objnum,
2329 					      &dst_objoff, &dst_objlen);
2330 		ceph_oid_init(&src_oid);
2331 		ceph_oid_printf(&src_oid, "%llx.%08llx",
2332 				src_ci->i_vino.ino, src_objnum);
2333 		ceph_oid_init(&dst_oid);
2334 		ceph_oid_printf(&dst_oid, "%llx.%08llx",
2335 				dst_ci->i_vino.ino, dst_objnum);
2336 		/* Do an object remote copy */
2337 		req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap,
2338 						  &src_oid, &src_oloc,
2339 						  &dst_oid, &dst_oloc,
2340 						  dst_ci->i_truncate_seq,
2341 						  dst_ci->i_truncate_size);
2342 		if (IS_ERR(req)) {
2343 			ret = PTR_ERR(req);
2344 		} else {
2345 			ceph_osdc_start_request(osdc, req);
2346 			ret = ceph_osdc_wait_request(osdc, req);
2347 			ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
2348 						     req->r_start_latency,
2349 						     req->r_end_latency,
2350 						     object_size, ret);
2351 			ceph_osdc_put_request(req);
2352 		}
2353 		if (ret) {
2354 			if (ret == -EOPNOTSUPP) {
2355 				fsc->have_copy_from2 = false;
2356 				pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
2357 			}
2358 			dout("copy-from2 request returned %d\n", ret);
2359 			if (!bytes)
2360 				bytes = ret;
2361 			goto out;
2362 		}
2363 		len -= object_size;
2364 		bytes += object_size;
2365 		*src_off += object_size;
2366 		*dst_off += object_size;
2367 	}
2368 
2369 out:
2370 	ceph_oloc_destroy(&src_oloc);
2371 	ceph_oloc_destroy(&dst_oloc);
2372 	return bytes;
2373 }
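
/*
 * Note: the object names generated above follow the usual CephFS data
 * object convention "<ino>.<objnum>" ("%llx.%08llx"); e.g. an inode
 * 0x10000003456 and object number 2 (hypothetical values) map to
 * "10000003456.00000002".
 */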
2374 
2375 static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
2376 				      struct file *dst_file, loff_t dst_off,
2377 				      size_t len, unsigned int flags)
2378 {
2379 	struct inode *src_inode = file_inode(src_file);
2380 	struct inode *dst_inode = file_inode(dst_file);
2381 	struct ceph_inode_info *src_ci = ceph_inode(src_inode);
2382 	struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
2383 	struct ceph_cap_flush *prealloc_cf;
2384 	struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
2385 	loff_t size;
2386 	ssize_t ret = -EIO, bytes;
2387 	u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
2388 	u32 src_objlen, dst_objlen;
2389 	int src_got = 0, dst_got = 0, err, dirty;
2390 
2391 	if (src_inode->i_sb != dst_inode->i_sb) {
2392 		struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
2393 
2394 		if (ceph_fsid_compare(&src_fsc->client->fsid,
2395 				      &dst_fsc->client->fsid)) {
2396 			dout("Copying files across clusters: src: %pU dst: %pU\n",
2397 			     &src_fsc->client->fsid, &dst_fsc->client->fsid);
2398 			return -EXDEV;
2399 		}
2400 	}
2401 	if (ceph_snap(dst_inode) != CEPH_NOSNAP)
2402 		return -EROFS;
2403 
2404 	/*
2405 	 * Some of the checks below will return -EOPNOTSUPP, which will force a
2406 	 * fallback to the default VFS copy_file_range implementation.  This is
2407 	 * desirable in several cases (for example, when 'len' is smaller
2408 	 * than the object size, or when the generic copy would be more
2409 	 * efficient).
2410 	 */
2411 
2412 	if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
2413 		return -EOPNOTSUPP;
2414 
2415 	if (!src_fsc->have_copy_from2)
2416 		return -EOPNOTSUPP;
2417 
2418 	/*
2419 	 * Striped file layouts require that we copy partial objects, but the
2420 	 * OSD copy-from operation only supports full-object copies.  Limit
2421 	 * this to non-striped file layouts for now.
2422 	 */
2423 	if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
2424 	    (src_ci->i_layout.stripe_count != 1) ||
2425 	    (dst_ci->i_layout.stripe_count != 1) ||
2426 	    (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
2427 		dout("Invalid src/dst files layout\n");
2428 		return -EOPNOTSUPP;
2429 	}
2430 
2431 	if (len < src_ci->i_layout.object_size)
2432 		return -EOPNOTSUPP; /* no remote copy will be done */
2433 
2434 	prealloc_cf = ceph_alloc_cap_flush();
2435 	if (!prealloc_cf)
2436 		return -ENOMEM;
2437 
2438 	/* Start by sync'ing the source and destination files */
2439 	ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
2440 	if (ret < 0) {
2441 		dout("failed to write src file (%zd)\n", ret);
2442 		goto out;
2443 	}
2444 	ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
2445 	if (ret < 0) {
2446 		dout("failed to write dst file (%zd)\n", ret);
2447 		goto out;
2448 	}
2449 
2450 	/*
2451 	 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
2452 	 * clients may have dirty data in their caches.  And OSDs know nothing
2453 	 * about caps, so they can't safely do the remote object copies.
2454 	 */
2455 	err = get_rd_wr_caps(src_file, &src_got,
2456 			     dst_file, (dst_off + len), &dst_got);
2457 	if (err < 0) {
2458 		dout("get_rd_wr_caps returned %d\n", err);
2459 		ret = -EOPNOTSUPP;
2460 		goto out;
2461 	}
2462 
2463 	ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
2464 	if (ret < 0)
2465 		goto out_caps;
2466 
2467 	/* Drop dst file cached pages */
2468 	ceph_fscache_invalidate(dst_inode, false);
2469 	ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
2470 					    dst_off >> PAGE_SHIFT,
2471 					    (dst_off + len) >> PAGE_SHIFT);
2472 	if (ret < 0) {
2473 		dout("Failed to invalidate inode pages (%zd)\n", ret);
2474 		ret = 0; /* XXX */
2475 	}
2476 	ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
2477 				      src_ci->i_layout.object_size,
2478 				      &src_objnum, &src_objoff, &src_objlen);
2479 	ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
2480 				      dst_ci->i_layout.object_size,
2481 				      &dst_objnum, &dst_objoff, &dst_objlen);
2482 	/* object-level offsets need to be the same */
2483 	if (src_objoff != dst_objoff) {
2484 		ret = -EOPNOTSUPP;
2485 		goto out_caps;
2486 	}
2487 
2488 	/*
2489 	 * Do a manual copy if the object offset isn't object aligned.
2490 	 * 'src_objlen' contains the bytes left until the end of the object,
2491 	 * starting at src_off.
2492 	 */
2493 	if (src_objoff) {
2494 		dout("Initial partial copy of %u bytes\n", src_objlen);
2495 
2496 		/*
2497 		 * we need to temporarily drop all caps as we'll be calling
2498 		 * {read,write}_iter, which will get caps again.
2499 		 */
2500 		put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2501 		ret = do_splice_direct(src_file, &src_off, dst_file,
2502 				       &dst_off, src_objlen, flags);
2503 		/* Abort on short copies or on error */
2504 		if (ret < src_objlen) {
2505 			dout("Failed partial copy (%zd)\n", ret);
2506 			goto out;
2507 		}
2508 		len -= ret;
2509 		err = get_rd_wr_caps(src_file, &src_got,
2510 				     dst_file, (dst_off + len), &dst_got);
2511 		if (err < 0)
2512 			goto out;
2513 		err = is_file_size_ok(src_inode, dst_inode,
2514 				      src_off, dst_off, len);
2515 		if (err < 0)
2516 			goto out_caps;
2517 	}
2518 
2519 	size = i_size_read(dst_inode);
2520 	bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
2521 				     src_fsc, len, flags);
2522 	if (bytes <= 0) {
2523 		if (!ret)
2524 			ret = bytes;
2525 		goto out_caps;
2526 	}
2527 	dout("Copied %zu bytes out of %zu\n", bytes, len);
2528 	len -= bytes;
2529 	ret += bytes;
2530 
2531 	file_update_time(dst_file);
2532 	inode_inc_iversion_raw(dst_inode);
2533 
2534 	if (dst_off > size) {
2535 		/* Let the MDS know about dst file size change */
2536 		if (ceph_inode_set_size(dst_inode, dst_off) ||
2537 		    ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
2538 			ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY | CHECK_CAPS_FLUSH);
2539 	}
2540 	/* Mark Fw dirty */
2541 	spin_lock(&dst_ci->i_ceph_lock);
2542 	dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
2543 	spin_unlock(&dst_ci->i_ceph_lock);
2544 	if (dirty)
2545 		__mark_inode_dirty(dst_inode, dirty);
2546 
2547 out_caps:
2548 	put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
2549 
2550 	/*
2551 	 * Do the final manual copy if we still have some bytes left, unless
2552 	 * there were errors in remote object copies (len >= object_size).
2553 	 */
2554 	if (len && (len < src_ci->i_layout.object_size)) {
2555 		dout("Final partial copy of %zu bytes\n", len);
2556 		bytes = do_splice_direct(src_file, &src_off, dst_file,
2557 					 &dst_off, len, flags);
2558 		if (bytes > 0)
2559 			ret += bytes;
2560 		else
2561 			dout("Failed partial copy (%zd)\n", bytes);
2562 	}
2563 
2564 out:
2565 	ceph_free_cap_flush(prealloc_cf);
2566 
2567 	return ret;
2568 }
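
/*
 * Putting __ceph_copy_file_range() together, a hypothetical copy with
 * object_size 4M, src_off == dst_off == 1M and len 10M runs in three
 * phases:
 *
 *	1. do_splice_direct() for the unaligned head, [1M, 4M)	(3M)
 *	2. one full-object remote copy-from2 via the OSDs	(4M)
 *	3. do_splice_direct() for the remaining tail		(3M)
 */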
2569 
2570 static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
2571 				    struct file *dst_file, loff_t dst_off,
2572 				    size_t len, unsigned int flags)
2573 {
2574 	ssize_t ret;
2575 
2576 	ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
2577 				     len, flags);
2578 
2579 	if (ret == -EOPNOTSUPP || ret == -EXDEV)
2580 		ret = generic_copy_file_range(src_file, src_off, dst_file,
2581 					      dst_off, len, flags);
2582 	return ret;
2583 }
2584 
2585 const struct file_operations ceph_file_fops = {
2586 	.open = ceph_open,
2587 	.release = ceph_release,
2588 	.llseek = ceph_llseek,
2589 	.read_iter = ceph_read_iter,
2590 	.write_iter = ceph_write_iter,
2591 	.mmap = ceph_mmap,
2592 	.fsync = ceph_fsync,
2593 	.lock = ceph_lock,
2594 	.setlease = simple_nosetlease,
2595 	.flock = ceph_flock,
2596 	.splice_read = generic_file_splice_read,
2597 	.splice_write = iter_file_splice_write,
2598 	.unlocked_ioctl = ceph_ioctl,
2599 	.compat_ioctl = compat_ptr_ioctl,
2600 	.fallocate	= ceph_fallocate,
2601 	.copy_file_range = ceph_copy_file_range,
2602 };
2603