// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }

	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);

#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}
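
/*
 * Illustrative note (added for clarity, not from the original source):
 * open(path, O_WRONLY | O_CREAT | O_TRUNC) translates to wire_flags of
 * CEPH_O_WRONLY | CEPH_O_CREAT | CEPH_O_TRUNC.  Flags with no wire
 * counterpart (O_APPEND, for example) are left set in 'flags' and show
 * up in the "unused open flags" debug message above.
 */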

/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    ack from the OSD.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */

/*
 * Calculate the length sum of direct io vectors that can
 * be combined into one page vector.
 */
static size_t dio_get_pagev_size(const struct iov_iter *it)
{
	const struct iovec *iov = it->iov;
	const struct iovec *iovend = iov + it->nr_segs;
	size_t size;

	size = iov->iov_len - it->iov_offset;
	/*
	 * An iov can be page vectored when both the current tail
	 * and the next base are page aligned.
	 */
	while (PAGE_ALIGNED((iov->iov_base + iov->iov_len)) &&
	       (++iov < iovend && PAGE_ALIGNED((iov->iov_base)))) {
		size += iov->iov_len;
	}
	dout("dio_get_pagevlen len = %zu\n", size);
	return size;
}
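
/*
 * Worked example (added for clarity, assuming 4K pages and iov_offset
 * of 0): for iovecs { base = 0x1000, len = 8192 } and { base = 0x5000,
 * len = 100 }, the first tail (0x1000 + 8192 = 0x3000) and the second
 * base (0x5000) are both page aligned, so the segments coalesce and
 * 8292 bytes can go into one page vector.  Had the second iovec started
 * at 0x5010 instead, the scan would stop after the first 8192 bytes.
 */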

/*
 * Allocate a page vector based on (@it, @nbytes).
 * The return value is the tuple describing a page vector,
 * that is (@pages, @page_align, @num_pages).
 */
static struct page **
dio_get_pages_alloc(const struct iov_iter *it, size_t nbytes,
		    size_t *page_align, int *num_pages)
{
	struct iov_iter tmp_it = *it;
	size_t align;
	struct page **pages;
	int ret = 0, idx, npages;

	align = (unsigned long)(it->iov->iov_base + it->iov_offset) &
		(PAGE_SIZE - 1);
	npages = calc_pages_for(align, nbytes);
	pages = kvmalloc(sizeof(*pages) * npages, GFP_KERNEL);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	for (idx = 0; idx < npages; ) {
		size_t start;
		ret = iov_iter_get_pages(&tmp_it, pages + idx, nbytes,
					 npages - idx, &start);
		if (ret < 0)
			goto fail;

		iov_iter_advance(&tmp_it, ret);
		nbytes -= ret;
		idx += (ret + start + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	BUG_ON(nbytes != 0);
	*num_pages = npages;
	*page_align = align;
	dout("dio_get_pages_alloc: got %d pages align %zu\n", npages, align);
	return pages;
fail:
	ceph_put_page_vector(pages, idx, false);
	return ERR_PTR(ret);
}
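
/*
 * Example of the returned tuple (added for clarity, assuming 4K pages):
 * if the first segment starts at a user address whose low bits are
 * 0x200 and nbytes = 8192, then align is 0x200 and
 * calc_pages_for(0x200, 8192) yields num_pages = 3, because the data
 * straddles three pages; page_align tells the caller where the data
 * begins within the first page.
 */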

/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}

/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	struct ceph_file_info *cf;
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_register_inode_cookie(inode);
		ceph_fscache_file_set_cookie(inode, file);
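		/* fall through: regular files share the setup below */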
	case S_IFDIR:
		dout("init_file %p %p 0%o (regular)\n", inode, file,
		     inode->i_mode);
		cf = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!cf) {
			ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
			return -ENOMEM;
		}
		cf->fmode = fmode;

		spin_lock_init(&cf->rw_contexts_lock);
		INIT_LIST_HEAD(&cf->rw_contexts);

		cf->next_offset = 2;
		cf->readdir_cache_idx = -1;
		file->private_data = cf;
		BUG_ON(inode->i_fop->release != ceph_release);
		break;

	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		break;

	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		ceph_put_fmode(ceph_inode(inode), fmode); /* clean up */
		BUG_ON(inode->i_fop->release == ceph_release);

		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}

/*
 * try renew caps after session gets killed.
 */
int ceph_renew_caps(struct inode *inode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_fmode = -1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}

/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *cf = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (cf) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		spin_lock(&ci->i_ceph_lock);
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_get_fmode(ci, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}


/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode,
		     int *opened)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acls_info acls = {};
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	if (flags & O_CREAT) {
		err = ceph_pre_init_acls(dir, &mode, &acls);
		if (err < 0)
			return err;
	}

	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_acl;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	if (flags & O_CREAT) {
		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (acls.pagelist) {
			req->r_pagelist = acls.pagelist;
			acls.pagelist = NULL;
		}
	}

	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);

	req->r_parent = dir;
	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	err = ceph_handle_snapdir(req, dentry, err);
	if (err)
		goto out_req;

	if ((flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			ceph_init_inode_acls(d_inode(dentry), &acls);
			*opened |= FILE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open, opened);
	}
out_req:
	if (!req->r_err && req->r_target_inode)
		ceph_put_fmode(ceph_inode(req->r_target_inode), req->r_fmode);
	ceph_mdsc_put_request(req);
out_acl:
	ceph_release_acls_info(&acls);
	dout("atomic_open result=%d\n", err);
	return err;
}

int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_file_info *cf = file->private_data;

	dout("release inode %p file %p\n", inode, file);
	ceph_put_fmode(ci, cf->fmode);
	if (cf->last_readdir)
		ceph_mdsc_put_request(cf->last_readdir);
	kfree(cf->last_name);
	kfree(cf->dir_info);
	WARN_ON(!list_empty(&cf->rw_contexts));
	kmem_cache_free(ceph_file_cachep, cf);

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

enum {
	HAVE_RETRIED = 1,
	CHECK_EOF =    2,
	READ_INLINE =  3,
};
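
/*
 * Clarifying note (added): ceph_read_iter() uses these values in its
 * retry_op state machine.  CHECK_EOF means a short sync/direct read
 * should be re-checked against i_size and possibly continued,
 * READ_INLINE means the data must be fetched from the inline-data
 * field via getattr, and HAVE_RETRIED marks that one retry pass has
 * already happened so the loop terminates.
 */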

/*
 * Read a range of bytes striped over one or more objects.  Iterate over
 * objects we stripe over.  (That's not atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
static int striped_read(struct inode *inode,
			u64 pos, u64 len,
			struct page **pages, int num_pages,
			int page_align, int *checkeof)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 this_len;
	loff_t i_size;
	int page_idx;
	int ret, read = 0;
	bool hit_stripe, was_short;

	/*
	 * we may need to do multiple reads.  not atomic, unfortunately.
	 */
more:
	this_len = len;
	page_idx = (page_align + read) >> PAGE_SHIFT;
	ret = ceph_osdc_readpages(&fsc->client->osdc, ceph_vino(inode),
				  &ci->i_layout, pos, &this_len,
				  ci->i_truncate_seq, ci->i_truncate_size,
				  pages + page_idx, num_pages - page_idx,
				  ((page_align + read) & ~PAGE_MASK));
	if (ret == -ENOENT)
		ret = 0;
	hit_stripe = this_len < len;
	was_short = ret >= 0 && ret < this_len;
	dout("striped_read %llu~%llu (read %u) got %d%s%s\n", pos, len, read,
	     ret, hit_stripe ? " HITSTRIPE" : "", was_short ? " SHORT" : "");

	i_size = i_size_read(inode);
	if (ret >= 0) {
		if (was_short && (pos + ret < i_size)) {
			int zlen = min(this_len - ret, i_size - pos - ret);
			int zoff = page_align + read + ret;
			dout(" zero gap %llu to %llu\n",
			     pos + ret, pos + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		read += ret;
		pos += ret;
		len -= ret;

		/* hit a stripe boundary and need to continue */
		if (len && hit_stripe && pos < i_size)
			goto more;
	}

	if (read > 0) {
		ret = read;
		/* did we bounce off eof? */
		if (pos + len > i_size)
			*checkeof = CHECK_EOF;
	}

	dout("striped_read returns %d\n", ret);
	return ret;
}
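
/*
 * Worked example (added for clarity, assuming the default layout of 4M
 * objects with no striping): a 6M read at offset 0 first asks for 6M,
 * the OSD client trims this_len to the 4M object boundary (hit_stripe
 * becomes true), and a second pass reads the remaining 2M from the next
 * object, so the caller sees one contiguous 6M result.
 */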

/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.
 */
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *checkeof)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct page **pages;
	u64 off = iocb->ki_pos;
	int num_pages;
	ssize_t ret;
	size_t len = iov_iter_count(to);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * in sequence.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, off,
						off + len);
	if (ret < 0)
		return ret;

	if (unlikely(to->type & ITER_PIPE)) {
		size_t page_off;
		ret = iov_iter_get_pages_alloc(to, &pages, len,
					       &page_off);
		if (ret <= 0)
			return -ENOMEM;
		num_pages = DIV_ROUND_UP(ret + page_off, PAGE_SIZE);

		ret = striped_read(inode, off, ret, pages, num_pages,
				   page_off, checkeof);
		if (ret > 0) {
			iov_iter_advance(to, ret);
			off += ret;
		} else {
			iov_iter_advance(to, 0);
		}
		ceph_put_page_vector(pages, num_pages, false);
	} else {
		num_pages = calc_pages_for(off, len);
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages))
			return PTR_ERR(pages);

		ret = striped_read(inode, off, len, pages, num_pages,
				   (off & ~PAGE_MASK), checkeof);
		if (ret > 0) {
			int l, k = 0;
			size_t left = ret;

			while (left) {
				size_t page_off = off & ~PAGE_MASK;
				size_t copy = min_t(size_t, left,
						    PAGE_SIZE - page_off);
				l = copy_page_to_iter(pages[k++], page_off,
						      copy, to);
				off += l;
				left -= l;
				if (l < copy)
					break;
			}
		}
		ceph_release_page_vector(pages, num_pages);
	}

	if (off > iocb->ki_pos) {
		ret = off - iocb->ki_pos;
		iocb->ki_pos = off;
	}

	dout("sync_read result %zd\n", ret);
	return ret;
}

struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};

static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);

	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret, 0);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}

static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	int num_pages = calc_pages_for((u64)osd_data->alignment,
				       osd_data->length);

	dout("ceph_aio_complete_req %p rc %d bytes %llu\n",
	     inode, rc, osd_data->length);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->wb_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && osd_data->length > rc) {
			int zoff = osd_data->alignment + rc;
			int zlen = osd_data->length - rc;
			/*
			 * If the read is satisfied by a single OSD
			 * request, it can extend past EOF. Otherwise
			 * the read is within i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			if (zlen > 0)
				ceph_zero_page_vector_range(zoff, zlen,
							    osd_data->pages);
		}
	}

	ceph_put_page_vector(osd_data->pages, num_pages, aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}

static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 2,
			false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;
	req->r_abort_on_full = true;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}

static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && iter_is_iovec(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	if (write) {
		int ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = dio_get_pagev_size(iter);
		size_t start = 0;
		ssize_t len;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		len = size;
		pages = dio_get_pages_alloc(iter, len, &start, &num_pages);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		/*
		 * To simplify error handling, allow AIO only when the
		 * IO is within i_size or can be satisfied by a single
		 * OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range. this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
					(pos+len) | (PAGE_SIZE - 1));

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, start,
						 false, false);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_unsafe_item, &aio_req->osd_reqs);

			pos += len;
			iov_iter_advance(iter, len);
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);
				ceph_zero_page_vector_range(start + ret, zlen,
							    pages);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		ceph_put_page_vector(pages, num_pages, should_dirty);

		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		iov_iter_advance(iter, len);

		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_unsafe_item);
			list_del_init(&req->r_unsafe_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping, pos, pos + count);
	if (ret < 0)
		return ret;

	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

out:
		ceph_osdc_put_request(req);
		if (ret != 0) {
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);
		pos += len;
		written += len;
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}

	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}

/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct page *pinned_page = NULL;
	ssize_t ret;
	int want, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_CACHE;
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, want, -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
		ret = generic_file_read_iter(iocb, to);
		ceph_del_rw_context(fi, &rw_ctx);
	}
	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	if (pinned_page) {
		put_page(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);
	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}

/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_sb_to_client(inode->i_sb)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want, got;
	loff_t pos;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

retry_snap:
	inode_lock(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	count = iov_iter_count(from);
	err = file_remove_privs(file);
	if (err)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out;

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		err = ceph_uninline_data(file, NULL);
		if (err < 0)
			goto out;
	}

	/* FIXME: not complete since it doesn't account for being at quota */
	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;
	got = 0;
	err = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, pos + count,
			    &got, NULL);
	if (err < 0)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;
		inode_unlock(inode);

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex. Because
		 * the MDS revokes Fwb caps before sending truncate
		 * message to us. We can't get Fwb cap while there
		 * are pending vmtruncate. So write and vmtruncate
		 * cannot run at the same time.
		 */
		written = generic_perform_write(file, from, pos);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		inode_unlock(inode);
	}

	if (written >= 0) {
		int dirty;
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)count);
		goto retry_snap;
	}

	if (written >= 0) {
		if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;
		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;

out:
	inode_unlock(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}

/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	return ret;
}

static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}

static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}
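
/*
 * Worked example (added for clarity, assuming 4K pages): zeroing
 * offset 0x1200, length 0x3000 first zeroes the partial head page
 * range 0x1200-0x1fff, then truncates the two fully covered page
 * cache pages at 0x2000-0x3fff, and finally zeroes the partial tail
 * range 0x4000-0x41ff.
 */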

static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					ceph_vino(inode),
					offset, length,
					0, 1, op,
					CEPH_OSD_FLAG_WRITE,
					NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}

static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	/* cast avoids 32-bit overflow of object_size * stripe_count */
	u64 object_set_size = (u64)object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);

	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	while (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	return ret;
}
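
/*
 * Worked example (added for clarity): with object_size = 4M and
 * stripe_count = 4, object_set_size is 16M.  An offset of 5M is first
 * rounded up to the next 16M period boundary via do_div(), partial
 * objects up to that boundary are zeroed individually, each whole 16M
 * period is handled by truncating/deleting the four objects in the set
 * (ceph_zero_partial_object() with a NULL length), and any tail shorter
 * than a period is again zeroed as partial objects.
 */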

static long ceph_fallocate(struct file *file, int mode,
				loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_osd_client *osdc =
		&ceph_inode_to_client(inode)->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) &&
	    !(mode & FALLOC_FL_PUNCH_HOLE)) {
		ret = -ENOSPC;
		goto unlock;
	}

	if (ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file, NULL);
		if (ret < 0)
			goto unlock;
	}

	size = i_size_read(inode);
	if (!(mode & FALLOC_FL_KEEP_SIZE)) {
		endoff = offset + length;
		ret = inode_newsize_ok(inode, endoff);
		if (ret)
			goto unlock;
	}

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(ci, CEPH_CAP_FILE_WR, want, endoff, &got, NULL);
	if (ret < 0)
		goto unlock;

	if (mode & FALLOC_FL_PUNCH_HOLE) {
		if (offset < size)
			ceph_zero_pagecache_range(inode, offset, length);
		ret = ceph_zero_objects(inode, offset, length);
	} else if (endoff > size) {
		truncate_pagecache_range(inode, size, -1);
		if (ceph_inode_set_size(inode, endoff))
			ceph_check_caps(ceph_inode(inode),
				CHECK_CAPS_AUTHONLY, NULL);
	}

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		ci->i_inline_version = CEPH_INLINE_NONE;
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}

const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl	= ceph_ioctl,
	.fallocate	= ceph_fallocate,
};