// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS filesystem file handling
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/gfp.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/mm.h>
#include "internal.h"

static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
static int afs_readpage(struct file *file, struct page *page);
static void afs_invalidatepage(struct page *page, unsigned int offset,
			       unsigned int length);
static int afs_releasepage(struct page *page, gfp_t gfp_flags);

static int afs_readpages(struct file *filp, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages);

const struct file_operations afs_file_operations = {
	.open		= afs_open,
	.release	= afs_release,
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= afs_file_write,
	.mmap		= afs_file_mmap,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fsync		= afs_fsync,
	.lock		= afs_lock,
	.flock		= afs_flock,
};

const struct inode_operations afs_file_inode_operations = {
	.getattr	= afs_getattr,
	.setattr	= afs_setattr,
	.permission	= afs_permission,
	.listxattr	= afs_listxattr,
};

const struct address_space_operations afs_fs_aops = {
	.readpage	= afs_readpage,
	.readpages	= afs_readpages,
	.set_page_dirty	= afs_set_page_dirty,
	.launder_page	= afs_launder_page,
	.releasepage	= afs_releasepage,
	.invalidatepage	= afs_invalidatepage,
	.write_begin	= afs_write_begin,
	.write_end	= afs_write_end,
	.writepage	= afs_writepage,
	.writepages	= afs_writepages,
};

static const struct vm_operations_struct afs_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= afs_page_mkwrite,
};

/*
 * Discard a pin on a writeback key.
 */
void afs_put_wb_key(struct afs_wb_key *wbk)
{
	if (wbk && refcount_dec_and_test(&wbk->usage)) {
		key_put(wbk->key);
		kfree(wbk);
	}
}

/*
 * Cache key for writeback.
 */
int afs_cache_wb_key(struct afs_vnode *vnode, struct afs_file *af)
{
	struct afs_wb_key *wbk, *p;

	wbk = kzalloc(sizeof(struct afs_wb_key), GFP_KERNEL);
	if (!wbk)
		return -ENOMEM;
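	/* Two refs to start with: one for the vnode's wb_keys list and one
	 * for af->wb (the found: path takes its ref with refcount_inc
	 * instead).
	 */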
	refcount_set(&wbk->usage, 2);
	wbk->key = af->key;

	spin_lock(&vnode->wb_lock);
	list_for_each_entry(p, &vnode->wb_keys, vnode_link) {
		if (p->key == wbk->key)
			goto found;
	}

	key_get(wbk->key);
	list_add_tail(&wbk->vnode_link, &vnode->wb_keys);
	spin_unlock(&vnode->wb_lock);
	af->wb = wbk;
	return 0;

found:
	refcount_inc(&p->usage);
	spin_unlock(&vnode->wb_lock);
	af->wb = p;
	kfree(wbk);
	return 0;
}

/*
 * open an AFS file or directory and attach a key to it
 */
int afs_open(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af;
	struct key *key;
	int ret;

	_enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);

	key = afs_request_key(vnode->volume->cell);
	if (IS_ERR(key)) {
		ret = PTR_ERR(key);
		goto error;
	}

	af = kzalloc(sizeof(*af), GFP_KERNEL);
	if (!af) {
		ret = -ENOMEM;
		goto error_key;
	}
	af->key = key;

	ret = afs_validate(vnode, key);
	if (ret < 0)
		goto error_af;

	if (file->f_mode & FMODE_WRITE) {
		ret = afs_cache_wb_key(vnode, af);
		if (ret < 0)
			goto error_af;
	}

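	/* Opening with O_TRUNC means the content is about to be entirely
	 * replaced, so note that it's all new.
	 */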
	if (file->f_flags & O_TRUNC)
		set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);

	file->private_data = af;
	_leave(" = 0");
	return 0;

error_af:
	kfree(af);
error_key:
	key_put(key);
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * release an AFS file or directory and discard its key
 */
int afs_release(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af = file->private_data;
	int ret = 0;

	_enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);

	if (file->f_mode & FMODE_WRITE)
		ret = vfs_fsync(file, 0);

	file->private_data = NULL;
	if (af->wb)
		afs_put_wb_key(af->wb);
	key_put(af->key);
	kfree(af);
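	/* Discard any writeback keys on the vnode that are no longer in use
	 * now that this file is closed.
	 */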
	afs_prune_wb_keys(vnode);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Dispose of a ref to a read record.
 */
void afs_put_read(struct afs_read *req)
{
	int i;

	if (refcount_dec_and_test(&req->usage)) {
		if (req->pages) {
			for (i = 0; i < req->nr_pages; i++)
				if (req->pages[i])
					put_page(req->pages[i]);
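			/* Only free the page array if it isn't the inline
			 * trailer on the request.
			 */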
			if (req->pages != req->array)
				kfree(req->pages);
		}
		kfree(req);
	}
}

#ifdef CONFIG_AFS_FSCACHE
/*
 * deal with notification that a page was read from the cache
 */
static void afs_file_readpage_read_complete(struct page *page,
					    void *data,
					    int error)
{
	_enter("%p,%p,%d", page, data, error);

	/* if the read completes with an error, we just unlock the page and let
	 * the VM reissue the readpage */
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}
#endif

static void afs_fetch_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	_enter("op=%08x", op->debug_id);
	afs_vnode_commit_status(op, &op->file[0]);
	afs_stat_v(vnode, n_fetches);
	atomic_long_add(op->fetch.req->actual_len, &op->net->n_fetch_bytes);
}

static void afs_fetch_data_put(struct afs_operation *op)
{
	afs_put_read(op->fetch.req);
}

static const struct afs_operation_ops afs_fetch_data_operation = {
	.issue_afs_rpc	= afs_fs_fetch_data,
	.issue_yfs_rpc	= yfs_fs_fetch_data,
	.success	= afs_fetch_data_success,
	.aborted	= afs_check_for_remote_deletion,
	.put		= afs_fetch_data_put,
};

/*
 * Fetch file data from the volume.
 */
int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *req)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x,,,",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);

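	/* The operation takes its own ref on the read request; the op's ->put
	 * handler (afs_fetch_data_put) drops it when the op is disposed of.
	 */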
	op->fetch.req	= afs_get_read(req);
	op->ops		= &afs_fetch_data_operation;
	return afs_do_sync_operation(op);
}

/*
 * read page from file, directory or symlink, given a key to use
 */
int afs_page_filler(void *data, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_read *req;
	struct key *key = data;
	int ret;

	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);

	BUG_ON(!PageLocked(page));

	ret = -ESTALE;
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		goto error;

	/* is it cached? */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_page(vnode->cache,
					 page,
					 afs_file_readpage_read_complete,
					 NULL,
					 GFP_KERNEL);
#else
	ret = -ENOBUFS;
#endif
	switch (ret) {
		/* read BIO submitted (page in cache) */
	case 0:
		break;

		/* page not yet cached */
	case -ENODATA:
		_debug("cache said ENODATA");
		goto go_on;

		/* page will not be cached */
	case -ENOBUFS:
		_debug("cache said ENOBUFS");

		fallthrough;
	default:
	go_on:
		req = kzalloc(struct_size(req, array, 1), GFP_KERNEL);
		if (!req)
			goto enomem;

		/* We request a full page.  If the page is a partial one at the
		 * end of the file, the server will return a short read and the
		 * unmarshalling code will clear the unfilled space.
		 */
		refcount_set(&req->usage, 1);
		req->pos = (loff_t)page->index << PAGE_SHIFT;
		req->len = PAGE_SIZE;
		req->nr_pages = 1;
		req->pages = req->array;
		req->pages[0] = page;
		get_page(page);

		/* read the contents of the file from the server into the
		 * page */
		ret = afs_fetch_data(vnode, key, req);
		afs_put_read(req);

		if (ret < 0) {
			if (ret == -ENOENT) {
				_debug("got NOENT from server - marking file deleted and stale");
				set_bit(AFS_VNODE_DELETED, &vnode->flags);
				ret = -ESTALE;
			}

#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			BUG_ON(PageFsCache(page));

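			/* Leave transient failures retryable and don't flag
			 * the page in error for those.
			 */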
			if (ret == -EINTR ||
			    ret == -ENOMEM ||
			    ret == -ERESTARTSYS ||
			    ret == -EAGAIN)
				goto error;
			goto io_error;
		}

		SetPageUptodate(page);

		/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page) &&
		    fscache_write_page(vnode->cache, page, vnode->status.size,
				       GFP_KERNEL) != 0) {
			fscache_uncache_page(vnode->cache, page);
			BUG_ON(PageFsCache(page));
		}
#endif
		unlock_page(page);
	}

	_leave(" = 0");
	return 0;

io_error:
	SetPageError(page);
	goto error;
enomem:
	ret = -ENOMEM;
error:
	unlock_page(page);
	_leave(" = %d", ret);
	return ret;
}

/*
 * read page from file, directory or symlink, given a file to nominate the key
 * to be used
 */
static int afs_readpage(struct file *file, struct page *page)
{
	struct key *key;
	int ret;

	if (file) {
		key = afs_file_key(file);
		ASSERT(key != NULL);
		ret = afs_page_filler(key, page);
	} else {
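		/* There's no file through which to nominate a key, so request
		 * one for the containing cell directly.
		 */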
		struct inode *inode = page->mapping->host;
		key = afs_request_key(AFS_FS_S(inode->i_sb)->cell);
		if (IS_ERR(key)) {
			ret = PTR_ERR(key);
		} else {
			ret = afs_page_filler(key, page);
			key_put(key);
		}
	}
	return ret;
}

/*
 * Make pages available as they're filled.
 */
static void afs_readpages_page_done(struct afs_read *req)
{
#ifdef CONFIG_AFS_FSCACHE
	struct afs_vnode *vnode = req->vnode;
#endif
	struct page *page = req->pages[req->index];

	req->pages[req->index] = NULL;
	SetPageUptodate(page);

	/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page) &&
	    fscache_write_page(vnode->cache, page, vnode->status.size,
			       GFP_KERNEL) != 0) {
		fscache_uncache_page(vnode->cache, page);
		BUG_ON(PageFsCache(page));
	}
#endif
	unlock_page(page);
	put_page(page);
}

/*
 * Read a contiguous set of pages.
 */
static int afs_readpages_one(struct file *file, struct address_space *mapping,
			     struct list_head *pages)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_read *req;
	struct list_head *p;
	struct page *first, *page;
	struct key *key = afs_file_key(file);
	pgoff_t index;
	int ret, n, i;

	/* Count the number of contiguous pages at the front of the list.  Note
	 * that the list goes prev-wards rather than next-wards.
	 */
	first = lru_to_page(pages);
	index = first->index + 1;
	n = 1;
	for (p = first->lru.prev; p != pages; p = p->prev) {
		page = list_entry(p, struct page, lru);
		if (page->index != index)
			break;
		index++;
		n++;
	}

	req = kzalloc(struct_size(req, array, n), GFP_NOFS);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->vnode = vnode;
	req->page_done = afs_readpages_page_done;
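	/* Assign the index first and shift afterwards so that the shift is
	 * done on the 64-bit loff_t rather than on a possibly 32-bit pgoff_t.
	 */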
	req->pos = first->index;
	req->pos <<= PAGE_SHIFT;
	req->pages = req->array;

	/* Transfer the pages to the request.  We add them in until one fails
	 * to add to the LRU and then we stop (as that'll make a hole in the
	 * contiguous run).
	 *
	 * Note that it's possible for the file size to change whilst we're
	 * doing this, but we rely on the server returning less than we asked
	 * for if the file shrank.  We also rely on this to deal with a partial
	 * page at the end of the file.
	 */
	do {
		page = lru_to_page(pages);
		list_del(&page->lru);
		index = page->index;
		if (add_to_page_cache_lru(page, mapping, index,
					  readahead_gfp_mask(mapping))) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			put_page(page);
			break;
		}

		req->pages[req->nr_pages++] = page;
		req->len += PAGE_SIZE;
	} while (req->nr_pages < n);

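	/* If even the first page couldn't be added to the page cache, there's
	 * nothing for us to read.
	 */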
	if (req->nr_pages == 0) {
		kfree(req);
		return 0;
	}

	ret = afs_fetch_data(vnode, key, req);
	if (ret < 0)
		goto error;

	task_io_account_read(PAGE_SIZE * req->nr_pages);
	afs_put_read(req);
	return 0;

error:
	if (ret == -ENOENT) {
		_debug("got NOENT from server - marking file deleted and stale");
		set_bit(AFS_VNODE_DELETED, &vnode->flags);
		ret = -ESTALE;
	}

	for (i = 0; i < req->nr_pages; i++) {
		page = req->pages[i];
		if (page) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			SetPageError(page);
			unlock_page(page);
		}
	}

	afs_put_read(req);
	return ret;
}

/*
 * read a set of pages
 */
static int afs_readpages(struct file *file, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages)
{
	struct key *key = afs_file_key(file);
	struct afs_vnode *vnode;
	int ret = 0;

	_enter("{%d},{%lu},,%d",
	       key_serial(key), mapping->host->i_ino, nr_pages);

	ASSERT(key != NULL);

	vnode = AFS_FS_I(mapping->host);
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* attempt to read as many of the pages as possible */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_pages(vnode->cache,
					  mapping,
					  pages,
					  &nr_pages,
					  afs_file_readpage_read_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
#else
	ret = -ENOBUFS;
#endif

	switch (ret) {
		/* all pages are being read from the cache */
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(nr_pages != 0);
		_leave(" = 0 [reading all]");
		return 0;

		/* there were pages that couldn't be read from the cache */
	case -ENODATA:
	case -ENOBUFS:
		break;

		/* other error */
	default:
		_leave(" = %d", ret);
		return ret;
	}

	while (!list_empty(pages)) {
		ret = afs_readpages_one(file, mapping, pages);
		if (ret < 0)
			break;
	}

	_leave(" = %d [netting]", ret);
	return ret;
}

/*
 * Adjust the dirty region of the page on truncation or full invalidation,
 * getting rid of the markers altogether if the region is entirely invalidated.
 */
static void afs_invalidate_dirty(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
	unsigned long priv;
	unsigned int f, t, end = offset + length;

	priv = page_private(page);

	/* we clean up only if the entire page is being invalidated */
	if (offset == 0 && length == thp_size(page))
		goto full_invalidate;

	/* If the page was dirtied by page_mkwrite(), the PTE stays writable
	 * and we don't get another notification to tell us to expand it
	 * again.
	 */
	if (afs_is_page_dirty_mmapped(priv))
		return;

	/* We may need to shorten the dirty region */
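	/* The dirty region covers bytes [f, t) of the page; the invalidated
	 * region is [offset, end).
	 */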
	f = afs_page_dirty_from(priv);
	t = afs_page_dirty_to(priv);

	if (t <= offset || f >= end)
		return; /* Doesn't overlap */

	if (f < offset && t > end)
		return; /* Splits the dirty region - just absorb it */

	if (f >= offset && t <= end)
		goto undirty;

	if (f < offset)
		t = offset;
	else
		f = end;
	if (f == t)
		goto undirty;

	priv = afs_page_dirty(f, t);
	set_page_private(page, priv);
	trace_afs_page_dirty(vnode, tracepoint_string("trunc"), page->index, priv);
	return;

undirty:
	trace_afs_page_dirty(vnode, tracepoint_string("undirty"), page->index, priv);
	clear_page_dirty_for_io(page);
full_invalidate:
	priv = (unsigned long)detach_page_private(page);
	trace_afs_page_dirty(vnode, tracepoint_string("inval"), page->index, priv);
}

/*
 * invalidate part or all of a page
 * - release the page's private data only if the entire page is being
 *   invalidated (offset 0 and full page length)
 */
static void afs_invalidatepage(struct page *page, unsigned int offset,
			       unsigned int length)
{
	_enter("{%lu},%u,%u", page->index, offset, length);

	BUG_ON(!PageLocked(page));

#ifdef CONFIG_AFS_FSCACHE
	/* we clean up only if the entire page is being invalidated */
	if (offset == 0 && length == PAGE_SIZE) {
		if (PageFsCache(page)) {
			struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
			fscache_wait_on_page_write(vnode->cache, page);
			fscache_uncache_page(vnode->cache, page);
		}
	}
#endif

	if (PagePrivate(page))
		afs_invalidate_dirty(page, offset, length);

	_leave("");
}

/*
 * release a page and clean up its private state if it's not busy
 * - return true if the page can now be released, false if not
 */
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
	unsigned long priv;

	_enter("{{%llx:%llu}[%lu],%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
	       gfp_flags);

	/* deny if page is being written to the cache and the caller hasn't
	 * elected to wait */
#ifdef CONFIG_AFS_FSCACHE
	if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
		_leave(" = F [cache busy]");
		return 0;
	}
#endif

	if (PagePrivate(page)) {
		priv = (unsigned long)detach_page_private(page);
		trace_afs_page_dirty(vnode, tracepoint_string("rel"),
				     page->index, priv);
	}

	/* indicate that the page can be released */
	_leave(" = T");
	return 1;
}

/*
 * Handle setting up a memory mapping on an AFS file.
 */
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	int ret;

	ret = generic_file_mmap(file, vma);
	if (ret == 0)
		vma->vm_ops = &afs_vm_ops;
	return ret;
}
735