xref: /openbmc/linux/fs/afs/file.c (revision ae213c44)
1 /* AFS filesystem file handling
2  *
3  * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 
12 #include <linux/kernel.h>
13 #include <linux/module.h>
14 #include <linux/init.h>
15 #include <linux/fs.h>
16 #include <linux/pagemap.h>
17 #include <linux/writeback.h>
18 #include <linux/gfp.h>
19 #include <linux/task_io_accounting_ops.h>
20 #include <linux/mm.h>
21 #include "internal.h"
22 
/* Forward declarations for the file- and address-space-operation hooks
 * defined later in this file and referenced by the operation tables below.
 */
static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
static int afs_readpage(struct file *file, struct page *page);
static void afs_invalidatepage(struct page *page, unsigned int offset,
			       unsigned int length);
static int afs_releasepage(struct page *page, gfp_t gfp_flags);

static int afs_readpages(struct file *filp, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages);
31 
/* Operations for open AFS regular files. */
const struct file_operations afs_file_operations = {
	.open		= afs_open,
	.release	= afs_release,
	.llseek		= generic_file_llseek,
	.read_iter	= generic_file_read_iter,
	.write_iter	= afs_file_write,
	.mmap		= afs_file_mmap,
	.splice_read	= generic_file_splice_read,
	.fsync		= afs_fsync,
	.lock		= afs_lock,
	.flock		= afs_flock,
};
44 
/* Inode operations for AFS regular files. */
const struct inode_operations afs_file_inode_operations = {
	.getattr	= afs_getattr,
	.setattr	= afs_setattr,
	.permission	= afs_permission,
	.listxattr	= afs_listxattr,
};
51 
/* Address space operations for AFS file mappings. */
const struct address_space_operations afs_fs_aops = {
	.readpage	= afs_readpage,
	.readpages	= afs_readpages,
	.set_page_dirty	= afs_set_page_dirty,
	.launder_page	= afs_launder_page,
	.releasepage	= afs_releasepage,
	.invalidatepage	= afs_invalidatepage,
	.write_begin	= afs_write_begin,
	.write_end	= afs_write_end,
	.writepage	= afs_writepage,
	.writepages	= afs_writepages,
};
64 
/* VM operations installed by afs_file_mmap() on AFS mappings. */
static const struct vm_operations_struct afs_vm_ops = {
	.fault		= filemap_fault,
	.map_pages	= filemap_map_pages,
	.page_mkwrite	= afs_page_mkwrite,
};
70 
71 /*
72  * Discard a pin on a writeback key.
73  */
74 void afs_put_wb_key(struct afs_wb_key *wbk)
75 {
76 	if (refcount_dec_and_test(&wbk->usage)) {
77 		key_put(wbk->key);
78 		kfree(wbk);
79 	}
80 }
81 
/*
 * Cache key for writeback.
 *
 * Record the key held by @af on @vnode's list of writeback keys so that
 * later writeback can find a key to use.  If the same key is already on
 * the list, the existing record is reused instead.  Returns 0 or -ENOMEM.
 */
int afs_cache_wb_key(struct afs_vnode *vnode, struct afs_file *af)
{
	struct afs_wb_key *wbk, *p;

	/* Allocate outside the spinlock.  Usage starts at 2: one ref for the
	 * vnode's list and one for af->wb.
	 */
	wbk = kzalloc(sizeof(struct afs_wb_key), GFP_KERNEL);
	if (!wbk)
		return -ENOMEM;
	refcount_set(&wbk->usage, 2);
	wbk->key = af->key;

	spin_lock(&vnode->wb_lock);
	list_for_each_entry(p, &vnode->wb_keys, vnode_link) {
		if (p->key == wbk->key)
			goto found;
	}

	key_get(wbk->key);
	list_add_tail(&wbk->vnode_link, &vnode->wb_keys);
	spin_unlock(&vnode->wb_lock);
	af->wb = wbk;
	return 0;

found:
	/* Reuse the existing record and discard the one we allocated.  No
	 * key_put() is needed here as key_get() was never called on wbk->key
	 * on this path.
	 */
	refcount_inc(&p->usage);
	spin_unlock(&vnode->wb_lock);
	af->wb = p;
	kfree(wbk);
	return 0;
}
114 
/*
 * open an AFS file or directory and attach a key to it
 *
 * Requests a key from the cell's keyring, validates the vnode against the
 * server, and (for writable opens) records the key for later writeback.
 * On success the afs_file is stashed in file->private_data; on failure the
 * partially-built state is unwound via the goto ladder.
 */
int afs_open(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af;
	struct key *key;
	int ret;

	_enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);

	/* Get the caller's key for this cell. */
	key = afs_request_key(vnode->volume->cell);
	if (IS_ERR(key)) {
		ret = PTR_ERR(key);
		goto error;
	}

	af = kzalloc(sizeof(*af), GFP_KERNEL);
	if (!af) {
		ret = -ENOMEM;
		goto error_key;
	}
	af->key = key;	/* af now owns the key ref from afs_request_key() */

	ret = afs_validate(vnode, key);
	if (ret < 0)
		goto error_af;

	/* Writable opens must record the key for writeback from e.g. mmap. */
	if (file->f_mode & FMODE_WRITE) {
		ret = afs_cache_wb_key(vnode, af);
		if (ret < 0)
			goto error_af;
	}

	if (file->f_flags & O_TRUNC)
		set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);

	file->private_data = af;
	_leave(" = 0");
	return 0;

error_af:
	kfree(af);
error_key:
	key_put(key);
error:
	_leave(" = %d", ret);
	return ret;
}
165 
/*
 * release an AFS file or directory and discard its key
 *
 * Flushes dirty data for writable files, then tears down the afs_file
 * state attached at open time.  Returns the result of the flush (0 if no
 * flush was needed).
 */
int afs_release(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af = file->private_data;
	int ret = 0;

	_enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);

	/* Sync any outstanding writes before discarding the writeback key. */
	if ((file->f_mode & FMODE_WRITE))
		ret = vfs_fsync(file, 0);

	file->private_data = NULL;
	if (af->wb)
		afs_put_wb_key(af->wb);
	key_put(af->key);
	kfree(af);
	/* Drop writeback key records that no longer have users. */
	afs_prune_wb_keys(vnode);
	_leave(" = %d", ret);
	return ret;
}
189 
190 /*
191  * Dispose of a ref to a read record.
192  */
193 void afs_put_read(struct afs_read *req)
194 {
195 	int i;
196 
197 	if (refcount_dec_and_test(&req->usage)) {
198 		for (i = 0; i < req->nr_pages; i++)
199 			if (req->pages[i])
200 				put_page(req->pages[i]);
201 		if (req->pages != req->array)
202 			kfree(req->pages);
203 		kfree(req);
204 	}
205 }
206 
#ifdef CONFIG_AFS_FSCACHE
/*
 * Completion callback for a page read from the cache.
 */
static void afs_file_readpage_read_complete(struct page *page,
					    void *data,
					    int error)
{
	_enter("%p,%p,%d", page, data, error);

	/* A successful cache read fills the whole page.  On error the page
	 * is merely unlocked, leaving the VM to reissue the readpage.
	 */
	if (error == 0)
		SetPageUptodate(page);
	unlock_page(page);
}
#endif
224 
/*
 * Fetch file data from the volume.
 *
 * Issues a data-fetch operation for the range described by @desc, using
 * @key for authentication, retrying across fileservers as needed.
 * Returns 0 on success or a negative error code (-ERESTARTSYS if the
 * operation could not be begun, -ENOMEM on allocation failure).
 */
int afs_fetch_data(struct afs_vnode *vnode, struct key *key, struct afs_read *desc)
{
	struct afs_fs_cursor fc;
	struct afs_status_cb *scb;
	int ret;

	_enter("%s{%llx:%llu.%u},%x,,,",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(key));

	scb = kzalloc(sizeof(struct afs_status_cb), GFP_KERNEL);
	if (!scb)
		return -ENOMEM;

	ret = -ERESTARTSYS;
	if (afs_begin_vnode_operation(&fc, vnode, key, true)) {
		afs_dataversion_t data_version = vnode->status.data_version;

		/* Try each eligible fileserver in turn until the fetch
		 * succeeds or the cursor gives up.
		 */
		while (afs_select_fileserver(&fc)) {
			fc.cb_break = afs_calc_vnode_cb_break(vnode);
			afs_fs_fetch_data(&fc, scb, desc);
		}

		afs_check_for_remote_deletion(&fc, vnode);
		afs_vnode_commit_status(&fc, vnode, fc.cb_break,
					&data_version, scb);
		ret = afs_end_vnode_operation(&fc);
	}

	if (ret == 0) {
		/* Account the successful fetch in the per-net statistics. */
		afs_stat_v(vnode, n_fetches);
		atomic_long_add(desc->actual_len,
				&afs_v2net(vnode)->n_fetch_bytes);
	}

	kfree(scb);
	_leave(" = %d", ret);
	return ret;
}
270 
/*
 * read page from file, directory or symlink, given a key to use
 *
 * The cache is tried first; on a cache miss the data is fetched from the
 * server and, if possible, copied into the cache afterwards.  The page is
 * unlocked in all cases before returning (on success via this function or,
 * for a cache hit, by the fscache completion callback).
 */
int afs_page_filler(void *data, struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_read *req;
	struct key *key = data;
	int ret;

	_enter("{%x},{%lu},{%lu}", key_serial(key), inode->i_ino, page->index);

	BUG_ON(!PageLocked(page));

	ret = -ESTALE;
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
		goto error;

	/* is it cached? */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_page(vnode->cache,
					 page,
					 afs_file_readpage_read_complete,
					 NULL,
					 GFP_KERNEL);
#else
	ret = -ENOBUFS;
#endif
	switch (ret) {
		/* read BIO submitted (page in cache) */
	case 0:
		break;

		/* page not yet cached */
	case -ENODATA:
		_debug("cache said ENODATA");
		goto go_on;

		/* page will not be cached */
	case -ENOBUFS:
		_debug("cache said ENOBUFS");

		/* fall through */
	default:
	go_on:
		/* Build a single-page read request; the page pointer array
		 * is the inline one at the end of the struct.
		 */
		req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *),
			      GFP_KERNEL);
		if (!req)
			goto enomem;

		/* We request a full page.  If the page is a partial one at the
		 * end of the file, the server will return a short read and the
		 * unmarshalling code will clear the unfilled space.
		 */
		refcount_set(&req->usage, 1);
		req->pos = (loff_t)page->index << PAGE_SHIFT;
		req->len = PAGE_SIZE;
		req->nr_pages = 1;
		req->pages = req->array;
		req->pages[0] = page;
		get_page(page);

		/* read the contents of the file from the server into the
		 * page */
		ret = afs_fetch_data(vnode, key, req);
		afs_put_read(req);

		if (ret < 0) {
			if (ret == -ENOENT) {
				_debug("got NOENT from server"
				       " - marking file deleted and stale");
				set_bit(AFS_VNODE_DELETED, &vnode->flags);
				ret = -ESTALE;
			}

#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			BUG_ON(PageFsCache(page));

			/* Transient failures just unlock the page so the VM
			 * may retry; anything else marks a page error too.
			 */
			if (ret == -EINTR ||
			    ret == -ENOMEM ||
			    ret == -ERESTARTSYS ||
			    ret == -EAGAIN)
				goto error;
			goto io_error;
		}

		SetPageUptodate(page);

		/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
		if (PageFsCache(page) &&
		    fscache_write_page(vnode->cache, page, vnode->status.size,
				       GFP_KERNEL) != 0) {
			fscache_uncache_page(vnode->cache, page);
			BUG_ON(PageFsCache(page));
		}
#endif
		unlock_page(page);
	}

	_leave(" = 0");
	return 0;

io_error:
	SetPageError(page);
	goto error;
enomem:
	ret = -ENOMEM;
error:
	unlock_page(page);
	_leave(" = %d", ret);
	return ret;
}
387 
388 /*
389  * read page from file, directory or symlink, given a file to nominate the key
390  * to be used
391  */
392 static int afs_readpage(struct file *file, struct page *page)
393 {
394 	struct key *key;
395 	int ret;
396 
397 	if (file) {
398 		key = afs_file_key(file);
399 		ASSERT(key != NULL);
400 		ret = afs_page_filler(key, page);
401 	} else {
402 		struct inode *inode = page->mapping->host;
403 		key = afs_request_key(AFS_FS_S(inode->i_sb)->cell);
404 		if (IS_ERR(key)) {
405 			ret = PTR_ERR(key);
406 		} else {
407 			ret = afs_page_filler(key, page);
408 			key_put(key);
409 		}
410 	}
411 	return ret;
412 }
413 
/*
 * Make pages available as they're filled.
 *
 * Called per page as a fetch completes it: detach the page from the
 * request, mark it uptodate, optionally copy it into the cache, then
 * unlock it and drop the ref the request held on it.
 */
static void afs_readpages_page_done(struct afs_read *req)
{
#ifdef CONFIG_AFS_FSCACHE
	struct afs_vnode *vnode = req->vnode;
#endif
	struct page *page = req->pages[req->index];

	/* Clear the slot so request teardown won't touch this page again. */
	req->pages[req->index] = NULL;
	SetPageUptodate(page);

	/* send the page to the cache */
#ifdef CONFIG_AFS_FSCACHE
	if (PageFsCache(page) &&
	    fscache_write_page(vnode->cache, page, vnode->status.size,
			       GFP_KERNEL) != 0) {
		fscache_uncache_page(vnode->cache, page);
		BUG_ON(PageFsCache(page));
	}
#endif
	unlock_page(page);
	put_page(page);
}
439 
/*
 * Read a contiguous set of pages.
 *
 * Take as long a run of index-contiguous pages as possible from the front
 * of @pages, insert them into the page cache and fetch them from the
 * server with a single request.  Returns 0 or a negative error code.
 */
static int afs_readpages_one(struct file *file, struct address_space *mapping,
			     struct list_head *pages)
{
	struct afs_vnode *vnode = AFS_FS_I(mapping->host);
	struct afs_read *req;
	struct list_head *p;
	struct page *first, *page;
	struct key *key = afs_file_key(file);
	pgoff_t index;
	int ret, n, i;

	/* Count the number of contiguous pages at the front of the list.  Note
	 * that the list goes prev-wards rather than next-wards.
	 */
	first = lru_to_page(pages);
	index = first->index + 1;
	n = 1;
	for (p = first->lru.prev; p != pages; p = p->prev) {
		page = list_entry(p, struct page, lru);
		if (page->index != index)
			break;
		index++;
		n++;
	}

	req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *) * n,
		      GFP_NOFS);
	if (!req)
		return -ENOMEM;

	refcount_set(&req->usage, 1);
	req->vnode = vnode;
	req->page_done = afs_readpages_page_done;
	req->pos = first->index;
	req->pos <<= PAGE_SHIFT;
	req->pages = req->array;

	/* Transfer the pages to the request.  We add them in until one fails
	 * to add to the LRU and then we stop (as that'll make a hole in the
	 * contiguous run.
	 *
	 * Note that it's possible for the file size to change whilst we're
	 * doing this, but we rely on the server returning less than we asked
	 * for if the file shrank.  We also rely on this to deal with a partial
	 * page at the end of the file.
	 */
	do {
		page = lru_to_page(pages);
		list_del(&page->lru);
		index = page->index;
		if (add_to_page_cache_lru(page, mapping, index,
					  readahead_gfp_mask(mapping))) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			put_page(page);
			break;
		}

		req->pages[req->nr_pages++] = page;
		req->len += PAGE_SIZE;
	} while (req->nr_pages < n);

	/* No pages made it into the page cache, so there's nothing to do. */
	if (req->nr_pages == 0) {
		kfree(req);
		return 0;
	}

	ret = afs_fetch_data(vnode, key, req);
	if (ret < 0)
		goto error;

	task_io_account_read(PAGE_SIZE * req->nr_pages);
	afs_put_read(req);
	return 0;

error:
	if (ret == -ENOENT) {
		_debug("got NOENT from server"
		       " - marking file deleted and stale");
		set_bit(AFS_VNODE_DELETED, &vnode->flags);
		ret = -ESTALE;
	}

	/* Error out any page still attached to the request (pages that
	 * completed were detached by the page_done callback).
	 */
	for (i = 0; i < req->nr_pages; i++) {
		page = req->pages[i];
		if (page) {
#ifdef CONFIG_AFS_FSCACHE
			fscache_uncache_page(vnode->cache, page);
#endif
			SetPageError(page);
			unlock_page(page);
		}
	}

	afs_put_read(req);
	return ret;
}
541 
/*
 * read a set of pages
 *
 * Tries to satisfy as much of the readahead as possible from the cache,
 * then fetches the remaining pages from the server in contiguous runs.
 */
static int afs_readpages(struct file *file, struct address_space *mapping,
			 struct list_head *pages, unsigned nr_pages)
{
	struct key *key = afs_file_key(file);
	struct afs_vnode *vnode;
	int ret = 0;

	_enter("{%d},{%lu},,%d",
	       key_serial(key), mapping->host->i_ino, nr_pages);

	ASSERT(key != NULL);

	vnode = AFS_FS_I(mapping->host);
	if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
		_leave(" = -ESTALE");
		return -ESTALE;
	}

	/* attempt to read as many of the pages as possible */
#ifdef CONFIG_AFS_FSCACHE
	ret = fscache_read_or_alloc_pages(vnode->cache,
					  mapping,
					  pages,
					  &nr_pages,
					  afs_file_readpage_read_complete,
					  NULL,
					  mapping_gfp_mask(mapping));
#else
	ret = -ENOBUFS;
#endif

	switch (ret) {
		/* all pages are being read from the cache */
	case 0:
		BUG_ON(!list_empty(pages));
		BUG_ON(nr_pages != 0);
		_leave(" = 0 [reading all]");
		return 0;

		/* there were pages that couldn't be read from the cache */
	case -ENODATA:
	case -ENOBUFS:
		break;

		/* other error */
	default:
		_leave(" = %d", ret);
		return ret;
	}

	/* Fetch the remainder from the server, one contiguous run at a
	 * time, until the list is drained or a run fails.
	 */
	while (!list_empty(pages)) {
		ret = afs_readpages_one(file, mapping, pages);
		if (ret < 0)
			break;
	}

	_leave(" = %d [netting]", ret);
	return ret;
}
604 
605 /*
606  * invalidate part or all of a page
607  * - release a page and clean up its private data if offset is 0 (indicating
608  *   the entire page)
609  */
610 static void afs_invalidatepage(struct page *page, unsigned int offset,
611 			       unsigned int length)
612 {
613 	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
614 	unsigned long priv;
615 
616 	_enter("{%lu},%u,%u", page->index, offset, length);
617 
618 	BUG_ON(!PageLocked(page));
619 
620 	/* we clean up only if the entire page is being invalidated */
621 	if (offset == 0 && length == PAGE_SIZE) {
622 #ifdef CONFIG_AFS_FSCACHE
623 		if (PageFsCache(page)) {
624 			struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
625 			fscache_wait_on_page_write(vnode->cache, page);
626 			fscache_uncache_page(vnode->cache, page);
627 		}
628 #endif
629 
630 		if (PagePrivate(page)) {
631 			priv = page_private(page);
632 			trace_afs_page_dirty(vnode, tracepoint_string("inval"),
633 					     page->index, priv);
634 			set_page_private(page, 0);
635 			ClearPagePrivate(page);
636 		}
637 	}
638 
639 	_leave("");
640 }
641 
/*
 * release a page and clean up its private state if it's not busy
 * - return true if the page can now be released, false if not
 */
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
	unsigned long priv;

	_enter("{{%llx:%llu}[%lu],%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
	       gfp_flags);

	/* deny if page is being written to the cache and the caller hasn't
	 * elected to wait */
#ifdef CONFIG_AFS_FSCACHE
	if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
		_leave(" = F [cache busy]");
		return 0;
	}
#endif

	/* Discard the dirty-region tracking state, tracing the transition
	 * for debugging.
	 */
	if (PagePrivate(page)) {
		priv = page_private(page);
		trace_afs_page_dirty(vnode, tracepoint_string("rel"),
				     page->index, priv);
		set_page_private(page, 0);
		ClearPagePrivate(page);
	}

	/* indicate that the page can be released */
	_leave(" = T");
	return 1;
}
676 
677 /*
678  * Handle setting up a memory mapping on an AFS file.
679  */
680 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
681 {
682 	int ret;
683 
684 	ret = generic_file_mmap(file, vma);
685 	if (ret == 0)
686 		vma->vm_ops = &afs_vm_ops;
687 	return ret;
688 }
689