xref: /openbmc/linux/fs/afs/file.c (revision 6fcd6fea)
// SPDX-License-Identifier: GPL-2.0-or-later
/* AFS filesystem file handling
 *
 * Copyright (C) 2002, 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/gfp.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/netfs.h>
#include "internal.h"

static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
static int afs_symlink_read_folio(struct file *file, struct folio *folio);
static void afs_invalidate_folio(struct folio *folio, size_t offset,
				 size_t length);
static bool afs_release_folio(struct folio *folio, gfp_t gfp);

static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags);
static void afs_vm_open(struct vm_area_struct *area);
static void afs_vm_close(struct vm_area_struct *area);
static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff);

const struct file_operations afs_file_operations = {
	.open		= afs_open,
	.release	= afs_release,
	.llseek		= generic_file_llseek,
	.read_iter	= afs_file_read_iter,
	.write_iter	= afs_file_write,
	.mmap		= afs_file_mmap,
	.splice_read	= afs_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fsync		= afs_fsync,
	.lock		= afs_lock,
	.flock		= afs_flock,
};

const struct inode_operations afs_file_inode_operations = {
	.getattr	= afs_getattr,
	.setattr	= afs_setattr,
	.permission	= afs_permission,
};

const struct address_space_operations afs_file_aops = {
	.read_folio	= netfs_read_folio,
	.readahead	= netfs_readahead,
	.dirty_folio	= afs_dirty_folio,
	.launder_folio	= afs_launder_folio,
	.release_folio	= afs_release_folio,
	.invalidate_folio = afs_invalidate_folio,
	.write_begin	= afs_write_begin,
	.write_end	= afs_write_end,
	.writepages	= afs_writepages,
	.migrate_folio	= filemap_migrate_folio,
};

const struct address_space_operations afs_symlink_aops = {
	.read_folio	= afs_symlink_read_folio,
	.release_folio	= afs_release_folio,
	.invalidate_folio = afs_invalidate_folio,
	.migrate_folio	= filemap_migrate_folio,
};

static const struct vm_operations_struct afs_vm_ops = {
	.open		= afs_vm_open,
	.close		= afs_vm_close,
	.fault		= filemap_fault,
	.map_pages	= afs_vm_map_pages,
	.page_mkwrite	= afs_page_mkwrite,
};

/*
 * Discard a pin on a writeback key.
 */
void afs_put_wb_key(struct afs_wb_key *wbk)
{
	if (wbk && refcount_dec_and_test(&wbk->usage)) {
		key_put(wbk->key);
		kfree(wbk);
	}
}

/*
 * Cache a key to be used for writeback.
 */
int afs_cache_wb_key(struct afs_vnode *vnode, struct afs_file *af)
{
	struct afs_wb_key *wbk, *p;

	wbk = kzalloc(sizeof(struct afs_wb_key), GFP_KERNEL);
	if (!wbk)
		return -ENOMEM;
	refcount_set(&wbk->usage, 2);
	wbk->key = af->key;

	spin_lock(&vnode->wb_lock);
	list_for_each_entry(p, &vnode->wb_keys, vnode_link) {
		if (p->key == wbk->key)
			goto found;
	}

	key_get(wbk->key);
	list_add_tail(&wbk->vnode_link, &vnode->wb_keys);
	spin_unlock(&vnode->wb_lock);
	af->wb = wbk;
	return 0;

found:
	refcount_inc(&p->usage);
	spin_unlock(&vnode->wb_lock);
	af->wb = p;
	kfree(wbk);
	return 0;
}

/*
 * open an AFS file or directory and attach a key to it
 */
int afs_open(struct inode *inode, struct file *file)
{
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af;
	struct key *key;
	int ret;

	_enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);

	key = afs_request_key(vnode->volume->cell);
	if (IS_ERR(key)) {
		ret = PTR_ERR(key);
		goto error;
	}

	af = kzalloc(sizeof(*af), GFP_KERNEL);
	if (!af) {
		ret = -ENOMEM;
		goto error_key;
	}
	af->key = key;

	ret = afs_validate(vnode, key);
	if (ret < 0)
		goto error_af;

	if (file->f_mode & FMODE_WRITE) {
		ret = afs_cache_wb_key(vnode, af);
		if (ret < 0)
			goto error_af;
	}

	if (file->f_flags & O_TRUNC)
		set_bit(AFS_VNODE_NEW_CONTENT, &vnode->flags);

	fscache_use_cookie(afs_vnode_cache(vnode), file->f_mode & FMODE_WRITE);

	file->private_data = af;
	_leave(" = 0");
	return 0;

error_af:
	kfree(af);
error_key:
	key_put(key);
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * release an AFS file or directory and discard its key
 */
int afs_release(struct inode *inode, struct file *file)
{
	struct afs_vnode_cache_aux aux;
	struct afs_vnode *vnode = AFS_FS_I(inode);
	struct afs_file *af = file->private_data;
	loff_t i_size;
	int ret = 0;

	_enter("{%llx:%llu},", vnode->fid.vid, vnode->fid.vnode);

	if (file->f_mode & FMODE_WRITE)
		ret = vfs_fsync(file, 0);

	file->private_data = NULL;
	if (af->wb)
		afs_put_wb_key(af->wb);

	if (file->f_mode & FMODE_WRITE) {
		i_size = i_size_read(&vnode->netfs.inode);
		afs_set_cache_aux(vnode, &aux);
		fscache_unuse_cookie(afs_vnode_cache(vnode), &aux, &i_size);
	} else {
		fscache_unuse_cookie(afs_vnode_cache(vnode), NULL, NULL);
	}

	key_put(af->key);
	kfree(af);
	afs_prune_wb_keys(vnode);
	_leave(" = %d", ret);
	return ret;
}

/*
 * Allocate a new read record.
 */
struct afs_read *afs_alloc_read(gfp_t gfp)
{
	struct afs_read *req;

	req = kzalloc(sizeof(struct afs_read), gfp);
	if (req)
		refcount_set(&req->usage, 1);

	return req;
}

/*
 * Dispose of a ref to a read record.
 */
void afs_put_read(struct afs_read *req)
{
	if (refcount_dec_and_test(&req->usage)) {
		if (req->cleanup)
			req->cleanup(req);
		key_put(req->key);
		kfree(req);
	}
}

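/*
 * Notify the netfs layer or the read's done handler of the result of a fetch
 * operation, converting any abort code into a local error first.
 */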
static void afs_fetch_data_notify(struct afs_operation *op)
{
	struct afs_read *req = op->fetch.req;
	struct netfs_io_subrequest *subreq = req->subreq;
	int error = op->error;

	if (error == -ECONNABORTED)
		error = afs_abort_to_error(op->ac.abort_code);
	req->error = error;

	if (subreq) {
		__set_bit(NETFS_SREQ_CLEAR_TAIL, &subreq->flags);
		netfs_subreq_terminated(subreq, error ?: req->actual_len, false);
		req->subreq = NULL;
	} else if (req->done) {
		req->done(req);
	}
}

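/*
 * Handle a successful fetch: commit the returned status to the vnode, bump
 * the fetch statistics and pass on the result.
 */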
static void afs_fetch_data_success(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	_enter("op=%08x", op->debug_id);
	afs_vnode_commit_status(op, &op->file[0]);
	afs_stat_v(vnode, n_fetches);
	atomic_long_add(op->fetch.req->actual_len, &op->net->n_fetch_bytes);
	afs_fetch_data_notify(op);
}

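/*
 * Record the operation result in the read record and discard the operation's
 * ref on it.
 */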
static void afs_fetch_data_put(struct afs_operation *op)
{
	op->fetch.req->error = op->error;
	afs_put_read(op->fetch.req);
}

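/*
 * Operation dispatch table for fetching data from a file.
 */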
static const struct afs_operation_ops afs_fetch_data_operation = {
	.issue_afs_rpc	= afs_fs_fetch_data,
	.issue_yfs_rpc	= yfs_fs_fetch_data,
	.success	= afs_fetch_data_success,
	.aborted	= afs_check_for_remote_deletion,
	.failed		= afs_fetch_data_notify,
	.put		= afs_fetch_data_put,
};

/*
 * Fetch file data from the volume.
 */
int afs_fetch_data(struct afs_vnode *vnode, struct afs_read *req)
{
	struct afs_operation *op;

	_enter("%s{%llx:%llu.%u},%x,,,",
	       vnode->volume->name,
	       vnode->fid.vid,
	       vnode->fid.vnode,
	       vnode->fid.unique,
	       key_serial(req->key));

	op = afs_alloc_operation(req->key, vnode->volume);
	if (IS_ERR(op)) {
		if (req->subreq)
			netfs_subreq_terminated(req->subreq, PTR_ERR(op), false);
		return PTR_ERR(op);
	}

	afs_op_set_vnode(op, 0, vnode);

	op->fetch.req	= afs_get_read(req);
	op->ops		= &afs_fetch_data_operation;
	return afs_do_sync_operation(op);
}

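/*
 * Issue a read against the server to satisfy a subrequest on behalf of the
 * netfs library.
 */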
static void afs_issue_read(struct netfs_io_subrequest *subreq)
{
	struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
	struct afs_read *fsreq;

	fsreq = afs_alloc_read(GFP_NOFS);
	if (!fsreq)
		return netfs_subreq_terminated(subreq, -ENOMEM, false);

	fsreq->subreq	= subreq;
	fsreq->pos	= subreq->start + subreq->transferred;
	fsreq->len	= subreq->len   - subreq->transferred;
	fsreq->key	= key_get(subreq->rreq->netfs_priv);
	fsreq->vnode	= vnode;
	fsreq->iter	= &fsreq->def_iter;

	iov_iter_xarray(&fsreq->def_iter, ITER_DEST,
			&fsreq->vnode->netfs.inode.i_mapping->i_pages,
			fsreq->pos, fsreq->len);

	afs_fetch_data(fsreq->vnode, fsreq);
	afs_put_read(fsreq);
}

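/*
 * Read the contents of a symlink from the server directly into a folio,
 * bypassing the netfs library.
 */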
static int afs_symlink_read_folio(struct file *file, struct folio *folio)
{
	struct afs_vnode *vnode = AFS_FS_I(folio->mapping->host);
	struct afs_read *fsreq;
	int ret;

	fsreq = afs_alloc_read(GFP_NOFS);
	if (!fsreq)
		return -ENOMEM;

	fsreq->pos	= folio_pos(folio);
	fsreq->len	= folio_size(folio);
	fsreq->vnode	= vnode;
	fsreq->iter	= &fsreq->def_iter;
	iov_iter_xarray(&fsreq->def_iter, ITER_DEST, &folio->mapping->i_pages,
			fsreq->pos, fsreq->len);

	ret = afs_fetch_data(fsreq->vnode, fsreq);
	if (ret == 0)
		folio_mark_uptodate(folio);
	folio_unlock(folio);
	/* Drop the ref from afs_alloc_read(); afs_fetch_data() took its own,
	 * as in afs_issue_read() above.
	 */
	afs_put_read(fsreq);
	return ret;
}

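/*
 * Initialise a netfs read request, pinning the key of the file it was made
 * from.
 */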
static int afs_init_request(struct netfs_io_request *rreq, struct file *file)
{
	rreq->netfs_priv = key_get(afs_file_key(file));
	return 0;
}

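/*
 * Begin a read operation on the local cache, or give -ENOBUFS if caching
 * isn't compiled in.
 */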
static int afs_begin_cache_operation(struct netfs_io_request *rreq)
{
#ifdef CONFIG_AFS_FSCACHE
	struct afs_vnode *vnode = AFS_FS_I(rreq->inode);

	return fscache_begin_read_operation(&rreq->cache_resources,
					    afs_vnode_cache(vnode));
#else
	return -ENOBUFS;
#endif
}

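/*
 * Check before a write begins that the file hasn't been deleted on the
 * server.
 */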
static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len,
				 struct folio **foliop, void **_fsdata)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));

	return test_bit(AFS_VNODE_DELETED, &vnode->flags) ? -ESTALE : 0;
}

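/*
 * Clean up a netfs read request, dropping the ref on the key it pinned.
 */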
static void afs_free_request(struct netfs_io_request *rreq)
{
	key_put(rreq->netfs_priv);
}

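/*
 * Operations used by the netfs library to drive reads on AFS files.
 */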
const struct netfs_request_ops afs_req_ops = {
	.init_request		= afs_init_request,
	.free_request		= afs_free_request,
	.begin_cache_operation	= afs_begin_cache_operation,
	.check_write_begin	= afs_check_write_begin,
	.issue_read		= afs_issue_read,
};

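/*
 * Write the inode back - for AFS, all that's needed here is to drop the pin
 * on the inode's cache cookie that was taken for writeback; the data itself
 * is written back by ->writepages().
 */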
int afs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	fscache_unpin_writeback(wbc, afs_vnode_cache(AFS_FS_I(inode)));
	return 0;
}

/*
 * Adjust the dirty region of the folio on truncation or full invalidation,
 * getting rid of the markers altogether if the region is entirely invalidated.
 */
static void afs_invalidate_dirty(struct folio *folio, size_t offset,
				 size_t length)
{
	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
	unsigned long priv;
	unsigned int f, t, end = offset + length;

	priv = (unsigned long)folio_get_private(folio);

	/* we clean up only if the entire folio is being invalidated */
	if (offset == 0 && length == folio_size(folio))
		goto full_invalidate;

	/* If the folio was dirtied by page_mkwrite(), the PTE stays writable
	 * and we don't get another notification to tell us to expand it
	 * again.
	 */
	if (afs_is_folio_dirty_mmapped(priv))
		return;

	/* We may need to shorten the dirty region */
	f = afs_folio_dirty_from(folio, priv);
	t = afs_folio_dirty_to(folio, priv);

	if (t <= offset || f >= end)
		return; /* Doesn't overlap */

	if (f < offset && t > end)
		return; /* Splits the dirty region - just absorb it */

	if (f >= offset && t <= end)
		goto undirty;

	if (f < offset)
		t = offset;
	else
		f = end;
	if (f == t)
		goto undirty;

	priv = afs_folio_dirty(folio, f, t);
	folio_change_private(folio, (void *)priv);
	trace_afs_folio_dirty(vnode, tracepoint_string("trunc"), folio);
	return;

undirty:
	trace_afs_folio_dirty(vnode, tracepoint_string("undirty"), folio);
	folio_clear_dirty_for_io(folio);
full_invalidate:
	trace_afs_folio_dirty(vnode, tracepoint_string("inval"), folio);
	folio_detach_private(folio);
}

/*
 * Invalidate part or all of a folio.
 * - Clean up the folio's private dirty-region data if the entire folio is
 *   being invalidated; otherwise trim the recorded dirty region.
 */
static void afs_invalidate_folio(struct folio *folio, size_t offset,
				 size_t length)
{
	_enter("{%lu},%zu,%zu", folio->index, offset, length);

	BUG_ON(!folio_test_locked(folio));

	if (folio_get_private(folio))
		afs_invalidate_dirty(folio, offset, length);

	folio_wait_fscache(folio);
	_leave("");
}

/*
 * Release a folio and clean up its private state if it's not busy.
 * - Return true if the folio can now be released, false if not.
 */
static bool afs_release_folio(struct folio *folio, gfp_t gfp)
{
	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));

	_enter("{{%llx:%llu}[%lu],%lx},%x",
	       vnode->fid.vid, vnode->fid.vnode, folio_index(folio), folio->flags,
	       gfp);

	/* Deny if the folio is being written to the cache and the caller
	 * hasn't elected to wait.
	 */
#ifdef CONFIG_AFS_FSCACHE
	if (folio_test_fscache(folio)) {
		if (current_is_kswapd() || !(gfp & __GFP_FS))
			return false;
		folio_wait_fscache(folio);
	}
	fscache_note_page_release(afs_vnode_cache(vnode));
#endif

	if (folio_test_private(folio)) {
		trace_afs_folio_dirty(vnode, tracepoint_string("rel"), folio);
		folio_detach_private(folio);
	}

	/* Indicate that the folio can be released */
	_leave(" = T");
	return true;
}

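/*
 * Note that a vnode has been memory-mapped, adding it to its cell's list of
 * mmapped vnodes on the first mapping.
 */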
static void afs_add_open_mmap(struct afs_vnode *vnode)
{
	if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
		down_write(&vnode->volume->cell->fs_open_mmaps_lock);

		if (list_empty(&vnode->cb_mmap_link))
			list_add_tail(&vnode->cb_mmap_link,
				      &vnode->volume->cell->fs_open_mmaps);

		up_write(&vnode->volume->cell->fs_open_mmaps_lock);
	}
}

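/*
 * Note that a memory mapping on a vnode has gone away, removing the vnode
 * from its cell's list when the last mapping is dropped.
 */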
static void afs_drop_open_mmap(struct afs_vnode *vnode)
{
	if (atomic_add_unless(&vnode->cb_nr_mmap, -1, 1))
		return;

	down_write(&vnode->volume->cell->fs_open_mmaps_lock);

	read_seqlock_excl(&vnode->cb_lock);
	/* The only place where ->cb_nr_mmap may hit 0.  See
	 * __afs_break_callback() for the other side.
	 */
	if (atomic_dec_and_test(&vnode->cb_nr_mmap))
		list_del_init(&vnode->cb_mmap_link);
	read_sequnlock_excl(&vnode->cb_lock);

	up_write(&vnode->volume->cell->fs_open_mmaps_lock);
	flush_work(&vnode->cb_work);
}

549  * Handle setting up a memory mapping on an AFS file.
550  */
551 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma)
552 {
553 	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
554 	int ret;
555 
556 	afs_add_open_mmap(vnode);
557 
558 	ret = generic_file_mmap(file, vma);
559 	if (ret == 0)
560 		vma->vm_ops = &afs_vm_ops;
561 	else
562 		afs_drop_open_mmap(vnode);
563 	return ret;
564 }
565 
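/*
 * Handle a VMA being opened, noting the additional mapping on the vnode.
 */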
static void afs_vm_open(struct vm_area_struct *vma)
{
	afs_add_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
}

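/*
 * Handle a VMA being closed, dropping a mapping on the vnode.
 */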
static void afs_vm_close(struct vm_area_struct *vma)
{
	afs_drop_open_mmap(AFS_FS_I(file_inode(vma->vm_file)));
}

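/*
 * Map pages around a fault, provided the pagecache for this vnode is still
 * believed valid.
 */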
static vm_fault_t afs_vm_map_pages(struct vm_fault *vmf, pgoff_t start_pgoff, pgoff_t end_pgoff)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(vmf->vma->vm_file));

	if (afs_pagecache_valid(vnode))
		return filemap_map_pages(vmf, start_pgoff, end_pgoff);
	return 0;
}

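/*
 * Read from an AFS file, revalidating the vnode against the server first.
 */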
static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(iocb->ki_filp));
	struct afs_file *af = iocb->ki_filp->private_data;
	int ret;

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return generic_file_read_iter(iocb, iter);
}

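/*
 * Splice data from an AFS file into a pipe, revalidating the vnode against
 * the server first.
 */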
static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos,
				    struct pipe_inode_info *pipe,
				    size_t len, unsigned int flags)
{
	struct afs_vnode *vnode = AFS_FS_I(file_inode(in));
	struct afs_file *af = in->private_data;
	int ret;

	ret = afs_validate(vnode, af->key);
	if (ret < 0)
		return ret;

	return filemap_splice_read(in, ppos, pipe, len, flags);
}