xref: /openbmc/linux/fs/erofs/fscache.c (revision 5bd9628b)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022, Alibaba Cloud
 */
#include <linux/fscache.h>
#include "internal.h"

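/*
 * Allocate a bare netfs_io_request describing one read from the cache.
 * It is used purely as a container here: only the fields consulted by the
 * helpers below are initialised, and the request is never handed to the
 * generic netfs library.
 */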
static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space *mapping,
					     loff_t start, size_t len)
{
	struct netfs_io_request *rreq;

	rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
	if (!rreq)
		return ERR_PTR(-ENOMEM);

	rreq->start	= start;
	rreq->len	= len;
	rreq->mapping	= mapping;
	rreq->inode	= mapping->host;
	INIT_LIST_HEAD(&rreq->subrequests);
	refcount_set(&rreq->ref, 1);
	return rreq;
}

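/* Drop one reference; on the last put, end the cache operation and free it. */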
static void erofs_fscache_put_request(struct netfs_io_request *rreq)
{
	if (!refcount_dec_and_test(&rreq->ref))
		return;
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	kfree(rreq);
}

static void erofs_fscache_put_subrequest(struct netfs_io_subrequest *subreq)
{
	if (!refcount_dec_and_test(&subreq->ref))
		return;
	erofs_fscache_put_request(subreq->rreq);
	kfree(subreq);
}

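/* Detach all subrequests from @rreq and drop the list's references to them. */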
static void erofs_fscache_clear_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	while (!list_empty(&rreq->subrequests)) {
		subreq = list_first_entry(&rreq->subrequests,
				struct netfs_io_subrequest, rreq_link);
		list_del(&subreq->rreq_link);
		erofs_fscache_put_subrequest(subreq);
	}
}

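/*
 * Walk the folios covered by @rreq in the page cache and, in lockstep, the
 * list of completed subrequests.  A folio is marked uptodate only when every
 * subrequest overlapping it finished without error; every folio is unlocked
 * regardless of the outcome.
 */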
static void erofs_fscache_rreq_unlock_folios(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	unsigned int iopos = 0;
	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	bool subreq_failed = false;

	XA_STATE(xas, &rreq->mapping->i_pages, start_page);

	subreq = list_first_entry(&rreq->subrequests,
				  struct netfs_io_subrequest, rreq_link);
	subreq_failed = (subreq->error < 0);

	rcu_read_lock();
	xas_for_each(&xas, folio, last_page) {
		unsigned int pgpos =
			(folio_index(folio) - start_page) * PAGE_SIZE;
		unsigned int pgend = pgpos + folio_size(folio);
		bool pg_failed = false;

		for (;;) {
			if (!subreq) {
				pg_failed = true;
				break;
			}

			pg_failed |= subreq_failed;
			if (pgend < iopos + subreq->len)
				break;

			iopos += subreq->len;
			if (!list_is_last(&subreq->rreq_link,
					  &rreq->subrequests)) {
				subreq = list_next_entry(subreq, rreq_link);
				subreq_failed = (subreq->error < 0);
			} else {
				subreq = NULL;
				subreq_failed = false;
			}
			if (pgend == iopos)
				break;
		}

		if (!pg_failed)
			folio_mark_uptodate(folio);

		folio_unlock(folio);
	}
	rcu_read_unlock();
}

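/* Called once all subrequests have finished: unlock the folios and tear down. */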
static void erofs_fscache_rreq_complete(struct netfs_io_request *rreq)
{
	erofs_fscache_rreq_unlock_folios(rreq);
	erofs_fscache_clear_subrequests(rreq);
	erofs_fscache_put_request(rreq);
}

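/*
 * Completion callback passed to fscache_read().  Record any error in the
 * subrequest and, when the last outstanding subrequest of the parent request
 * finishes, complete that request as well.
 */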
static void erofs_fscache_subreq_complete(void *priv,
		ssize_t transferred_or_error, bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;
	struct netfs_io_request *rreq = subreq->rreq;

	if (IS_ERR_VALUE(transferred_or_error))
		subreq->error = transferred_or_error;

	if (atomic_dec_and_test(&rreq->nr_outstanding))
		erofs_fscache_rreq_complete(rreq);

	erofs_fscache_put_subrequest(subreq);
}

/*
 * Read data from fscache and fill the page cache described by @rreq, whose
 * start offset and length shall both be aligned to PAGE_SIZE. @pstart is the
 * physical address of the data in the backing cache file.
 */
static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
				struct netfs_io_request *rreq, loff_t pstart)
{
	enum netfs_io_source source;
	struct super_block *sb = rreq->mapping->host->i_sb;
	struct netfs_io_subrequest *subreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct iov_iter iter;
	loff_t start = rreq->start;
	size_t len = rreq->len;
	size_t done = 0;
	int ret;

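	/*
	 * Hold one artificial outstanding count across the submission loop so
	 * that an early completion callback cannot finish the request before
	 * all subrequests have been issued; it is dropped at the out label.
	 */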
	atomic_set(&rreq->nr_outstanding, 1);

	ret = fscache_begin_read_operation(cres, cookie);
	if (ret)
		goto out;

	while (done < len) {
		subreq = kzalloc(sizeof(struct netfs_io_subrequest),
				 GFP_KERNEL);
		if (subreq) {
			INIT_LIST_HEAD(&subreq->rreq_link);
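			/*
			 * Two references: one held by rreq->subrequests, one
			 * consumed by the I/O completion callback.
			 */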
			refcount_set(&subreq->ref, 2);
			subreq->rreq = rreq;
			refcount_inc(&rreq->ref);
		} else {
			ret = -ENOMEM;
			goto out;
		}

		subreq->start = pstart + done;
		subreq->len = len - done;
		subreq->flags = 1 << NETFS_SREQ_ONDEMAND;

		list_add_tail(&subreq->rreq_link, &rreq->subrequests);

		source = cres->ops->prepare_read(subreq, LLONG_MAX);
		if (WARN_ON(subreq->len == 0))
			source = NETFS_INVALID_READ;
		if (source != NETFS_READ_FROM_CACHE) {
			erofs_err(sb, "failed to fscache prepare_read (source %d)",
				  source);
			ret = -EIO;
			subreq->error = ret;
			erofs_fscache_put_subrequest(subreq);
			goto out;
		}

		atomic_inc(&rreq->nr_outstanding);

		iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
				start + done, subreq->len);

		ret = fscache_read(cres, subreq->start, &iter,
				   NETFS_READ_HOLE_FAIL,
				   erofs_fscache_subreq_complete, subreq);
		if (ret == -EIOCBQUEUED)
			ret = 0;
		if (ret) {
			erofs_err(sb, "failed to fscache_read (ret %d)", ret);
			goto out;
		}

		done += subreq->len;
	}
out:
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		erofs_fscache_rreq_complete(rreq);

	return ret;
}

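/* read_folio() for the pseudo inode used to cache filesystem metadata. */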
static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
{
	int ret;
	struct super_block *sb = folio_mapping(folio)->host->i_sb;
	struct netfs_io_request *rreq;
	struct erofs_map_dev mdev = {
		.m_deviceid = 0,
		.m_pa = folio_pos(folio),
	};

	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		goto out;

	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
				folio_pos(folio), folio_size(folio));
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto out;
	}

	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
				rreq, mdev.m_pa);
out:
	folio_unlock(folio);
	return ret;
}

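/*
 * Inline (tail-packed) data lives inside a metadata block, so copy it out of
 * the metabuf directly and zero the remainder of the folio instead of going
 * through fscache.
 */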
static int erofs_fscache_read_folio_inline(struct folio *folio,
					 struct erofs_map_blocks *map)
{
	struct super_block *sb = folio_mapping(folio)->host->i_sb;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	erofs_blk_t blknr;
	size_t offset, len;
	void *src, *dst;

	/* For tail packing layout, the offset may be non-zero. */
	offset = erofs_blkoff(map->m_pa);
	blknr = erofs_blknr(map->m_pa);
	len = map->m_llen;

	src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
	if (IS_ERR(src))
		return PTR_ERR(src);

	dst = kmap_local_folio(folio, 0);
	memcpy(dst, src + offset, len);
	memset(dst + len, 0, PAGE_SIZE - len);
	kunmap_local(dst);

	erofs_put_metabuf(&buf);
	return 0;
}

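/* read_folio() for regular file data backed by fscache. */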
static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio_mapping(folio)->host;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;
	struct netfs_io_request *rreq;
	erofs_off_t pos;
	loff_t pstart;
	int ret;

	DBG_BUGON(folio_size(folio) != EROFS_BLKSIZ);

	pos = folio_pos(folio);
	map.m_la = pos;

	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret)
		goto out_unlock;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		folio_zero_range(folio, 0, folio_size(folio));
		goto out_uptodate;
	}

	if (map.m_flags & EROFS_MAP_META) {
		ret = erofs_fscache_read_folio_inline(folio, &map);
		goto out_uptodate;
	}

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};

	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		goto out_unlock;

	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
				folio_pos(folio), folio_size(folio));
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto out_unlock;
	}

	pstart = mdev.m_pa + (pos - map.m_la);
	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
				rreq, pstart);

out_uptodate:
	if (!ret)
		folio_mark_uptodate(folio);
out_unlock:
	folio_unlock(folio);
	return ret;
}

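/*
 * Consume @len bytes worth of folios from the readahead window.  When @unlock
 * is true the folios are marked uptodate and unlocked here; otherwise they
 * stay locked until the asynchronous request completes.
 */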
static void erofs_fscache_advance_folios(struct readahead_control *rac,
					 size_t len, bool unlock)
{
	while (len) {
		struct folio *folio = readahead_folio(rac);
		len -= folio_size(folio);
		if (unlock) {
			folio_mark_uptodate(folio);
			folio_unlock(folio);
		}
	}
}

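/*
 * Map the readahead window extent by extent: zero unmapped holes in place,
 * copy inline data synchronously, and issue one asynchronous cache read per
 * contiguous mapped extent.
 */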
static void erofs_fscache_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct super_block *sb = inode->i_sb;
	size_t len, count, done = 0;
	erofs_off_t pos;
	loff_t start, offset;
	int ret;

	if (!readahead_count(rac))
		return;

	start = readahead_pos(rac);
	len = readahead_length(rac);

	do {
		struct erofs_map_blocks map;
		struct erofs_map_dev mdev;
		struct netfs_io_request *rreq;

		pos = start + done;
		map.m_la = pos;

		ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (ret)
			return;

		offset = start + done;
		count = min_t(size_t, map.m_llen - (pos - map.m_la),
			      len - done);

		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			struct iov_iter iter;

			iov_iter_xarray(&iter, READ, &rac->mapping->i_pages,
					offset, count);
			iov_iter_zero(count, &iter);

			erofs_fscache_advance_folios(rac, count, true);
			ret = count;
			continue;
		}

		if (map.m_flags & EROFS_MAP_META) {
			struct folio *folio = readahead_folio(rac);

			ret = erofs_fscache_read_folio_inline(folio, &map);
			if (!ret) {
				folio_mark_uptodate(folio);
				ret = folio_size(folio);
			}

			folio_unlock(folio);
			continue;
		}

		mdev = (struct erofs_map_dev) {
			.m_deviceid = map.m_deviceid,
			.m_pa = map.m_pa,
		};
		ret = erofs_map_dev(sb, &mdev);
		if (ret)
			return;

		rreq = erofs_fscache_alloc_request(rac->mapping, offset, count);
		if (IS_ERR(rreq))
			return;
		/*
		 * Drop the folio references held by the readahead control
		 * here; the folios are unlocked in rreq_unlock_folios() once
		 * the request completes.
		 */
		erofs_fscache_advance_folios(rac, count, false);
		ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
					rreq, mdev.m_pa + (pos - map.m_la));
		if (!ret)
			ret = count;
	} while (ret > 0 && ((done += ret) < len));
}

static const struct address_space_operations erofs_fscache_meta_aops = {
	.read_folio = erofs_fscache_meta_read_folio,
};

const struct address_space_operations erofs_fscache_access_aops = {
	.read_folio = erofs_fscache_read_folio,
	.readahead = erofs_fscache_readahead,
};

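/*
 * Acquire an fscache cookie named @name under the filesystem's volume.  When
 * @need_inode is set, also allocate an anonymous inode whose page cache is
 * used to hold blob metadata via erofs_fscache_meta_aops.
 */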
int erofs_fscache_register_cookie(struct super_block *sb,
				  struct erofs_fscache **fscache,
				  char *name, bool need_inode)
{
	struct fscache_volume *volume = EROFS_SB(sb)->volume;
	struct erofs_fscache *ctx;
	struct fscache_cookie *cookie;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	cookie = fscache_acquire_cookie(volume, FSCACHE_ADV_WANT_CACHE_SIZE,
					name, strlen(name), NULL, 0, 0);
	if (!cookie) {
		erofs_err(sb, "failed to get cookie for %s", name);
		ret = -EINVAL;
		goto err;
	}

	fscache_use_cookie(cookie, false);
	ctx->cookie = cookie;

	if (need_inode) {
		struct inode *const inode = new_inode(sb);

		if (!inode) {
			erofs_err(sb, "failed to get anon inode for %s", name);
			ret = -ENOMEM;
			goto err_cookie;
		}

		set_nlink(inode, 1);
		inode->i_size = OFFSET_MAX;
		inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

		ctx->inode = inode;
	}

	*fscache = ctx;
	return 0;

err_cookie:
	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
	fscache_relinquish_cookie(ctx->cookie, false);
	ctx->cookie = NULL;
err:
	kfree(ctx);
	return ret;
}

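/* Tear down a cookie acquired by erofs_fscache_register_cookie(). */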
void erofs_fscache_unregister_cookie(struct erofs_fscache **fscache)
{
	struct erofs_fscache *ctx = *fscache;

	if (!ctx)
		return;

	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
	fscache_relinquish_cookie(ctx->cookie, false);
	ctx->cookie = NULL;

	iput(ctx->inode);
	ctx->inode = NULL;

	kfree(ctx);
	*fscache = NULL;
}

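/* Register an fscache volume for the mount, keyed as "erofs,<fsid>". */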
int erofs_fscache_register_fs(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct fscache_volume *volume;
	char *name;
	int ret = 0;

	name = kasprintf(GFP_KERNEL, "erofs,%s", sbi->opt.fsid);
	if (!name)
		return -ENOMEM;

	volume = fscache_acquire_volume(name, NULL, NULL, 0);
	if (IS_ERR_OR_NULL(volume)) {
		erofs_err(sb, "failed to register volume for %s", name);
		ret = volume ? PTR_ERR(volume) : -EOPNOTSUPP;
		volume = NULL;
	}

	sbi->volume = volume;
	kfree(name);
	return ret;
}

void erofs_fscache_unregister_fs(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	fscache_relinquish_volume(sbi->volume, NULL, false);
	sbi->volume = NULL;
}