xref: /openbmc/linux/fs/erofs/fscache.c (revision d435d532)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022, Alibaba Cloud
 */
#include <linux/fscache.h>
#include "internal.h"

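/*
 * EROFS does not go through the full netfs read machinery; it only borrows
 * struct netfs_io_request as a container for one read against the page
 * cache, so requests are allocated and refcounted by hand below.
 */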
static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space *mapping,
					     loff_t start, size_t len)
{
	struct netfs_io_request *rreq;

	rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
	if (!rreq)
		return ERR_PTR(-ENOMEM);

	rreq->start	= start;
	rreq->len	= len;
	rreq->mapping	= mapping;
	INIT_LIST_HEAD(&rreq->subrequests);
	refcount_set(&rreq->ref, 1);
	return rreq;
}

static void erofs_fscache_put_request(struct netfs_io_request *rreq)
{
	if (!refcount_dec_and_test(&rreq->ref))
		return;
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	kfree(rreq);
}

static void erofs_fscache_put_subrequest(struct netfs_io_subrequest *subreq)
{
	if (!refcount_dec_and_test(&subreq->ref))
		return;
	erofs_fscache_put_request(subreq->rreq);
	kfree(subreq);
}

static void erofs_fscache_clear_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	while (!list_empty(&rreq->subrequests)) {
		subreq = list_first_entry(&rreq->subrequests,
				struct netfs_io_subrequest, rreq_link);
		list_del(&subreq->rreq_link);
		erofs_fscache_put_subrequest(subreq);
	}
}

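/*
 * Walk all folios covered by @rreq, mark each one uptodate iff every
 * subrequest overlapping it completed successfully, and unlock it.
 * Subrequests are laid out back to back over [rreq->start, rreq->start +
 * rreq->len), so the folio walk and the subrequest list advance in
 * lockstep.
 */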
static void erofs_fscache_rreq_unlock_folios(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	unsigned int iopos = 0;
	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	bool subreq_failed = false;

	XA_STATE(xas, &rreq->mapping->i_pages, start_page);

	subreq = list_first_entry(&rreq->subrequests,
				  struct netfs_io_subrequest, rreq_link);
	subreq_failed = (subreq->error < 0);

	rcu_read_lock();
	xas_for_each(&xas, folio, last_page) {
		unsigned int pgpos =
			(folio_index(folio) - start_page) * PAGE_SIZE;
		unsigned int pgend = pgpos + folio_size(folio);
		bool pg_failed = false;

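		/*
		 * Accumulate the error state of every subrequest that
		 * overlaps this folio, advancing the subrequest cursor
		 * whenever the folio extends past the current one.
		 */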
		for (;;) {
			if (!subreq) {
				pg_failed = true;
				break;
			}

			pg_failed |= subreq_failed;
			if (pgend < iopos + subreq->len)
				break;

			iopos += subreq->len;
			if (!list_is_last(&subreq->rreq_link,
					  &rreq->subrequests)) {
				subreq = list_next_entry(subreq, rreq_link);
				subreq_failed = (subreq->error < 0);
			} else {
				subreq = NULL;
				subreq_failed = false;
			}
			if (pgend == iopos)
				break;
		}

		if (!pg_failed)
			folio_mark_uptodate(folio);

		folio_unlock(folio);
	}
	rcu_read_unlock();
}

static void erofs_fscache_rreq_complete(struct netfs_io_request *rreq)
{
	erofs_fscache_rreq_unlock_folios(rreq);
	erofs_fscache_clear_subrequests(rreq);
	erofs_fscache_put_request(rreq);
}

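/*
 * Completion callback passed to fscache_read(): record any error on the
 * subrequest and, once the last outstanding subrequest finishes, complete
 * the whole request.
 */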
static void erofs_fscache_subreq_complete(void *priv,
		ssize_t transferred_or_error, bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;
	struct netfs_io_request *rreq = subreq->rreq;

	if (IS_ERR_VALUE(transferred_or_error))
		subreq->error = transferred_or_error;

	if (atomic_dec_and_test(&rreq->nr_outstanding))
		erofs_fscache_rreq_complete(rreq);

	erofs_fscache_put_subrequest(subreq);
}

/*
 * Read data from fscache into the page cache described by @rreq; both the
 * start and the length of @rreq must be PAGE_SIZE-aligned. @pstart is the
 * starting physical address in the cache file.
 */
static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
				struct netfs_io_request *rreq, loff_t pstart)
{
	enum netfs_io_source source;
	struct super_block *sb = rreq->mapping->host->i_sb;
	struct netfs_io_subrequest *subreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct iov_iter iter;
	loff_t start = rreq->start;
	size_t len = rreq->len;
	size_t done = 0;
	int ret;

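	/*
	 * Bias nr_outstanding by one for the submission path itself, so
	 * the request cannot complete before every subrequest has been
	 * issued; the bias is dropped at the "out" label below.
	 */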
	atomic_set(&rreq->nr_outstanding, 1);

	ret = fscache_begin_read_operation(cres, cookie);
	if (ret)
		goto out;

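	/*
	 * Split [start, start + len) into subrequests; prepare_read()
	 * trims each subrequest to the contiguous extent the cache can
	 * serve in one go.
	 */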
	while (done < len) {
		subreq = kzalloc(sizeof(struct netfs_io_subrequest),
				 GFP_KERNEL);
		if (!subreq) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&subreq->rreq_link);
		refcount_set(&subreq->ref, 2);
		subreq->rreq = rreq;
		refcount_inc(&rreq->ref);

		subreq->start = pstart + done;
		subreq->len = len - done;
		subreq->flags = 1 << NETFS_SREQ_ONDEMAND;

		list_add_tail(&subreq->rreq_link, &rreq->subrequests);

		source = cres->ops->prepare_read(subreq, LLONG_MAX);
		if (WARN_ON(subreq->len == 0))
			source = NETFS_INVALID_READ;
		if (source != NETFS_READ_FROM_CACHE) {
			erofs_err(sb, "failed to fscache prepare_read (source %d)",
				  source);
			ret = -EIO;
			subreq->error = ret;
			erofs_fscache_put_subrequest(subreq);
			goto out;
		}

		atomic_inc(&rreq->nr_outstanding);

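		/*
		 * The iterator targets the page cache of the original
		 * mapping at logical offset start + done, while the read
		 * itself is issued at the physical offset subreq->start
		 * inside the cache file.
		 */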
		iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
				start + done, subreq->len);

		ret = fscache_read(cres, subreq->start, &iter,
				   NETFS_READ_HOLE_FAIL,
				   erofs_fscache_subreq_complete, subreq);
		if (ret == -EIOCBQUEUED)
			ret = 0;
		if (ret) {
			erofs_err(sb, "failed to fscache_read (ret %d)", ret);
			goto out;
		}

		done += subreq->len;
	}
out:
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		erofs_fscache_rreq_complete(rreq);

	return ret;
}

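/*
 * Read a metadata folio through the pseudo inode backing the blob; for
 * metadata, the physical address in the cache file is derived directly
 * from the folio position via erofs_map_dev().
 */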
static int erofs_fscache_meta_readpage(struct file *data, struct page *page)
{
	int ret;
	struct folio *folio = page_folio(page);
	struct super_block *sb = folio_mapping(folio)->host->i_sb;
	struct netfs_io_request *rreq;
	struct erofs_map_dev mdev = {
		.m_deviceid = 0,
		.m_pa = folio_pos(folio),
	};

	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		goto out;

	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
				folio_pos(folio), folio_size(folio));
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto out;
	}

	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
				rreq, mdev.m_pa);
out:
	folio_unlock(folio);
	return ret;
}

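/*
 * Inline (tail-packed) data lives inside the metadata area, so it is
 * copied out of the metadata buffer directly rather than read through
 * fscache; the remainder of the page is zeroed.
 */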
static int erofs_fscache_readpage_inline(struct folio *folio,
					 struct erofs_map_blocks *map)
{
	struct super_block *sb = folio_mapping(folio)->host->i_sb;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	erofs_blk_t blknr;
	size_t offset, len;
	void *src, *dst;

	/* For tail packing layout, the offset may be non-zero. */
	offset = erofs_blkoff(map->m_pa);
	blknr = erofs_blknr(map->m_pa);
	len = map->m_llen;

	src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
	if (IS_ERR(src))
		return PTR_ERR(src);

	dst = kmap_local_folio(folio, 0);
	memcpy(dst, src + offset, len);
	memset(dst + len, 0, PAGE_SIZE - len);
	kunmap_local(dst);

	erofs_put_metabuf(&buf);
	return 0;
}

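/*
 * ->readpage() for regular file data: map the logical folio position to
 * a (device, physical address) pair, then either zero an unmapped hole,
 * copy inline data, or read the extent from the cache file.
 */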
static int erofs_fscache_readpage(struct file *file, struct page *page)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = folio_mapping(folio)->host;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;
	struct netfs_io_request *rreq;
	erofs_off_t pos;
	loff_t pstart;
	int ret;

	DBG_BUGON(folio_size(folio) != EROFS_BLKSIZ);

	pos = folio_pos(folio);
	map.m_la = pos;

	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret)
		goto out_unlock;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		folio_zero_range(folio, 0, folio_size(folio));
		goto out_uptodate;
	}

	if (map.m_flags & EROFS_MAP_META) {
		ret = erofs_fscache_readpage_inline(folio, &map);
		goto out_uptodate;
	}

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};

	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		goto out_unlock;

	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
				folio_pos(folio), folio_size(folio));
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto out_unlock;
	}

	pstart = mdev.m_pa + (pos - map.m_la);
	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
				rreq, pstart);

out_uptodate:
	if (!ret)
		folio_mark_uptodate(folio);
out_unlock:
	folio_unlock(folio);
	return ret;
}

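/*
 * Consume @len bytes worth of folios from the readahead control; when
 * @unlock is true they are marked uptodate and unlocked here, otherwise
 * they stay locked for the asynchronous completion path.
 */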
static void erofs_fscache_advance_folios(struct readahead_control *rac,
					 size_t len, bool unlock)
{
	while (len) {
		struct folio *folio = readahead_folio(rac);
		len -= folio_size(folio);
		if (unlock) {
			folio_mark_uptodate(folio);
			folio_unlock(folio);
		}
	}
}

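/*
 * ->readahead(): iterate over the requested range extent by extent,
 * handling holes and inline data synchronously and dispatching mapped
 * extents as asynchronous fscache reads.
 */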
static void erofs_fscache_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct super_block *sb = inode->i_sb;
	size_t len, count, done = 0;
	erofs_off_t pos;
	loff_t start, offset;
	int ret;

	if (!readahead_count(rac))
		return;

	start = readahead_pos(rac);
	len = readahead_length(rac);

	do {
		struct erofs_map_blocks map;
		struct erofs_map_dev mdev;
		struct netfs_io_request *rreq;

		pos = start + done;
		map.m_la = pos;

		ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (ret)
			return;

		offset = start + done;
		count = min_t(size_t, map.m_llen - (pos - map.m_la),
			      len - done);

		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			struct iov_iter iter;

			iov_iter_xarray(&iter, READ, &rac->mapping->i_pages,
					offset, count);
			iov_iter_zero(count, &iter);

			erofs_fscache_advance_folios(rac, count, true);
			ret = count;
			continue;
		}

		if (map.m_flags & EROFS_MAP_META) {
			struct folio *folio = readahead_folio(rac);

			ret = erofs_fscache_readpage_inline(folio, &map);
			if (!ret) {
				folio_mark_uptodate(folio);
				ret = folio_size(folio);
			}

			folio_unlock(folio);
			continue;
		}

		mdev = (struct erofs_map_dev) {
			.m_deviceid = map.m_deviceid,
			.m_pa = map.m_pa,
		};
		ret = erofs_map_dev(sb, &mdev);
		if (ret)
			return;

		rreq = erofs_fscache_alloc_request(rac->mapping, offset, count);
		if (IS_ERR(rreq))
			return;
		/*
		 * Drop the folio references here; they are unlocked in
		 * rreq_unlock_folios() once the request completes.
		 */
		erofs_fscache_advance_folios(rac, count, false);
		ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
					rreq, mdev.m_pa + (pos - map.m_la));
		if (!ret)
			ret = count;
	} while (ret > 0 && ((done += ret) < len));
}

static const struct address_space_operations erofs_fscache_meta_aops = {
	.readpage = erofs_fscache_meta_readpage,
};

const struct address_space_operations erofs_fscache_access_aops = {
	.readpage = erofs_fscache_readpage,
	.readahead = erofs_fscache_readahead,
};

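/*
 * Acquire an fscache cookie named @name under the filesystem's volume.
 * When @need_inode is set, an anonymous inode is also allocated so the
 * blob gets its own page-cache mapping (used for metadata blobs).
 */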
int erofs_fscache_register_cookie(struct super_block *sb,
				  struct erofs_fscache **fscache,
				  char *name, bool need_inode)
{
	struct fscache_volume *volume = EROFS_SB(sb)->volume;
	struct erofs_fscache *ctx;
	struct fscache_cookie *cookie;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	cookie = fscache_acquire_cookie(volume, FSCACHE_ADV_WANT_CACHE_SIZE,
					name, strlen(name), NULL, 0, 0);
	if (!cookie) {
		erofs_err(sb, "failed to get cookie for %s", name);
		ret = -EINVAL;
		goto err;
	}

	fscache_use_cookie(cookie, false);
	ctx->cookie = cookie;

	if (need_inode) {
		struct inode *const inode = new_inode(sb);

		if (!inode) {
			erofs_err(sb, "failed to get anon inode for %s", name);
			ret = -ENOMEM;
			goto err_cookie;
		}

		set_nlink(inode, 1);
		inode->i_size = OFFSET_MAX;
		inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

		ctx->inode = inode;
	}

	*fscache = ctx;
	return 0;

err_cookie:
	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
	fscache_relinquish_cookie(ctx->cookie, false);
	ctx->cookie = NULL;
err:
	kfree(ctx);
	return ret;
}

void erofs_fscache_unregister_cookie(struct erofs_fscache **fscache)
{
	struct erofs_fscache *ctx = *fscache;

	if (!ctx)
		return;

	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
	fscache_relinquish_cookie(ctx->cookie, false);
	ctx->cookie = NULL;

	iput(ctx->inode);
	ctx->inode = NULL;

	kfree(ctx);
	*fscache = NULL;
}

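/*
 * Register an fscache volume keyed as "erofs,<fsid>"; all cookies of
 * this filesystem are acquired under that volume.
 */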
int erofs_fscache_register_fs(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct fscache_volume *volume;
	char *name;
	int ret = 0;

	name = kasprintf(GFP_KERNEL, "erofs,%s", sbi->opt.fsid);
	if (!name)
		return -ENOMEM;

	volume = fscache_acquire_volume(name, NULL, NULL, 0);
	if (IS_ERR_OR_NULL(volume)) {
		erofs_err(sb, "failed to register volume for %s", name);
		ret = volume ? PTR_ERR(volume) : -EOPNOTSUPP;
		volume = NULL;
	}

	sbi->volume = volume;
	kfree(name);
	return ret;
}

void erofs_fscache_unregister_fs(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	fscache_relinquish_volume(sbi->volume, NULL, false);
	sbi->volume = NULL;
}