xref: /openbmc/linux/fs/erofs/fscache.c (revision fdaf9a5840acaab18694a19e0eb0aa51162eeeed)
1c6be2bd0SJeffle Xu // SPDX-License-Identifier: GPL-2.0-or-later
2c6be2bd0SJeffle Xu /*
3c6be2bd0SJeffle Xu  * Copyright (C) 2022, Alibaba Cloud
4c6be2bd0SJeffle Xu  */
5c6be2bd0SJeffle Xu #include <linux/fscache.h>
6c6be2bd0SJeffle Xu #include "internal.h"
7c6be2bd0SJeffle Xu 
8d435d532SXin Yin static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space *mapping,
9d435d532SXin Yin 					     loff_t start, size_t len)
10d435d532SXin Yin {
11d435d532SXin Yin 	struct netfs_io_request *rreq;
12d435d532SXin Yin 
13d435d532SXin Yin 	rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
14d435d532SXin Yin 	if (!rreq)
15d435d532SXin Yin 		return ERR_PTR(-ENOMEM);
16d435d532SXin Yin 
17d435d532SXin Yin 	rreq->start	= start;
18d435d532SXin Yin 	rreq->len	= len;
19d435d532SXin Yin 	rreq->mapping	= mapping;
20d435d532SXin Yin 	INIT_LIST_HEAD(&rreq->subrequests);
21d435d532SXin Yin 	refcount_set(&rreq->ref, 1);
22d435d532SXin Yin 	return rreq;
23d435d532SXin Yin }
24d435d532SXin Yin 
25d435d532SXin Yin static void erofs_fscache_put_request(struct netfs_io_request *rreq)
26d435d532SXin Yin {
27d435d532SXin Yin 	if (!refcount_dec_and_test(&rreq->ref))
28d435d532SXin Yin 		return;
29d435d532SXin Yin 	if (rreq->cache_resources.ops)
30d435d532SXin Yin 		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
31d435d532SXin Yin 	kfree(rreq);
32d435d532SXin Yin }
33d435d532SXin Yin 
34d435d532SXin Yin static void erofs_fscache_put_subrequest(struct netfs_io_subrequest *subreq)
35d435d532SXin Yin {
36d435d532SXin Yin 	if (!refcount_dec_and_test(&subreq->ref))
37d435d532SXin Yin 		return;
38d435d532SXin Yin 	erofs_fscache_put_request(subreq->rreq);
39d435d532SXin Yin 	kfree(subreq);
40d435d532SXin Yin }
41d435d532SXin Yin 
42d435d532SXin Yin static void erofs_fscache_clear_subrequests(struct netfs_io_request *rreq)
43d435d532SXin Yin {
44d435d532SXin Yin 	struct netfs_io_subrequest *subreq;
45d435d532SXin Yin 
46d435d532SXin Yin 	while (!list_empty(&rreq->subrequests)) {
47d435d532SXin Yin 		subreq = list_first_entry(&rreq->subrequests,
48d435d532SXin Yin 				struct netfs_io_subrequest, rreq_link);
49d435d532SXin Yin 		list_del(&subreq->rreq_link);
50d435d532SXin Yin 		erofs_fscache_put_subrequest(subreq);
51d435d532SXin Yin 	}
52d435d532SXin Yin }
53d435d532SXin Yin 
/*
 * Walk every folio covered by @rreq in the page cache, mark it uptodate
 * unless any subrequest overlapping it failed, and unlock it.  The folio
 * walk and the ordered subrequest list are advanced in lockstep.
 */
static void erofs_fscache_rreq_unlock_folios(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	unsigned int iopos = 0;		/* bytes of I/O accounted so far, relative to rreq->start */
	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	bool subreq_failed = false;

	XA_STATE(xas, &rreq->mapping->i_pages, start_page);

	/* the subrequest list is non-empty by the time a request completes */
	subreq = list_first_entry(&rreq->subrequests,
				  struct netfs_io_subrequest, rreq_link);
	subreq_failed = (subreq->error < 0);

	rcu_read_lock();
	xas_for_each(&xas, folio, last_page) {
		/* byte range of this folio relative to rreq->start */
		unsigned int pgpos =
			(folio_index(folio) - start_page) * PAGE_SIZE;
		unsigned int pgend = pgpos + folio_size(folio);
		bool pg_failed = false;

		/* accumulate failure state of all subrequests covering the folio */
		for (;;) {
			if (!subreq) {
				/* subrequests ran out before covering the folio */
				pg_failed = true;
				break;
			}

			pg_failed |= subreq_failed;
			/* current subrequest extends past this folio: done */
			if (pgend < iopos + subreq->len)
				break;

			iopos += subreq->len;
			if (!list_is_last(&subreq->rreq_link,
					  &rreq->subrequests)) {
				subreq = list_next_entry(subreq, rreq_link);
				subreq_failed = (subreq->error < 0);
			} else {
				subreq = NULL;
				subreq_failed = false;
			}
			/* subrequest boundary coincides with folio end */
			if (pgend == iopos)
				break;
		}

		if (!pg_failed)
			folio_mark_uptodate(folio);

		folio_unlock(folio);
	}
	rcu_read_unlock();
}
106d435d532SXin Yin 
/*
 * Finish a whole read request: unlock the folios it covers, release all
 * subrequests, then drop the request reference held by the submitter.
 */
static void erofs_fscache_rreq_complete(struct netfs_io_request *rreq)
{
	erofs_fscache_rreq_unlock_folios(rreq);
	erofs_fscache_clear_subrequests(rreq);
	erofs_fscache_put_request(rreq);
}
113d435d532SXin Yin 
/*
 * Completion callback passed to fscache_read() for one subrequest: record
 * an error (if any), complete the parent request once the last outstanding
 * subrequest has finished, then drop this subrequest's I/O reference.
 * NOTE(review): "erofc" looks like a typo for "erofs" — renaming requires
 * updating the fscache_read() call site as well.
 */
static void erofc_fscache_subreq_complete(void *priv,
		ssize_t transferred_or_error, bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;
	struct netfs_io_request *rreq = subreq->rreq;

	if (IS_ERR_VALUE(transferred_or_error))
		subreq->error = transferred_or_error;

	/* nr_outstanding was primed to 1 by the submitter, which drops it last */
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		erofs_fscache_rreq_complete(rreq);

	erofs_fscache_put_subrequest(subreq);
}
128d435d532SXin Yin 
/*
 * Read data from fscache and fill the read data into page cache described by
 * @rreq, which shall be both aligned with PAGE_SIZE. @pstart describes
 * the start physical address in the cache file.
 *
 * Submits one or more asynchronous subrequests; folio unlocking happens on
 * request completion via erofc_fscache_subreq_complete().  Consumes the
 * caller's reference on @rreq on every path, including errors.
 */
static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
				struct netfs_io_request *rreq, loff_t pstart)
{
	enum netfs_io_source source;
	struct super_block *sb = rreq->mapping->host->i_sb;
	struct netfs_io_subrequest *subreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct iov_iter iter;
	loff_t start = rreq->start;
	size_t len = rreq->len;
	size_t done = 0;
	int ret;

	/* one artificial "outstanding" count held by this submission loop */
	atomic_set(&rreq->nr_outstanding, 1);

	ret = fscache_begin_read_operation(cres, cookie);
	if (ret)
		goto out;

	while (done < len) {
		subreq = kzalloc(sizeof(struct netfs_io_subrequest),
				 GFP_KERNEL);
		if (subreq) {
			INIT_LIST_HEAD(&subreq->rreq_link);
			/* one ref for the subrequest list, one for the I/O */
			refcount_set(&subreq->ref, 2);
			subreq->rreq = rreq;
			refcount_inc(&rreq->ref);
		} else {
			ret = -ENOMEM;
			goto out;
		}

		subreq->start = pstart + done;
		subreq->len	=  len - done;
		subreq->flags = 1 << NETFS_SREQ_ONDEMAND;

		list_add_tail(&subreq->rreq_link, &rreq->subrequests);

		/* prepare_read() may shrink subreq->len to what the cache serves */
		source = cres->ops->prepare_read(subreq, LLONG_MAX);
		if (WARN_ON(subreq->len == 0))
			source = NETFS_INVALID_READ;
		if (source != NETFS_READ_FROM_CACHE) {
			erofs_err(sb, "failed to fscache prepare_read (source %d)",
				  source);
			ret = -EIO;
			subreq->error = ret;
			/* drop the I/O ref; the list ref goes in clear_subrequests() */
			erofs_fscache_put_subrequest(subreq);
			goto out;
		}

		atomic_inc(&rreq->nr_outstanding);

		iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
				start + done, subreq->len);

		ret = fscache_read(cres, subreq->start, &iter,
				   NETFS_READ_HOLE_FAIL,
				   erofc_fscache_subreq_complete, subreq);
		if (ret == -EIOCBQUEUED)
			ret = 0;	/* queued asynchronously: not an error */
		if (ret) {
			erofs_err(sb, "failed to fscache_read (ret %d)", ret);
			goto out;
		}

		done += subreq->len;
	}
out:
	/* drop the submission loop's count; may complete the request right here */
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		erofs_fscache_rreq_complete(rreq);

	return ret;
}
207ec00b5e2SJeffle Xu 
208*fdaf9a58SLinus Torvalds static int erofs_fscache_meta_read_folio(struct file *data, struct folio *folio)
2095375e7c8SJeffle Xu {
2105375e7c8SJeffle Xu 	int ret;
2115375e7c8SJeffle Xu 	struct super_block *sb = folio_mapping(folio)->host->i_sb;
212d435d532SXin Yin 	struct netfs_io_request *rreq;
2135375e7c8SJeffle Xu 	struct erofs_map_dev mdev = {
2145375e7c8SJeffle Xu 		.m_deviceid = 0,
2155375e7c8SJeffle Xu 		.m_pa = folio_pos(folio),
2165375e7c8SJeffle Xu 	};
2175375e7c8SJeffle Xu 
2185375e7c8SJeffle Xu 	ret = erofs_map_dev(sb, &mdev);
2195375e7c8SJeffle Xu 	if (ret)
2205375e7c8SJeffle Xu 		goto out;
2215375e7c8SJeffle Xu 
222d435d532SXin Yin 	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
223d435d532SXin Yin 				folio_pos(folio), folio_size(folio));
224d435d532SXin Yin 	if (IS_ERR(rreq))
225d435d532SXin Yin 		goto out;
226d435d532SXin Yin 
227d435d532SXin Yin 	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
228d435d532SXin Yin 				rreq, mdev.m_pa);
2295375e7c8SJeffle Xu out:
2305375e7c8SJeffle Xu 	folio_unlock(folio);
2315375e7c8SJeffle Xu 	return ret;
2325375e7c8SJeffle Xu }
2335375e7c8SJeffle Xu 
234*fdaf9a58SLinus Torvalds static int erofs_fscache_read_folio_inline(struct folio *folio,
235bd735bdaSJeffle Xu 					 struct erofs_map_blocks *map)
236bd735bdaSJeffle Xu {
237bd735bdaSJeffle Xu 	struct super_block *sb = folio_mapping(folio)->host->i_sb;
238bd735bdaSJeffle Xu 	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
239bd735bdaSJeffle Xu 	erofs_blk_t blknr;
240bd735bdaSJeffle Xu 	size_t offset, len;
241bd735bdaSJeffle Xu 	void *src, *dst;
242bd735bdaSJeffle Xu 
243bd735bdaSJeffle Xu 	/* For tail packing layout, the offset may be non-zero. */
244bd735bdaSJeffle Xu 	offset = erofs_blkoff(map->m_pa);
245bd735bdaSJeffle Xu 	blknr = erofs_blknr(map->m_pa);
246bd735bdaSJeffle Xu 	len = map->m_llen;
247bd735bdaSJeffle Xu 
248bd735bdaSJeffle Xu 	src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
249bd735bdaSJeffle Xu 	if (IS_ERR(src))
250bd735bdaSJeffle Xu 		return PTR_ERR(src);
251bd735bdaSJeffle Xu 
252bd735bdaSJeffle Xu 	dst = kmap_local_folio(folio, 0);
253bd735bdaSJeffle Xu 	memcpy(dst, src + offset, len);
254bd735bdaSJeffle Xu 	memset(dst + len, 0, PAGE_SIZE - len);
255bd735bdaSJeffle Xu 	kunmap_local(dst);
256bd735bdaSJeffle Xu 
257bd735bdaSJeffle Xu 	erofs_put_metabuf(&buf);
258bd735bdaSJeffle Xu 	return 0;
259bd735bdaSJeffle Xu }
260bd735bdaSJeffle Xu 
261*fdaf9a58SLinus Torvalds static int erofs_fscache_read_folio(struct file *file, struct folio *folio)
2621442b02bSJeffle Xu {
2631442b02bSJeffle Xu 	struct inode *inode = folio_mapping(folio)->host;
2641442b02bSJeffle Xu 	struct super_block *sb = inode->i_sb;
2651442b02bSJeffle Xu 	struct erofs_map_blocks map;
2661442b02bSJeffle Xu 	struct erofs_map_dev mdev;
267d435d532SXin Yin 	struct netfs_io_request *rreq;
2681442b02bSJeffle Xu 	erofs_off_t pos;
2691442b02bSJeffle Xu 	loff_t pstart;
2701442b02bSJeffle Xu 	int ret;
2711442b02bSJeffle Xu 
2721442b02bSJeffle Xu 	DBG_BUGON(folio_size(folio) != EROFS_BLKSIZ);
2731442b02bSJeffle Xu 
2741442b02bSJeffle Xu 	pos = folio_pos(folio);
2751442b02bSJeffle Xu 	map.m_la = pos;
2761442b02bSJeffle Xu 
2771442b02bSJeffle Xu 	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
2781442b02bSJeffle Xu 	if (ret)
2791442b02bSJeffle Xu 		goto out_unlock;
2801442b02bSJeffle Xu 
2811442b02bSJeffle Xu 	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
2821442b02bSJeffle Xu 		folio_zero_range(folio, 0, folio_size(folio));
2831442b02bSJeffle Xu 		goto out_uptodate;
2841442b02bSJeffle Xu 	}
2851442b02bSJeffle Xu 
286bd735bdaSJeffle Xu 	if (map.m_flags & EROFS_MAP_META) {
287*fdaf9a58SLinus Torvalds 		ret = erofs_fscache_read_folio_inline(folio, &map);
288bd735bdaSJeffle Xu 		goto out_uptodate;
289bd735bdaSJeffle Xu 	}
290bd735bdaSJeffle Xu 
2911442b02bSJeffle Xu 	mdev = (struct erofs_map_dev) {
2921442b02bSJeffle Xu 		.m_deviceid = map.m_deviceid,
2931442b02bSJeffle Xu 		.m_pa = map.m_pa,
2941442b02bSJeffle Xu 	};
2951442b02bSJeffle Xu 
2961442b02bSJeffle Xu 	ret = erofs_map_dev(sb, &mdev);
2971442b02bSJeffle Xu 	if (ret)
2981442b02bSJeffle Xu 		goto out_unlock;
2991442b02bSJeffle Xu 
300d435d532SXin Yin 
301d435d532SXin Yin 	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
302d435d532SXin Yin 				folio_pos(folio), folio_size(folio));
303d435d532SXin Yin 	if (IS_ERR(rreq))
304d435d532SXin Yin 		goto out_unlock;
305d435d532SXin Yin 
3061442b02bSJeffle Xu 	pstart = mdev.m_pa + (pos - map.m_la);
307d435d532SXin Yin 	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
308d435d532SXin Yin 				rreq, pstart);
3091442b02bSJeffle Xu 
3101442b02bSJeffle Xu out_uptodate:
3111442b02bSJeffle Xu 	if (!ret)
3121442b02bSJeffle Xu 		folio_mark_uptodate(folio);
3131442b02bSJeffle Xu out_unlock:
3141442b02bSJeffle Xu 	folio_unlock(folio);
3151442b02bSJeffle Xu 	return ret;
3161442b02bSJeffle Xu }
3171442b02bSJeffle Xu 
318d435d532SXin Yin static void erofs_fscache_advance_folios(struct readahead_control *rac,
319d435d532SXin Yin 					 size_t len, bool unlock)
320c665b394SJeffle Xu {
321c665b394SJeffle Xu 	while (len) {
322c665b394SJeffle Xu 		struct folio *folio = readahead_folio(rac);
323c665b394SJeffle Xu 		len -= folio_size(folio);
324d435d532SXin Yin 		if (unlock) {
325c665b394SJeffle Xu 			folio_mark_uptodate(folio);
326c665b394SJeffle Xu 			folio_unlock(folio);
327c665b394SJeffle Xu 		}
328c665b394SJeffle Xu 	}
329d435d532SXin Yin }
330c665b394SJeffle Xu 
/*
 * readahead handler: walk the requested range extent by extent, zero-filling
 * holes, copying inline metadata, and submitting async fscache reads for
 * mapped extents.  Errors just terminate readahead early; folios not
 * consumed via readahead_folio() are cleaned up by the readahead core.
 */
static void erofs_fscache_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct super_block *sb = inode->i_sb;
	size_t len, count, done = 0;
	erofs_off_t pos;
	loff_t start, offset;
	int ret;

	if (!readahead_count(rac))
		return;

	start = readahead_pos(rac);
	len = readahead_length(rac);

	do {
		struct erofs_map_blocks map;
		struct erofs_map_dev mdev;
		struct netfs_io_request *rreq;

		pos = start + done;
		map.m_la = pos;

		ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (ret)
			return;

		offset = start + done;
		/* bytes of this extent that fall inside the readahead window */
		count = min_t(size_t, map.m_llen - (pos - map.m_la),
			      len - done);

		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			/* hole: zero-fill the folios directly */
			struct iov_iter iter;

			iov_iter_xarray(&iter, READ, &rac->mapping->i_pages,
					offset, count);
			iov_iter_zero(count, &iter);

			erofs_fscache_advance_folios(rac, count, true);
			ret = count;
			continue;
		}

		if (map.m_flags & EROFS_MAP_META) {
			/* inline (tail-packed) data fits in a single folio */
			struct folio *folio = readahead_folio(rac);

			ret = erofs_fscache_read_folio_inline(folio, &map);
			if (!ret) {
				folio_mark_uptodate(folio);
				ret = folio_size(folio);
			}

			folio_unlock(folio);
			continue;
		}

		mdev = (struct erofs_map_dev) {
			.m_deviceid = map.m_deviceid,
			.m_pa = map.m_pa,
		};
		ret = erofs_map_dev(sb, &mdev);
		if (ret)
			return;

		rreq = erofs_fscache_alloc_request(rac->mapping, offset, count);
		if (IS_ERR(rreq))
			return;
		/*
		 * Drop the ref of folios here. Unlock them in
		 * rreq_unlock_folios() when rreq complete.
		 */
		erofs_fscache_advance_folios(rac, count, false);
		ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
					rreq, mdev.m_pa + (pos - map.m_la));
		if (!ret)
			ret = count;	/* advance by the bytes submitted */
	} while (ret > 0 && ((done += ret) < len));
}
409c665b394SJeffle Xu 
/* address_space ops for the pseudo metadata inode (1:1 physical mapping) */
static const struct address_space_operations erofs_fscache_meta_aops = {
	.read_folio = erofs_fscache_meta_read_folio,
};

/* address_space ops for file data served out of fscache */
const struct address_space_operations erofs_fscache_access_aops = {
	.read_folio = erofs_fscache_read_folio,
	.readahead = erofs_fscache_readahead,
};
4181442b02bSJeffle Xu 
419b02c602fSJeffle Xu int erofs_fscache_register_cookie(struct super_block *sb,
4203c265d7dSJeffle Xu 				  struct erofs_fscache **fscache,
4213c265d7dSJeffle Xu 				  char *name, bool need_inode)
422b02c602fSJeffle Xu {
423b02c602fSJeffle Xu 	struct fscache_volume *volume = EROFS_SB(sb)->volume;
424b02c602fSJeffle Xu 	struct erofs_fscache *ctx;
425b02c602fSJeffle Xu 	struct fscache_cookie *cookie;
4263c265d7dSJeffle Xu 	int ret;
427b02c602fSJeffle Xu 
428b02c602fSJeffle Xu 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
429b02c602fSJeffle Xu 	if (!ctx)
430b02c602fSJeffle Xu 		return -ENOMEM;
431b02c602fSJeffle Xu 
432b02c602fSJeffle Xu 	cookie = fscache_acquire_cookie(volume, FSCACHE_ADV_WANT_CACHE_SIZE,
433b02c602fSJeffle Xu 					name, strlen(name), NULL, 0, 0);
434b02c602fSJeffle Xu 	if (!cookie) {
435b02c602fSJeffle Xu 		erofs_err(sb, "failed to get cookie for %s", name);
4363c265d7dSJeffle Xu 		ret = -EINVAL;
4373c265d7dSJeffle Xu 		goto err;
438b02c602fSJeffle Xu 	}
439b02c602fSJeffle Xu 
440b02c602fSJeffle Xu 	fscache_use_cookie(cookie, false);
441b02c602fSJeffle Xu 	ctx->cookie = cookie;
442b02c602fSJeffle Xu 
4433c265d7dSJeffle Xu 	if (need_inode) {
4443c265d7dSJeffle Xu 		struct inode *const inode = new_inode(sb);
4453c265d7dSJeffle Xu 
4463c265d7dSJeffle Xu 		if (!inode) {
4473c265d7dSJeffle Xu 			erofs_err(sb, "failed to get anon inode for %s", name);
4483c265d7dSJeffle Xu 			ret = -ENOMEM;
4493c265d7dSJeffle Xu 			goto err_cookie;
4503c265d7dSJeffle Xu 		}
4513c265d7dSJeffle Xu 
4523c265d7dSJeffle Xu 		set_nlink(inode, 1);
4533c265d7dSJeffle Xu 		inode->i_size = OFFSET_MAX;
4543c265d7dSJeffle Xu 		inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
4553c265d7dSJeffle Xu 		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
4563c265d7dSJeffle Xu 
4573c265d7dSJeffle Xu 		ctx->inode = inode;
4583c265d7dSJeffle Xu 	}
4593c265d7dSJeffle Xu 
460b02c602fSJeffle Xu 	*fscache = ctx;
461b02c602fSJeffle Xu 	return 0;
4623c265d7dSJeffle Xu 
4633c265d7dSJeffle Xu err_cookie:
4643c265d7dSJeffle Xu 	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
4653c265d7dSJeffle Xu 	fscache_relinquish_cookie(ctx->cookie, false);
4663c265d7dSJeffle Xu 	ctx->cookie = NULL;
4673c265d7dSJeffle Xu err:
4683c265d7dSJeffle Xu 	kfree(ctx);
4693c265d7dSJeffle Xu 	return ret;
470b02c602fSJeffle Xu }
471b02c602fSJeffle Xu 
472b02c602fSJeffle Xu void erofs_fscache_unregister_cookie(struct erofs_fscache **fscache)
473b02c602fSJeffle Xu {
474b02c602fSJeffle Xu 	struct erofs_fscache *ctx = *fscache;
475b02c602fSJeffle Xu 
476b02c602fSJeffle Xu 	if (!ctx)
477b02c602fSJeffle Xu 		return;
478b02c602fSJeffle Xu 
479b02c602fSJeffle Xu 	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
480b02c602fSJeffle Xu 	fscache_relinquish_cookie(ctx->cookie, false);
481b02c602fSJeffle Xu 	ctx->cookie = NULL;
482b02c602fSJeffle Xu 
4833c265d7dSJeffle Xu 	iput(ctx->inode);
4843c265d7dSJeffle Xu 	ctx->inode = NULL;
4853c265d7dSJeffle Xu 
486b02c602fSJeffle Xu 	kfree(ctx);
487b02c602fSJeffle Xu 	*fscache = NULL;
488b02c602fSJeffle Xu }
489b02c602fSJeffle Xu 
490c6be2bd0SJeffle Xu int erofs_fscache_register_fs(struct super_block *sb)
491c6be2bd0SJeffle Xu {
492c6be2bd0SJeffle Xu 	struct erofs_sb_info *sbi = EROFS_SB(sb);
493c6be2bd0SJeffle Xu 	struct fscache_volume *volume;
494c6be2bd0SJeffle Xu 	char *name;
495c6be2bd0SJeffle Xu 	int ret = 0;
496c6be2bd0SJeffle Xu 
497c6be2bd0SJeffle Xu 	name = kasprintf(GFP_KERNEL, "erofs,%s", sbi->opt.fsid);
498c6be2bd0SJeffle Xu 	if (!name)
499c6be2bd0SJeffle Xu 		return -ENOMEM;
500c6be2bd0SJeffle Xu 
501c6be2bd0SJeffle Xu 	volume = fscache_acquire_volume(name, NULL, NULL, 0);
502c6be2bd0SJeffle Xu 	if (IS_ERR_OR_NULL(volume)) {
503c6be2bd0SJeffle Xu 		erofs_err(sb, "failed to register volume for %s", name);
504c6be2bd0SJeffle Xu 		ret = volume ? PTR_ERR(volume) : -EOPNOTSUPP;
505c6be2bd0SJeffle Xu 		volume = NULL;
506c6be2bd0SJeffle Xu 	}
507c6be2bd0SJeffle Xu 
508c6be2bd0SJeffle Xu 	sbi->volume = volume;
509c6be2bd0SJeffle Xu 	kfree(name);
510c6be2bd0SJeffle Xu 	return ret;
511c6be2bd0SJeffle Xu }
512c6be2bd0SJeffle Xu 
/* Relinquish the fscache volume acquired by erofs_fscache_register_fs(). */
void erofs_fscache_unregister_fs(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	/* fscache_relinquish_volume() tolerates a NULL volume */
	fscache_relinquish_volume(sbi->volume, NULL, false);
	sbi->volume = NULL;
}
520