// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022, Alibaba Cloud
 */
#include <linux/fscache.h>
#include "internal.h"

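/*
 * erofs drives fscache directly rather than through the netfs library,
 * so it hand-allocates a pared-down netfs_io_request here; only the
 * fields consumed by the read path below are initialized.
 */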
static struct netfs_io_request *erofs_fscache_alloc_request(struct address_space *mapping,
					     loff_t start, size_t len)
{
	struct netfs_io_request *rreq;

	rreq = kzalloc(sizeof(struct netfs_io_request), GFP_KERNEL);
	if (!rreq)
		return ERR_PTR(-ENOMEM);

	rreq->start	= start;
	rreq->len	= len;
	rreq->mapping	= mapping;
	INIT_LIST_HEAD(&rreq->subrequests);
	refcount_set(&rreq->ref, 1);
	return rreq;
}

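/*
 * Drop a reference on the request; the final put ends the cache
 * operation and frees the request.
 */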
static void erofs_fscache_put_request(struct netfs_io_request *rreq)
{
	if (!refcount_dec_and_test(&rreq->ref))
		return;
	if (rreq->cache_resources.ops)
		rreq->cache_resources.ops->end_operation(&rreq->cache_resources);
	kfree(rreq);
}

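/*
 * Each subrequest pins its parent request; the final put on a
 * subrequest drops that pin as well.
 */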
static void erofs_fscache_put_subrequest(struct netfs_io_subrequest *subreq)
{
	if (!refcount_dec_and_test(&subreq->ref))
		return;
	erofs_fscache_put_request(subreq->rreq);
	kfree(subreq);
}

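/* Detach and put every subrequest once the whole request has completed. */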
static void erofs_fscache_clear_subrequests(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;

	while (!list_empty(&rreq->subrequests)) {
		subreq = list_first_entry(&rreq->subrequests,
				struct netfs_io_subrequest, rreq_link);
		list_del(&subreq->rreq_link);
		erofs_fscache_put_subrequest(subreq);
	}
}

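/*
 * Walk the folios in the byte range of @rreq and the subrequest list in
 * parallel; a folio is marked uptodate only if every subrequest
 * overlapping it completed without error, and is unlocked either way.
 * The subrequests are contiguous and in file order by construction, see
 * erofs_fscache_read_folios_async().
 */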
static void erofs_fscache_rreq_unlock_folios(struct netfs_io_request *rreq)
{
	struct netfs_io_subrequest *subreq;
	struct folio *folio;
	unsigned int iopos = 0;
	pgoff_t start_page = rreq->start / PAGE_SIZE;
	pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
	bool subreq_failed = false;

	XA_STATE(xas, &rreq->mapping->i_pages, start_page);

	subreq = list_first_entry(&rreq->subrequests,
				  struct netfs_io_subrequest, rreq_link);
	subreq_failed = (subreq->error < 0);

	rcu_read_lock();
	xas_for_each(&xas, folio, last_page) {
		unsigned int pgpos =
			(folio_index(folio) - start_page) * PAGE_SIZE;
		unsigned int pgend = pgpos + folio_size(folio);
		bool pg_failed = false;

		for (;;) {
			if (!subreq) {
				pg_failed = true;
				break;
			}

			pg_failed |= subreq_failed;
			if (pgend < iopos + subreq->len)
				break;

			iopos += subreq->len;
			if (!list_is_last(&subreq->rreq_link,
					  &rreq->subrequests)) {
				subreq = list_next_entry(subreq, rreq_link);
				subreq_failed = (subreq->error < 0);
			} else {
				subreq = NULL;
				subreq_failed = false;
			}
			if (pgend == iopos)
				break;
		}

		if (!pg_failed)
			folio_mark_uptodate(folio);

		folio_unlock(folio);
	}
	rcu_read_unlock();
}

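/*
 * Called once the last outstanding subrequest has finished: unlock the
 * folios, tear down the subrequest list and drop the initial request ref.
 */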
static void erofs_fscache_rreq_complete(struct netfs_io_request *rreq)
{
	erofs_fscache_rreq_unlock_folios(rreq);
	erofs_fscache_clear_subrequests(rreq);
	erofs_fscache_put_request(rreq);
}

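/*
 * Completion callback handed to fscache_read(): record any error on the
 * subrequest and complete the whole request once rreq->nr_outstanding
 * drops to zero.
 */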
static void erofs_fscache_subreq_complete(void *priv,
		ssize_t transferred_or_error, bool was_async)
{
	struct netfs_io_subrequest *subreq = priv;
	struct netfs_io_request *rreq = subreq->rreq;

	if (IS_ERR_VALUE(transferred_or_error))
		subreq->error = transferred_or_error;

	if (atomic_dec_and_test(&rreq->nr_outstanding))
		erofs_fscache_rreq_complete(rreq);

	erofs_fscache_put_subrequest(subreq);
}

/*
 * Read data from fscache and fill it into the page cache described by
 * @rreq, whose start and length shall both be aligned with PAGE_SIZE.
 * @pstart describes the start physical address in the cache file.
 */
static int erofs_fscache_read_folios_async(struct fscache_cookie *cookie,
				struct netfs_io_request *rreq, loff_t pstart)
{
	enum netfs_io_source source;
	struct super_block *sb = rreq->mapping->host->i_sb;
	struct netfs_io_subrequest *subreq;
	struct netfs_cache_resources *cres = &rreq->cache_resources;
	struct iov_iter iter;
	loff_t start = rreq->start;
	size_t len = rreq->len;
	size_t done = 0;
	int ret;

	atomic_set(&rreq->nr_outstanding, 1);

	ret = fscache_begin_read_operation(cres, cookie);
	if (ret)
		goto out;

	while (done < len) {
		subreq = kzalloc(sizeof(struct netfs_io_subrequest),
				 GFP_KERNEL);
		if (!subreq) {
			ret = -ENOMEM;
			goto out;
		}

		INIT_LIST_HEAD(&subreq->rreq_link);
		/* one ref for the subrequest list, one for the completion */
		refcount_set(&subreq->ref, 2);
		subreq->rreq = rreq;
		refcount_inc(&rreq->ref);

		subreq->start = pstart + done;
		subreq->len = len - done;
		subreq->flags = 1 << NETFS_SREQ_ONDEMAND;

		list_add_tail(&subreq->rreq_link, &rreq->subrequests);

		/* the cache may trim subreq->len to a contiguous extent */
		source = cres->ops->prepare_read(subreq, LLONG_MAX);
		if (WARN_ON(subreq->len == 0))
			source = NETFS_INVALID_READ;
		if (source != NETFS_READ_FROM_CACHE) {
			erofs_err(sb, "failed to fscache prepare_read (source %d)",
				  source);
			ret = -EIO;
			subreq->error = ret;
			erofs_fscache_put_subrequest(subreq);
			goto out;
		}

		atomic_inc(&rreq->nr_outstanding);

		iov_iter_xarray(&iter, READ, &rreq->mapping->i_pages,
				start + done, subreq->len);

		ret = fscache_read(cres, subreq->start, &iter,
				   NETFS_READ_HOLE_FAIL,
				   erofs_fscache_subreq_complete, subreq);
		if (ret == -EIOCBQUEUED)
			ret = 0;
		if (ret) {
			erofs_err(sb, "failed to fscache_read (ret %d)", ret);
			goto out;
		}

		done += subreq->len;
	}
out:
	/* drop the submitter's bias on nr_outstanding */
	if (atomic_dec_and_test(&rreq->nr_outstanding))
		erofs_fscache_rreq_complete(rreq);

	return ret;
}

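/*
 * ->readpage() for the anonymous metadata inode: folio_pos() is already a
 * physical address within the primary blob, so it is fed to
 * erofs_map_dev() directly with no logical-to-physical translation.
 */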
static int erofs_fscache_meta_readpage(struct file *data, struct page *page)
{
	int ret;
	struct folio *folio = page_folio(page);
	struct super_block *sb = folio_mapping(folio)->host->i_sb;
	struct netfs_io_request *rreq;
	struct erofs_map_dev mdev = {
		.m_deviceid = 0,
		.m_pa = folio_pos(folio),
	};

	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		goto out;

	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
				folio_pos(folio), folio_size(folio));
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto out;
	}

	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
				rreq, mdev.m_pa);
out:
	folio_unlock(folio);
	return ret;
}

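/*
 * Serve a folio from inline (tail-packed) data: copy it out of the
 * metadata buffer and zero the remainder of the page; no cache I/O is
 * involved.
 */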
static int erofs_fscache_readpage_inline(struct folio *folio,
					 struct erofs_map_blocks *map)
{
	struct super_block *sb = folio_mapping(folio)->host->i_sb;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	erofs_blk_t blknr;
	size_t offset, len;
	void *src, *dst;

	/* For tail packing layout, the offset may be non-zero. */
	offset = erofs_blkoff(map->m_pa);
	blknr = erofs_blknr(map->m_pa);
	len = map->m_llen;

	src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
	if (IS_ERR(src))
		return PTR_ERR(src);

	dst = kmap_local_folio(folio, 0);
	memcpy(dst, src + offset, len);
	memset(dst + len, 0, PAGE_SIZE - len);
	kunmap_local(dst);

	erofs_put_metabuf(&buf);
	return 0;
}

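/*
 * ->readpage() for regular data: map the logical offset to a (device,
 * physical offset) pair, then read the folio asynchronously from the
 * corresponding cookie.
 */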
static int erofs_fscache_readpage(struct file *file, struct page *page)
{
	struct folio *folio = page_folio(page);
	struct inode *inode = folio_mapping(folio)->host;
	struct super_block *sb = inode->i_sb;
	struct erofs_map_blocks map;
	struct erofs_map_dev mdev;
	struct netfs_io_request *rreq;
	erofs_off_t pos;
	loff_t pstart;
	int ret;

	DBG_BUGON(folio_size(folio) != EROFS_BLKSIZ);

	pos = folio_pos(folio);
	map.m_la = pos;

	ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
	if (ret)
		goto out_unlock;

	if (!(map.m_flags & EROFS_MAP_MAPPED)) {
		folio_zero_range(folio, 0, folio_size(folio));
		goto out_uptodate;
	}

	if (map.m_flags & EROFS_MAP_META) {
		ret = erofs_fscache_readpage_inline(folio, &map);
		goto out_uptodate;
	}

	mdev = (struct erofs_map_dev) {
		.m_deviceid = map.m_deviceid,
		.m_pa = map.m_pa,
	};

	ret = erofs_map_dev(sb, &mdev);
	if (ret)
		goto out_unlock;

	rreq = erofs_fscache_alloc_request(folio_mapping(folio),
				folio_pos(folio), folio_size(folio));
	if (IS_ERR(rreq)) {
		ret = PTR_ERR(rreq);
		goto out_unlock;
	}

	pstart = mdev.m_pa + (pos - map.m_la);
	return erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
				rreq, pstart);

out_uptodate:
	if (!ret)
		folio_mark_uptodate(folio);
out_unlock:
	folio_unlock(folio);
	return ret;
}

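/*
 * Consume @len bytes of folios from the readahead batch, dropping the
 * readahead reference on each (see readahead_folio()).  With @unlock set
 * the data is already in place (e.g. a zeroed hole), so the folios are
 * marked uptodate and unlocked now; otherwise they stay locked for the
 * async completion path.
 */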
static void erofs_fscache_advance_folios(struct readahead_control *rac,
					 size_t len, bool unlock)
{
	while (len) {
		struct folio *folio = readahead_folio(rac);
		len -= folio_size(folio);
		if (unlock) {
			folio_mark_uptodate(folio);
			folio_unlock(folio);
		}
	}
}

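/*
 * ->readahead(): process the requested range extent by extent.  Holes
 * are zeroed in place, inline (tail-packed) data is copied from the
 * metadata buffer, and mapped extents are submitted as asynchronous
 * fscache reads whose folios remain locked until the request completes.
 */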
static void erofs_fscache_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct super_block *sb = inode->i_sb;
	size_t len, count, done = 0;
	erofs_off_t pos;
	loff_t start, offset;
	int ret;

	if (!readahead_count(rac))
		return;

	start = readahead_pos(rac);
	len = readahead_length(rac);

	do {
		struct erofs_map_blocks map;
		struct erofs_map_dev mdev;
		struct netfs_io_request *rreq;

		pos = start + done;
		map.m_la = pos;

		ret = erofs_map_blocks(inode, &map, EROFS_GET_BLOCKS_RAW);
		if (ret)
			return;

		offset = start + done;
		count = min_t(size_t, map.m_llen - (pos - map.m_la),
			      len - done);

		if (!(map.m_flags & EROFS_MAP_MAPPED)) {
			struct iov_iter iter;

			iov_iter_xarray(&iter, READ, &rac->mapping->i_pages,
					offset, count);
			iov_iter_zero(count, &iter);

			erofs_fscache_advance_folios(rac, count, true);
			ret = count;
			continue;
		}

		if (map.m_flags & EROFS_MAP_META) {
			struct folio *folio = readahead_folio(rac);

			ret = erofs_fscache_readpage_inline(folio, &map);
			if (!ret) {
				folio_mark_uptodate(folio);
				ret = folio_size(folio);
			}

			folio_unlock(folio);
			continue;
		}

		mdev = (struct erofs_map_dev) {
			.m_deviceid = map.m_deviceid,
			.m_pa = map.m_pa,
		};
		ret = erofs_map_dev(sb, &mdev);
		if (ret)
			return;

		rreq = erofs_fscache_alloc_request(rac->mapping, offset, count);
		if (IS_ERR(rreq))
			return;
		/*
		 * Drop the ref of the folios here; they are unlocked in
		 * erofs_fscache_rreq_unlock_folios() when the request
		 * completes.
		 */
		erofs_fscache_advance_folios(rac, count, false);
		ret = erofs_fscache_read_folios_async(mdev.m_fscache->cookie,
					rreq, mdev.m_pa + (pos - map.m_la));
		if (!ret)
			ret = count;
	} while (ret > 0 && ((done += ret) < len));
}

static const struct address_space_operations erofs_fscache_meta_aops = {
	.readpage = erofs_fscache_meta_readpage,
};

const struct address_space_operations erofs_fscache_access_aops = {
	.readpage = erofs_fscache_readpage,
	.readahead = erofs_fscache_readahead,
};

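/*
 * Acquire an fscache cookie for the given data blob.  With @need_inode
 * set, also allocate an anonymous inode whose page cache, driven by
 * erofs_fscache_meta_aops, buffers the blob's metadata.
 */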
int erofs_fscache_register_cookie(struct super_block *sb,
				  struct erofs_fscache **fscache,
				  char *name, bool need_inode)
{
	struct fscache_volume *volume = EROFS_SB(sb)->volume;
	struct erofs_fscache *ctx;
	struct fscache_cookie *cookie;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	cookie = fscache_acquire_cookie(volume, FSCACHE_ADV_WANT_CACHE_SIZE,
					name, strlen(name), NULL, 0, 0);
	if (!cookie) {
		erofs_err(sb, "failed to get cookie for %s", name);
		ret = -EINVAL;
		goto err;
	}

	fscache_use_cookie(cookie, false);
	ctx->cookie = cookie;

	if (need_inode) {
		struct inode *const inode = new_inode(sb);

		if (!inode) {
			erofs_err(sb, "failed to get anon inode for %s", name);
			ret = -ENOMEM;
			goto err_cookie;
		}

		set_nlink(inode, 1);
		inode->i_size = OFFSET_MAX;
		inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
		mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

		ctx->inode = inode;
	}

	*fscache = ctx;
	return 0;

err_cookie:
	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
	fscache_relinquish_cookie(ctx->cookie, false);
	ctx->cookie = NULL;
err:
	kfree(ctx);
	return ret;
}

void erofs_fscache_unregister_cookie(struct erofs_fscache **fscache)
{
	struct erofs_fscache *ctx = *fscache;

	if (!ctx)
		return;

	fscache_unuse_cookie(ctx->cookie, NULL, NULL);
	fscache_relinquish_cookie(ctx->cookie, false);
	ctx->cookie = NULL;

	iput(ctx->inode);
	ctx->inode = NULL;

	kfree(ctx);
	*fscache = NULL;
}

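/*
 * Acquire the per-filesystem fscache volume, keyed by the user-supplied
 * fsid as "erofs,<fsid>".
 */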
int erofs_fscache_register_fs(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct fscache_volume *volume;
	char *name;
	int ret = 0;

	name = kasprintf(GFP_KERNEL, "erofs,%s", sbi->opt.fsid);
	if (!name)
		return -ENOMEM;

	volume = fscache_acquire_volume(name, NULL, NULL, 0);
	if (IS_ERR_OR_NULL(volume)) {
		erofs_err(sb, "failed to register volume for %s", name);
		ret = volume ? PTR_ERR(volume) : -EOPNOTSUPP;
		volume = NULL;
	}

	sbi->volume = volume;
	kfree(name);
	return ret;
}

void erofs_fscache_unregister_fs(struct super_block *sb)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);

	fscache_relinquish_volume(sbi->volume, NULL, false);
	sbi->volume = NULL;
}