xref: /openbmc/linux/fs/ceph/cache.c (revision 4e95bc26)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Ceph cache definitions.
4  *
5  *  Copyright (C) 2013 by Adfin Solutions, Inc. All Rights Reserved.
6  *  Written by Milosz Tanski (milosz@adfin.com)
7  */
8 
9 #include "super.h"
10 #include "cache.h"
11 
/*
 * Auxiliary data attached to each inode's fscache cookie.  fscache hands
 * these bytes back to ceph_fscache_inode_check_aux() to decide whether the
 * cached pages are still valid for the inode.
 */
struct ceph_aux_inode {
	u64 	version;	/* ci->i_version when the cookie was set up */
	u64	mtime_sec;	/* inode->i_mtime.tv_sec */
	u64	mtime_nsec;	/* inode->i_mtime.tv_nsec */
};
17 
/* Top-level fscache netfs definition for ceph; parent of all fsid cookies. */
struct fscache_netfs ceph_cache_netfs = {
	.name		= "ceph",
	.version	= 0,
};
22 
/* Protects ceph_fscache_list: one entry per registered fsid (+ uniquifier). */
static DEFINE_MUTEX(ceph_fscache_lock);
static LIST_HEAD(ceph_fscache_list);
25 
26 struct ceph_fscache_entry {
27 	struct list_head list;
28 	struct fscache_cookie *fscache;
29 	size_t uniq_len;
30 	/* The following members must be last */
31 	struct ceph_fsid fsid;
32 	char uniquifier[0];
33 };
34 
/*
 * Index cookie definition for one ceph filesystem instance (keyed by fsid
 * plus optional uniquifier); parent of the per-inode data cookies.
 */
static const struct fscache_cookie_def ceph_fscache_fsid_object_def = {
	.name		= "CEPH.fsid",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
};
39 
40 int __init ceph_fscache_register(void)
41 {
42 	return fscache_register_netfs(&ceph_cache_netfs);
43 }
44 
/* Undo ceph_fscache_register() at module unload. */
void ceph_fscache_unregister(void)
{
	fscache_unregister_netfs(&ceph_cache_netfs);
}
49 
50 int ceph_fscache_register_fs(struct ceph_fs_client* fsc)
51 {
52 	const struct ceph_fsid *fsid = &fsc->client->fsid;
53 	const char *fscache_uniq = fsc->mount_options->fscache_uniq;
54 	size_t uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
55 	struct ceph_fscache_entry *ent;
56 	int err = 0;
57 
58 	mutex_lock(&ceph_fscache_lock);
59 	list_for_each_entry(ent, &ceph_fscache_list, list) {
60 		if (memcmp(&ent->fsid, fsid, sizeof(*fsid)))
61 			continue;
62 		if (ent->uniq_len != uniq_len)
63 			continue;
64 		if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len))
65 			continue;
66 
67 		pr_err("fscache cookie already registered for fsid %pU\n", fsid);
68 		pr_err("  use fsc=%%s mount option to specify a uniquifier\n");
69 		err = -EBUSY;
70 		goto out_unlock;
71 	}
72 
73 	ent = kzalloc(sizeof(*ent) + uniq_len, GFP_KERNEL);
74 	if (!ent) {
75 		err = -ENOMEM;
76 		goto out_unlock;
77 	}
78 
79 	memcpy(&ent->fsid, fsid, sizeof(*fsid));
80 	if (uniq_len > 0) {
81 		memcpy(&ent->uniquifier, fscache_uniq, uniq_len);
82 		ent->uniq_len = uniq_len;
83 	}
84 
85 	fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
86 					      &ceph_fscache_fsid_object_def,
87 					      &ent->fsid, sizeof(ent->fsid) + uniq_len,
88 					      NULL, 0,
89 					      fsc, 0, true);
90 
91 	if (fsc->fscache) {
92 		ent->fscache = fsc->fscache;
93 		list_add_tail(&ent->list, &ceph_fscache_list);
94 	} else {
95 		kfree(ent);
96 		pr_err("unable to register fscache cookie for fsid %pU\n",
97 		       fsid);
98 		/* all other fs ignore this error */
99 	}
100 out_unlock:
101 	mutex_unlock(&ceph_fscache_lock);
102 	return err;
103 }
104 
105 static enum fscache_checkaux ceph_fscache_inode_check_aux(
106 	void *cookie_netfs_data, const void *data, uint16_t dlen,
107 	loff_t object_size)
108 {
109 	struct ceph_aux_inode aux;
110 	struct ceph_inode_info* ci = cookie_netfs_data;
111 	struct inode* inode = &ci->vfs_inode;
112 
113 	if (dlen != sizeof(aux) ||
114 	    i_size_read(inode) != object_size)
115 		return FSCACHE_CHECKAUX_OBSOLETE;
116 
117 	memset(&aux, 0, sizeof(aux));
118 	aux.version = ci->i_version;
119 	aux.mtime_sec = inode->i_mtime.tv_sec;
120 	aux.mtime_nsec = inode->i_mtime.tv_nsec;
121 
122 	if (memcmp(data, &aux, sizeof(aux)) != 0)
123 		return FSCACHE_CHECKAUX_OBSOLETE;
124 
125 	dout("ceph inode 0x%p cached okay\n", ci);
126 	return FSCACHE_CHECKAUX_OKAY;
127 }
128 
/* Per-inode data cookie definition; aux validation via check_aux above. */
static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
	.name		= "CEPH.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.check_aux	= ceph_fscache_inode_check_aux,
};
134 
135 void ceph_fscache_register_inode_cookie(struct inode *inode)
136 {
137 	struct ceph_inode_info *ci = ceph_inode(inode);
138 	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
139 	struct ceph_aux_inode aux;
140 
141 	/* No caching for filesystem */
142 	if (!fsc->fscache)
143 		return;
144 
145 	/* Only cache for regular files that are read only */
146 	if (!S_ISREG(inode->i_mode))
147 		return;
148 
149 	inode_lock_nested(inode, I_MUTEX_CHILD);
150 	if (!ci->fscache) {
151 		memset(&aux, 0, sizeof(aux));
152 		aux.version = ci->i_version;
153 		aux.mtime_sec = inode->i_mtime.tv_sec;
154 		aux.mtime_nsec = inode->i_mtime.tv_nsec;
155 		ci->fscache = fscache_acquire_cookie(fsc->fscache,
156 						     &ceph_fscache_inode_object_def,
157 						     &ci->i_vino, sizeof(ci->i_vino),
158 						     &aux, sizeof(aux),
159 						     ci, i_size_read(inode), false);
160 	}
161 	inode_unlock(inode);
162 }
163 
164 void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
165 {
166 	struct fscache_cookie* cookie;
167 
168 	if ((cookie = ci->fscache) == NULL)
169 		return;
170 
171 	ci->fscache = NULL;
172 
173 	fscache_uncache_all_inode_pages(cookie, &ci->vfs_inode);
174 	fscache_relinquish_cookie(cookie, &ci->i_vino, false);
175 }
176 
177 static bool ceph_fscache_can_enable(void *data)
178 {
179 	struct inode *inode = data;
180 	return !inode_is_open_for_write(inode);
181 }
182 
183 void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
184 {
185 	struct ceph_inode_info *ci = ceph_inode(inode);
186 
187 	if (!fscache_cookie_valid(ci->fscache))
188 		return;
189 
190 	if (inode_is_open_for_write(inode)) {
191 		dout("fscache_file_set_cookie %p %p disabling cache\n",
192 		     inode, filp);
193 		fscache_disable_cookie(ci->fscache, &ci->i_vino, false);
194 		fscache_uncache_all_inode_pages(ci->fscache, inode);
195 	} else {
196 		fscache_enable_cookie(ci->fscache, &ci->i_vino, i_size_read(inode),
197 				      ceph_fscache_can_enable, inode);
198 		if (fscache_cookie_enabled(ci->fscache)) {
199 			dout("fscache_file_set_cookie %p %p enabling cache\n",
200 			     inode, filp);
201 		}
202 	}
203 }
204 
/*
 * Completion callback for fscache reads: mark the page uptodate on success
 * and unlock it either way so waiters can proceed.
 */
static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
{
	if (error == 0)
		SetPageUptodate(page);

	unlock_page(page);
}
212 
213 static inline bool cache_valid(struct ceph_inode_info *ci)
214 {
215 	return ci->i_fscache_gen == ci->i_rdcache_gen;
216 }
217 
218 
219 /* Atempt to read from the fscache,
220  *
221  * This function is called from the readpage_nounlock context. DO NOT attempt to
222  * unlock the page here (or in the callback).
223  */
224 int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
225 {
226 	struct ceph_inode_info *ci = ceph_inode(inode);
227 	int ret;
228 
229 	if (!cache_valid(ci))
230 		return -ENOBUFS;
231 
232 	ret = fscache_read_or_alloc_page(ci->fscache, page,
233 					 ceph_readpage_from_fscache_complete, NULL,
234 					 GFP_KERNEL);
235 
236 	switch (ret) {
237 		case 0: /* Page found */
238 			dout("page read submitted\n");
239 			return 0;
240 		case -ENOBUFS: /* Pages were not found, and can't be */
241 		case -ENODATA: /* Pages were not found */
242 			dout("page/inode not in cache\n");
243 			return ret;
244 		default:
245 			dout("%s: unknown error ret = %i\n", __func__, ret);
246 			return ret;
247 	}
248 }
249 
250 int ceph_readpages_from_fscache(struct inode *inode,
251 				  struct address_space *mapping,
252 				  struct list_head *pages,
253 				  unsigned *nr_pages)
254 {
255 	struct ceph_inode_info *ci = ceph_inode(inode);
256 	int ret;
257 
258 	if (!cache_valid(ci))
259 		return -ENOBUFS;
260 
261 	ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
262 					  ceph_readpage_from_fscache_complete,
263 					  NULL, mapping_gfp_mask(mapping));
264 
265 	switch (ret) {
266 		case 0: /* All pages found */
267 			dout("all-page read submitted\n");
268 			return 0;
269 		case -ENOBUFS: /* Some pages were not found, and can't be */
270 		case -ENODATA: /* some pages were not found */
271 			dout("page/inode not in cache\n");
272 			return ret;
273 		default:
274 			dout("%s: unknown error ret = %i\n", __func__, ret);
275 			return ret;
276 	}
277 }
278 
279 void ceph_readpage_to_fscache(struct inode *inode, struct page *page)
280 {
281 	struct ceph_inode_info *ci = ceph_inode(inode);
282 	int ret;
283 
284 	if (!PageFsCache(page))
285 		return;
286 
287 	if (!cache_valid(ci))
288 		return;
289 
290 	ret = fscache_write_page(ci->fscache, page, i_size_read(inode),
291 				 GFP_KERNEL);
292 	if (ret)
293 		 fscache_uncache_page(ci->fscache, page);
294 }
295 
/*
 * Drop a page from the fscache (e.g. on truncate/invalidate).  Waits for
 * any in-flight fscache write to the page before uncaching it.
 */
void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	/* Page not owned by fscache: nothing to drop. */
	if (!PageFsCache(page))
		return;

	fscache_wait_on_page_write(ci->fscache, page);
	fscache_uncache_page(ci->fscache, page);
}
306 
/*
 * Tear down the filesystem's fsid cookie at unmount: remove and free its
 * tracking entry from the global list, then relinquish the cookie.
 * fsc->fscache is always cleared, even if it was never valid.
 */
void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
{
	if (fscache_cookie_valid(fsc->fscache)) {
		struct ceph_fscache_entry *ent;
		bool found = false;

		mutex_lock(&ceph_fscache_lock);
		list_for_each_entry(ent, &ceph_fscache_list, list) {
			if (ent->fscache == fsc->fscache) {
				list_del(&ent->list);
				kfree(ent);
				found = true;
				break;
			}
		}
		/* Every valid cookie was added by register_fs, so must be here. */
		WARN_ON_ONCE(!found);
		mutex_unlock(&ceph_fscache_lock);

		/* aux_data is NULL: skip the aux update on relinquish. */
		__fscache_relinquish_cookie(fsc->fscache, NULL, false);
	}
	fsc->fscache = NULL;
}
329 
/*
 * caller should hold CEPH_CAP_FILE_{RD,CACHE}
 *
 * Bring i_fscache_gen back in sync with i_rdcache_gen after the page cache
 * was invalidated, invalidating the fscache object if its stored state no
 * longer matches the inode.
 */
void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
{
	/* Fast path: generations already match, nothing was invalidated. */
	if (cache_valid(ci))
		return;

	/* reuse i_truncate_mutex. There should be no pending
	 * truncate while the caller holds CEPH_CAP_FILE_RD */
	mutex_lock(&ci->i_truncate_mutex);
	/* Re-check under the mutex: another task may have revalidated. */
	if (!cache_valid(ci)) {
		if (fscache_check_consistency(ci->fscache, &ci->i_vino))
			fscache_invalidate(ci->fscache);
		spin_lock(&ci->i_ceph_lock);
		ci->i_fscache_gen = ci->i_rdcache_gen;
		spin_unlock(&ci->i_ceph_lock);
	}
	mutex_unlock(&ci->i_truncate_mutex);
}
350