xref: /openbmc/linux/fs/ceph/cache.c (revision 842ed298)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Ceph cache definitions.
4  *
5  *  Copyright (C) 2013 by Adfin Solutions, Inc. All Rights Reserved.
6  *  Written by Milosz Tanski (milosz@adfin.com)
7  */
8 
9 #include <linux/ceph/ceph_debug.h>
10 
11 #include <linux/fs_context.h>
12 #include "super.h"
13 #include "cache.h"
14 
/*
 * Auxiliary data stored with each inode's fscache cookie and compared
 * verbatim (memcmp) in ceph_fscache_inode_check_aux() to decide whether
 * cached data is still current.  Do not reorder or resize the members:
 * the whole struct is the comparison key.
 */
struct ceph_aux_inode {
	u64 	version;	/* snapshot of ci->i_version */
	u64	mtime_sec;	/* snapshot of inode->i_mtime.tv_sec */
	u64	mtime_nsec;	/* snapshot of inode->i_mtime.tv_nsec */
};
20 
/* The ceph netfs definition registered with the fscache core. */
struct fscache_netfs ceph_cache_netfs = {
	.name		= "ceph",
	.version	= 0,
};
25 
/* Protects ceph_fscache_list. */
static DEFINE_MUTEX(ceph_fscache_lock);
/* All per-filesystem cookies registered via ceph_fscache_register_fs(). */
static LIST_HEAD(ceph_fscache_list);
28 
/*
 * One entry per registered filesystem cookie, kept on ceph_fscache_list
 * so duplicate fsid/uniquifier registrations can be detected.
 */
struct ceph_fscache_entry {
	struct list_head list;		/* link on ceph_fscache_list */
	struct fscache_cookie *fscache;	/* the cookie owned by this fs */
	size_t uniq_len;		/* length of uniquifier[] (no NUL) */
	/* The following members must be last: fsid + uniquifier form one
	 * contiguous buffer used as the fscache index key (see the
	 * sizeof(ent->fsid) + uniq_len key length in register_fs). */
	struct ceph_fsid fsid;
	char uniquifier[];
};
37 
/* Cookie definition for the per-filesystem (fsid-level) index cookie. */
static const struct fscache_cookie_def ceph_fscache_fsid_object_def = {
	.name		= "CEPH.fsid",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
};
42 
/*
 * Register the ceph netfs with the fscache core at module init.
 * Returns 0 on success or a negative errno from fscache.
 */
int __init ceph_fscache_register(void)
{
	return fscache_register_netfs(&ceph_cache_netfs);
}
47 
/* Unregister the ceph netfs from the fscache core at module exit. */
void ceph_fscache_unregister(void)
{
	fscache_unregister_netfs(&ceph_cache_netfs);
}
52 
/*
 * Acquire the per-filesystem fscache cookie for a mount.
 *
 * The cookie key is the cluster fsid optionally extended by the
 * "fsc=<uniquifier>" mount option; the two are stored contiguously in
 * struct ceph_fscache_entry so a single buffer can be handed to
 * fscache_acquire_cookie().  Two mounts may not share the same
 * fsid/uniquifier pair, so an exact match on the registration list is
 * rejected with -EBUSY.
 *
 * Returns 0 on success, -EBUSY for a duplicate registration, -ENOMEM on
 * allocation failure.  Failure to acquire the cookie itself is only
 * logged (fsc->fscache stays NULL) and still returns 0, matching what
 * other filesystems do.
 */
int ceph_fscache_register_fs(struct ceph_fs_client* fsc, struct fs_context *fc)
{
	const struct ceph_fsid *fsid = &fsc->client->fsid;
	const char *fscache_uniq = fsc->mount_options->fscache_uniq;
	size_t uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
	struct ceph_fscache_entry *ent;
	int err = 0;

	mutex_lock(&ceph_fscache_lock);
	/* Reject an exact fsid + uniquifier duplicate. */
	list_for_each_entry(ent, &ceph_fscache_list, list) {
		if (memcmp(&ent->fsid, fsid, sizeof(*fsid)))
			continue;
		if (ent->uniq_len != uniq_len)
			continue;
		if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len))
			continue;

		errorfc(fc, "fscache cookie already registered for fsid %pU, use fsc=<uniquifier> option",
		       fsid);
		err = -EBUSY;
		goto out_unlock;
	}

	/* Trailing flexible array holds the uniquifier right after fsid. */
	ent = kzalloc(sizeof(*ent) + uniq_len, GFP_KERNEL);
	if (!ent) {
		err = -ENOMEM;
		goto out_unlock;
	}

	memcpy(&ent->fsid, fsid, sizeof(*fsid));
	if (uniq_len > 0) {
		memcpy(&ent->uniquifier, fscache_uniq, uniq_len);
		ent->uniq_len = uniq_len;
	}

	/* Key is the contiguous fsid + uniquifier buffer; no aux data. */
	fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
					      &ceph_fscache_fsid_object_def,
					      &ent->fsid, sizeof(ent->fsid) + uniq_len,
					      NULL, 0,
					      fsc, 0, true);

	if (fsc->fscache) {
		ent->fscache = fsc->fscache;
		list_add_tail(&ent->list, &ceph_fscache_list);
	} else {
		kfree(ent);
		errorfc(fc, "unable to register fscache cookie for fsid %pU",
		       fsid);
		/* all other fs ignore this error */
	}
out_unlock:
	mutex_unlock(&ceph_fscache_lock);
	return err;
}
107 
/*
 * fscache ->check_aux callback: decide whether the cached copy of an
 * inode's data is still valid.
 *
 * Rebuilds the aux blob (version + mtime, see struct ceph_aux_inode)
 * from the live inode and memcmp()s it against the blob stored in the
 * cache.  Any mismatch in blob size, object size, or contents means the
 * cached data is stale.
 */
static enum fscache_checkaux ceph_fscache_inode_check_aux(
	void *cookie_netfs_data, const void *data, uint16_t dlen,
	loff_t object_size)
{
	struct ceph_aux_inode aux;
	struct ceph_inode_info* ci = cookie_netfs_data;
	struct inode* inode = &ci->vfs_inode;

	if (dlen != sizeof(aux) ||
	    i_size_read(inode) != object_size)
		return FSCACHE_CHECKAUX_OBSOLETE;

	/* memset so padding (if any) compares equal deterministically */
	memset(&aux, 0, sizeof(aux));
	aux.version = ci->i_version;
	aux.mtime_sec = inode->i_mtime.tv_sec;
	aux.mtime_nsec = inode->i_mtime.tv_nsec;

	if (memcmp(data, &aux, sizeof(aux)) != 0)
		return FSCACHE_CHECKAUX_OBSOLETE;

	dout("ceph inode 0x%p cached okay\n", ci);
	return FSCACHE_CHECKAUX_OKAY;
}
131 
/* Cookie definition for per-inode data-file cookies. */
static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
	.name		= "CEPH.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.check_aux	= ceph_fscache_inode_check_aux,
};
137 
/*
 * Acquire a data-file cookie for an inode, keyed by ci->i_vino with the
 * current version/mtime as aux data.  Only regular files on a mount
 * that itself has a cookie are cached.  The cookie is acquired disabled
 * (last arg false); ceph_fscache_file_set_cookie() enables it later
 * when the file is opened read-only.
 */
void ceph_fscache_register_inode_cookie(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_aux_inode aux;

	/* No caching for filesystem */
	if (!fsc->fscache)
		return;

	/* Only cache for regular files that are read only */
	if (!S_ISREG(inode->i_mode))
		return;

	/* Serialize against concurrent registration of the same inode. */
	inode_lock_nested(inode, I_MUTEX_CHILD);
	if (!ci->fscache) {
		memset(&aux, 0, sizeof(aux));
		aux.version = ci->i_version;
		aux.mtime_sec = inode->i_mtime.tv_sec;
		aux.mtime_nsec = inode->i_mtime.tv_nsec;
		ci->fscache = fscache_acquire_cookie(fsc->fscache,
						     &ceph_fscache_inode_object_def,
						     &ci->i_vino, sizeof(ci->i_vino),
						     &aux, sizeof(aux),
						     ci, i_size_read(inode), false);
	}
	inode_unlock(inode);
}
166 
167 void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info* ci)
168 {
169 	struct fscache_cookie* cookie;
170 
171 	if ((cookie = ci->fscache) == NULL)
172 		return;
173 
174 	ci->fscache = NULL;
175 
176 	fscache_uncache_all_inode_pages(cookie, &ci->vfs_inode);
177 	fscache_relinquish_cookie(cookie, &ci->i_vino, false);
178 }
179 
180 static bool ceph_fscache_can_enable(void *data)
181 {
182 	struct inode *inode = data;
183 	return !inode_is_open_for_write(inode);
184 }
185 
186 void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
187 {
188 	struct ceph_inode_info *ci = ceph_inode(inode);
189 
190 	if (!fscache_cookie_valid(ci->fscache))
191 		return;
192 
193 	if (inode_is_open_for_write(inode)) {
194 		dout("fscache_file_set_cookie %p %p disabling cache\n",
195 		     inode, filp);
196 		fscache_disable_cookie(ci->fscache, &ci->i_vino, false);
197 		fscache_uncache_all_inode_pages(ci->fscache, inode);
198 	} else {
199 		fscache_enable_cookie(ci->fscache, &ci->i_vino, i_size_read(inode),
200 				      ceph_fscache_can_enable, inode);
201 		if (fscache_cookie_enabled(ci->fscache)) {
202 			dout("fscache_file_set_cookie %p %p enabling cache\n",
203 			     inode, filp);
204 		}
205 	}
206 }
207 
/*
 * Completion callback for fscache reads: mark the page up to date only
 * when the read succeeded, then release the page lock either way.
 */
static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
{
	if (error == 0)
		SetPageUptodate(page);

	unlock_page(page);
}
215 
216 static inline bool cache_valid(struct ceph_inode_info *ci)
217 {
218 	return ci->i_fscache_gen == ci->i_rdcache_gen;
219 }
220 
221 
/* Attempt to read a page from the fscache.
 *
 * This function is called from the readpage_nounlock context. DO NOT attempt to
 * unlock the page here (or in the callback).
 */
227 int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
228 {
229 	struct ceph_inode_info *ci = ceph_inode(inode);
230 	int ret;
231 
232 	if (!cache_valid(ci))
233 		return -ENOBUFS;
234 
235 	ret = fscache_read_or_alloc_page(ci->fscache, page,
236 					 ceph_readpage_from_fscache_complete, NULL,
237 					 GFP_KERNEL);
238 
239 	switch (ret) {
240 		case 0: /* Page found */
241 			dout("page read submitted\n");
242 			return 0;
243 		case -ENOBUFS: /* Pages were not found, and can't be */
244 		case -ENODATA: /* Pages were not found */
245 			dout("page/inode not in cache\n");
246 			return ret;
247 		default:
248 			dout("%s: unknown error ret = %i\n", __func__, ret);
249 			return ret;
250 	}
251 }
252 
253 int ceph_readpages_from_fscache(struct inode *inode,
254 				  struct address_space *mapping,
255 				  struct list_head *pages,
256 				  unsigned *nr_pages)
257 {
258 	struct ceph_inode_info *ci = ceph_inode(inode);
259 	int ret;
260 
261 	if (!cache_valid(ci))
262 		return -ENOBUFS;
263 
264 	ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
265 					  ceph_readpage_from_fscache_complete,
266 					  NULL, mapping_gfp_mask(mapping));
267 
268 	switch (ret) {
269 		case 0: /* All pages found */
270 			dout("all-page read submitted\n");
271 			return 0;
272 		case -ENOBUFS: /* Some pages were not found, and can't be */
273 		case -ENODATA: /* some pages were not found */
274 			dout("page/inode not in cache\n");
275 			return ret;
276 		default:
277 			dout("%s: unknown error ret = %i\n", __func__, ret);
278 			return ret;
279 	}
280 }
281 
282 void ceph_readpage_to_fscache(struct inode *inode, struct page *page)
283 {
284 	struct ceph_inode_info *ci = ceph_inode(inode);
285 	int ret;
286 
287 	if (!PageFsCache(page))
288 		return;
289 
290 	if (!cache_valid(ci))
291 		return;
292 
293 	ret = fscache_write_page(ci->fscache, page, i_size_read(inode),
294 				 GFP_KERNEL);
295 	if (ret)
296 		 fscache_uncache_page(ci->fscache, page);
297 }
298 
299 void ceph_invalidate_fscache_page(struct inode* inode, struct page *page)
300 {
301 	struct ceph_inode_info *ci = ceph_inode(inode);
302 
303 	if (!PageFsCache(page))
304 		return;
305 
306 	fscache_wait_on_page_write(ci->fscache, page);
307 	fscache_uncache_page(ci->fscache, page);
308 }
309 
/*
 * Tear down a mount's filesystem-level cookie at unmount: remove and
 * free its registration entry, then relinquish the cookie.  Always
 * clears fsc->fscache.
 */
void ceph_fscache_unregister_fs(struct ceph_fs_client* fsc)
{
	if (fscache_cookie_valid(fsc->fscache)) {
		struct ceph_fscache_entry *ent;
		bool found = false;

		mutex_lock(&ceph_fscache_lock);
		list_for_each_entry(ent, &ceph_fscache_list, list) {
			if (ent->fscache == fsc->fscache) {
				list_del(&ent->list);
				kfree(ent);
				found = true;
				break;
			}
		}
		/* register_fs always added an entry for a valid cookie */
		WARN_ON_ONCE(!found);
		mutex_unlock(&ceph_fscache_lock);

		/* __ variant: validity was already checked above.
		 * NOTE(review): no aux-update key passed — confirm intended. */
		__fscache_relinquish_cookie(fsc->fscache, NULL, false);
	}
	fsc->fscache = NULL;
}
332 
/*
 * Re-validate an inode's fscache cookie after the read-cache generation
 * moved on: ask fscache to check consistency, invalidate on mismatch,
 * then record the current generation under i_ceph_lock.
 *
 * caller should hold CEPH_CAP_FILE_{RD,CACHE}
 */
void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
{
	if (cache_valid(ci))
		return;

	/* reuse i_truncate_mutex. There should be no pending
	 * truncate while the caller holds CEPH_CAP_FILE_RD */
	mutex_lock(&ci->i_truncate_mutex);
	/* re-check under the mutex: another task may have revalidated */
	if (!cache_valid(ci)) {
		if (fscache_check_consistency(ci->fscache, &ci->i_vino))
			fscache_invalidate(ci->fscache);
		spin_lock(&ci->i_ceph_lock);
		ci->i_fscache_gen = ci->i_rdcache_gen;
		spin_unlock(&ci->i_ceph_lock);
	}
	mutex_unlock(&ci->i_truncate_mutex);
}
353