xref: /openbmc/linux/fs/ceph/cache.c (revision a17922de)
/*
 * Ceph cache definitions.
 *
 *  Copyright (C) 2013 by Adfin Solutions, Inc. All Rights Reserved.
 *  Written by Milosz Tanski (milosz@adfin.com)
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include "super.h"
#include "cache.h"

struct ceph_aux_inode {
	u64		version;
	struct timespec	mtime;
};

struct fscache_netfs ceph_cache_netfs = {
	.name		= "ceph",
	.version	= 0,
};

static DEFINE_MUTEX(ceph_fscache_lock);
static LIST_HEAD(ceph_fscache_list);

struct ceph_fscache_entry {
	struct list_head list;
	struct fscache_cookie *fscache;
	size_t uniq_len;
	/* The following members must be last */
	struct ceph_fsid fsid;
	char uniquifier[];
};

static const struct fscache_cookie_def ceph_fscache_fsid_object_def = {
	.name		= "CEPH.fsid",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
};

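/*
 * Register the "ceph" netfs definition with FS-Cache.  This happens once,
 * at module init time (hence __init); the unregister path below undoes it
 * when the module goes away.
 */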
int __init ceph_fscache_register(void)
{
	return fscache_register_netfs(&ceph_cache_netfs);
}

void ceph_fscache_unregister(void)
{
	fscache_unregister_netfs(&ceph_cache_netfs);
}

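/*
 * Acquire the filesystem-level index cookie, keyed by the cluster fsid plus
 * the optional "fsc=..." uniquifier.  Two mounts of the same fsid cannot
 * share a cookie, so registration fails with -EBUSY unless a uniquifier
 * tells them apart.
 */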
int ceph_fscache_register_fs(struct ceph_fs_client *fsc)
{
	const struct ceph_fsid *fsid = &fsc->client->fsid;
	const char *fscache_uniq = fsc->mount_options->fscache_uniq;
	size_t uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
	struct ceph_fscache_entry *ent;
	int err = 0;

	mutex_lock(&ceph_fscache_lock);
	list_for_each_entry(ent, &ceph_fscache_list, list) {
		if (memcmp(&ent->fsid, fsid, sizeof(*fsid)))
			continue;
		if (ent->uniq_len != uniq_len)
			continue;
		if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len))
			continue;

		pr_err("fscache cookie already registered for fsid %pU\n", fsid);
		pr_err("  use fsc=%%s mount option to specify a uniquifier\n");
		err = -EBUSY;
		goto out_unlock;
	}

	ent = kzalloc(sizeof(*ent) + uniq_len, GFP_KERNEL);
	if (!ent) {
		err = -ENOMEM;
		goto out_unlock;
	}

	memcpy(&ent->fsid, fsid, sizeof(*fsid));
	if (uniq_len > 0) {
		memcpy(&ent->uniquifier, fscache_uniq, uniq_len);
		ent->uniq_len = uniq_len;
	}

	fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
					      &ceph_fscache_fsid_object_def,
					      &ent->fsid, sizeof(ent->fsid) + uniq_len,
					      NULL, 0,
					      fsc, 0, true);

	if (fsc->fscache) {
		ent->fscache = fsc->fscache;
		list_add_tail(&ent->list, &ceph_fscache_list);
	} else {
		kfree(ent);
		pr_err("unable to register fscache cookie for fsid %pU\n",
		       fsid);
		/* all other fs ignore this error */
	}
out_unlock:
	mutex_unlock(&ceph_fscache_lock);
	return err;
}

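/*
 * Consistency check for a cached inode object: compare the stored auxiliary
 * data (version + mtime) and the cached object size against the live inode.
 * Any mismatch marks the cached object obsolete so FS-Cache discards it.
 */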
static enum fscache_checkaux ceph_fscache_inode_check_aux(
	void *cookie_netfs_data, const void *data, uint16_t dlen,
	loff_t object_size)
{
	struct ceph_aux_inode aux;
	struct ceph_inode_info *ci = cookie_netfs_data;
	struct inode *inode = &ci->vfs_inode;

	if (dlen != sizeof(aux) ||
	    i_size_read(inode) != object_size)
		return FSCACHE_CHECKAUX_OBSOLETE;

	memset(&aux, 0, sizeof(aux));
	aux.version = ci->i_version;
	aux.mtime = timespec64_to_timespec(inode->i_mtime);

	if (memcmp(data, &aux, sizeof(aux)) != 0)
		return FSCACHE_CHECKAUX_OBSOLETE;

	dout("ceph inode 0x%p cached okay\n", ci);
	return FSCACHE_CHECKAUX_OKAY;
}

static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
	.name		= "CEPH.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.check_aux	= ceph_fscache_inode_check_aux,
};

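/*
 * Acquire a per-inode data cookie, keyed by the ceph vino and carrying the
 * version/mtime pair as auxiliary data.  Only regular files are cached.
 */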
void ceph_fscache_register_inode_cookie(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_aux_inode aux;

	/* No caching for this filesystem */
	if (!fsc->fscache)
		return;

	/* Only cache for regular files that are read only */
	if (!S_ISREG(inode->i_mode))
		return;

	inode_lock_nested(inode, I_MUTEX_CHILD);
	if (!ci->fscache) {
		memset(&aux, 0, sizeof(aux));
		aux.version = ci->i_version;
		aux.mtime = timespec64_to_timespec(inode->i_mtime);
		ci->fscache = fscache_acquire_cookie(fsc->fscache,
						     &ceph_fscache_inode_object_def,
						     &ci->i_vino, sizeof(ci->i_vino),
						     &aux, sizeof(aux),
						     ci, i_size_read(inode), false);
	}
	inode_unlock(inode);
}

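/*
 * Detach the inode from its cookie: uncache any pages still owned by
 * FS-Cache, then relinquish the cookie itself.
 */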
void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info *ci)
{
	struct fscache_cookie *cookie = ci->fscache;

	if (!cookie)
		return;

	ci->fscache = NULL;

	fscache_uncache_all_inode_pages(cookie, &ci->vfs_inode);
	fscache_relinquish_cookie(cookie, &ci->i_vino, false);
}

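/* Caching may only be (re)enabled while nobody has the inode open for write. */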
static bool ceph_fscache_can_enable(void *data)
{
	struct inode *inode = data;

	return !inode_is_open_for_write(inode);
}

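/*
 * Enable or disable caching for a freshly opened file: writers disable the
 * cookie and drop any cached pages, while read-only openers may re-enable
 * the cookie (subject to ceph_fscache_can_enable() above).
 */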
void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (!fscache_cookie_valid(ci->fscache))
		return;

	if (inode_is_open_for_write(inode)) {
		dout("fscache_file_set_cookie %p %p disabling cache\n",
		     inode, filp);
		fscache_disable_cookie(ci->fscache, &ci->i_vino, false);
		fscache_uncache_all_inode_pages(ci->fscache, inode);
	} else {
		fscache_enable_cookie(ci->fscache, &ci->i_vino, i_size_read(inode),
				      ceph_fscache_can_enable, inode);
		if (fscache_cookie_enabled(ci->fscache)) {
			dout("fscache_file_set_cookie %p %p enabling cache\n",
			     inode, filp);
		}
	}
}

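/*
 * Completion callback for cache reads: mark the page up to date on success
 * and unlock it either way.
 */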
static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
{
	if (!error)
		SetPageUptodate(page);

	unlock_page(page);
}

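/*
 * The per-inode cookie is only trusted while i_fscache_gen matches
 * i_rdcache_gen.
 */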
static inline bool cache_valid(struct ceph_inode_info *ci)
{
	return ci->i_fscache_gen == ci->i_rdcache_gen;
}

/* Attempt to read a page from the fscache.
 *
 * This function is called from the readpage_nounlock context. DO NOT attempt
 * to unlock the page here (or in the callback).
 */
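/*
 * Roughly how a caller is expected to use this (an illustrative sketch only,
 * not the exact code in the read path):
 *
 *	err = ceph_readpage_from_fscache(inode, page);
 *	if (err == 0)
 *		return ...;	read submitted; the completion callback
 *				above will unlock the page
 *
 * -ENOBUFS and -ENODATA simply mean "not cached": fall back to a normal
 * OSD read and let ceph_readpage_to_fscache() populate the cache later.
 */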
int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!cache_valid(ci))
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(ci->fscache, page,
					 ceph_readpage_from_fscache_complete, NULL,
					 GFP_KERNEL);

	switch (ret) {
		case 0: /* Page found */
			dout("page read submitted\n");
			return 0;
		case -ENOBUFS: /* Pages were not found, and can't be */
		case -ENODATA: /* Pages were not found */
			dout("page/inode not in cache\n");
			return ret;
		default:
			dout("%s: unknown error ret = %i\n", __func__, ret);
			return ret;
	}
}

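/*
 * Try to satisfy an entire readpages request from the cache.  Pages that
 * FS-Cache can service are submitted for reading (and taken off the list by
 * fscache_read_or_alloc_pages()); whatever remains must be fetched from the
 * OSDs by the caller.
 */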
int ceph_readpages_from_fscache(struct inode *inode,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!cache_valid(ci))
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
					  ceph_readpage_from_fscache_complete,
					  NULL, mapping_gfp_mask(mapping));

	switch (ret) {
		case 0: /* All pages found */
			dout("all-page read submitted\n");
			return 0;
		case -ENOBUFS: /* Some pages were not found, and can't be */
		case -ENODATA: /* Some pages were not found */
			dout("page/inode not in cache\n");
			return ret;
		default:
			dout("%s: unknown error ret = %i\n", __func__, ret);
			return ret;
	}
}

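/*
 * Push a page into the cache.  Nothing is done unless the page was marked
 * for caching (PageFsCache) and the cookie generation is still current; if
 * the cache write fails, the page is uncached again.
 */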
void ceph_readpage_to_fscache(struct inode *inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!PageFsCache(page))
		return;

	if (!cache_valid(ci))
		return;

	ret = fscache_write_page(ci->fscache, page, i_size_read(inode),
				 GFP_KERNEL);
	if (ret)
		fscache_uncache_page(ci->fscache, page);
}

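/*
 * Drop a page from the cache: wait for any write-to-cache still in flight,
 * then uncache it.
 */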
void ceph_invalidate_fscache_page(struct inode *inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (!PageFsCache(page))
		return;

	fscache_wait_on_page_write(ci->fscache, page);
	fscache_uncache_page(ci->fscache, page);
}

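/*
 * Tear down the filesystem-level cookie: drop this client's entry from the
 * global list and relinquish the cookie acquired in
 * ceph_fscache_register_fs().
 */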
void ceph_fscache_unregister_fs(struct ceph_fs_client *fsc)
{
	if (fscache_cookie_valid(fsc->fscache)) {
		struct ceph_fscache_entry *ent;
		bool found = false;

		mutex_lock(&ceph_fscache_lock);
		list_for_each_entry(ent, &ceph_fscache_list, list) {
			if (ent->fscache == fsc->fscache) {
				list_del(&ent->list);
				kfree(ent);
				found = true;
				break;
			}
		}
		WARN_ON_ONCE(!found);
		mutex_unlock(&ceph_fscache_lock);

		__fscache_relinquish_cookie(fsc->fscache, NULL, false);
	}
	fsc->fscache = NULL;
}

/*
 * caller should hold CEPH_CAP_FILE_{RD,CACHE}
 */
void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
{
	if (cache_valid(ci))
		return;

	/* reuse i_truncate_mutex. There should be no pending
	 * truncate while the caller holds CEPH_CAP_FILE_RD */
	mutex_lock(&ci->i_truncate_mutex);
	if (!cache_valid(ci)) {
		if (fscache_check_consistency(ci->fscache, &ci->i_vino))
			fscache_invalidate(ci->fscache);
		spin_lock(&ci->i_ceph_lock);
		ci->i_fscache_gen = ci->i_rdcache_gen;
		spin_unlock(&ci->i_ceph_lock);
	}
	mutex_unlock(&ci->i_truncate_mutex);
}
362