xref: /openbmc/linux/fs/ceph/cache.c (revision 151f4e2b)
/*
 * Ceph cache definitions.
 *
 *  Copyright (C) 2013 by Adfin Solutions, Inc. All Rights Reserved.
 *  Written by Milosz Tanski (milosz@adfin.com)
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include "super.h"
#include "cache.h"

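/*
 * Auxiliary data stored alongside each inode cookie; fscache hands it back
 * through the check_aux callback so we can tell whether the cached data
 * still matches the inode (version and mtime).
 */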
struct ceph_aux_inode {
	u64	version;
	u64	mtime_sec;
	u64	mtime_nsec;
};

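/*
 * Top-level netfs definition registered with fscache; every ceph mount
 * acquires its per-filesystem index cookie under this netfs' primary index.
 */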
struct fscache_netfs ceph_cache_netfs = {
	.name		= "ceph",
	.version	= 0,
};

static DEFINE_MUTEX(ceph_fscache_lock);
static LIST_HEAD(ceph_fscache_list);

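/*
 * One entry per registered fsid (plus optional uniquifier), kept on
 * ceph_fscache_list so that a second mount of the same filesystem without a
 * distinct "fsc=" uniquifier can be detected and refused.
 */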
struct ceph_fscache_entry {
	struct list_head list;
	struct fscache_cookie *fscache;
	size_t uniq_len;
	/* The following members must be last */
	struct ceph_fsid fsid;
	char uniquifier[];
};

static const struct fscache_cookie_def ceph_fscache_fsid_object_def = {
	.name		= "CEPH.fsid",
	.type		= FSCACHE_COOKIE_TYPE_INDEX,
};

int __init ceph_fscache_register(void)
{
	return fscache_register_netfs(&ceph_cache_netfs);
}

void ceph_fscache_unregister(void)
{
	fscache_unregister_netfs(&ceph_cache_netfs);
}

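/*
 * Acquire the per-filesystem (fsid) index cookie for this mount.  Fails with
 * -EBUSY if a cookie for the same fsid/uniquifier pair is already registered;
 * failure to acquire the cookie itself is logged but not treated as an error.
 */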
int ceph_fscache_register_fs(struct ceph_fs_client *fsc)
{
	const struct ceph_fsid *fsid = &fsc->client->fsid;
	const char *fscache_uniq = fsc->mount_options->fscache_uniq;
	size_t uniq_len = fscache_uniq ? strlen(fscache_uniq) : 0;
	struct ceph_fscache_entry *ent;
	int err = 0;

	mutex_lock(&ceph_fscache_lock);
	list_for_each_entry(ent, &ceph_fscache_list, list) {
		if (memcmp(&ent->fsid, fsid, sizeof(*fsid)))
			continue;
		if (ent->uniq_len != uniq_len)
			continue;
		if (uniq_len && memcmp(ent->uniquifier, fscache_uniq, uniq_len))
			continue;

		pr_err("fscache cookie already registered for fsid %pU\n", fsid);
		pr_err("  use fsc=%%s mount option to specify a uniquifier\n");
		err = -EBUSY;
		goto out_unlock;
	}

	ent = kzalloc(sizeof(*ent) + uniq_len, GFP_KERNEL);
	if (!ent) {
		err = -ENOMEM;
		goto out_unlock;
	}

	memcpy(&ent->fsid, fsid, sizeof(*fsid));
	if (uniq_len > 0) {
		memcpy(&ent->uniquifier, fscache_uniq, uniq_len);
		ent->uniq_len = uniq_len;
	}

	fsc->fscache = fscache_acquire_cookie(ceph_cache_netfs.primary_index,
					      &ceph_fscache_fsid_object_def,
					      &ent->fsid, sizeof(ent->fsid) + uniq_len,
					      NULL, 0,
					      fsc, 0, true);

	if (fsc->fscache) {
		ent->fscache = fsc->fscache;
		list_add_tail(&ent->list, &ceph_fscache_list);
	} else {
		kfree(ent);
		pr_err("unable to register fscache cookie for fsid %pU\n",
		       fsid);
		/* all other fs ignore this error */
	}
out_unlock:
	mutex_unlock(&ceph_fscache_lock);
	return err;
}

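/*
 * fscache consistency callback: compare the auxiliary data stored with the
 * cookie against the inode's current size, version and mtime, and declare
 * the cached object obsolete on any mismatch.
 */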
static enum fscache_checkaux ceph_fscache_inode_check_aux(
	void *cookie_netfs_data, const void *data, uint16_t dlen,
	loff_t object_size)
{
	struct ceph_aux_inode aux;
	struct ceph_inode_info *ci = cookie_netfs_data;
	struct inode *inode = &ci->vfs_inode;

	if (dlen != sizeof(aux) ||
	    i_size_read(inode) != object_size)
		return FSCACHE_CHECKAUX_OBSOLETE;

	memset(&aux, 0, sizeof(aux));
	aux.version = ci->i_version;
	aux.mtime_sec = inode->i_mtime.tv_sec;
	aux.mtime_nsec = inode->i_mtime.tv_nsec;

	if (memcmp(data, &aux, sizeof(aux)) != 0)
		return FSCACHE_CHECKAUX_OBSOLETE;

	dout("ceph inode 0x%p cached okay\n", ci);
	return FSCACHE_CHECKAUX_OKAY;
}

static const struct fscache_cookie_def ceph_fscache_inode_object_def = {
	.name		= "CEPH.inode",
	.type		= FSCACHE_COOKIE_TYPE_DATAFILE,
	.check_aux	= ceph_fscache_inode_check_aux,
};

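/*
 * Acquire a data-file cookie for a regular file.  The cookie is acquired
 * disabled (enable == false); it is only switched on later, per open, by
 * ceph_fscache_file_set_cookie() when the file is not open for write.
 */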
void ceph_fscache_register_inode_cookie(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_aux_inode aux;

	/* No caching for filesystem */
	if (!fsc->fscache)
		return;

	/* Only cache for regular files that are read only */
	if (!S_ISREG(inode->i_mode))
		return;

	inode_lock_nested(inode, I_MUTEX_CHILD);
	if (!ci->fscache) {
		memset(&aux, 0, sizeof(aux));
		aux.version = ci->i_version;
		aux.mtime_sec = inode->i_mtime.tv_sec;
		aux.mtime_nsec = inode->i_mtime.tv_nsec;
		ci->fscache = fscache_acquire_cookie(fsc->fscache,
						     &ceph_fscache_inode_object_def,
						     &ci->i_vino, sizeof(ci->i_vino),
						     &aux, sizeof(aux),
						     ci, i_size_read(inode), false);
	}
	inode_unlock(inode);
}

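/*
 * Drop the inode's cookie: detach it from the inode, uncache any pages still
 * associated with it, and relinquish it without retiring the backing object.
 */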
void ceph_fscache_unregister_inode_cookie(struct ceph_inode_info *ci)
{
	struct fscache_cookie *cookie = ci->fscache;

	if (!cookie)
		return;

	ci->fscache = NULL;

	fscache_uncache_all_inode_pages(cookie, &ci->vfs_inode);
	fscache_relinquish_cookie(cookie, &ci->i_vino, false);
}

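/* Caching may only be enabled while nobody has the inode open for write. */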
static bool ceph_fscache_can_enable(void *data)
{
	struct inode *inode = data;
	return !inode_is_open_for_write(inode);
}

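/*
 * Called on each open: disable caching (and uncache pages) when the inode is
 * open for write, otherwise try to enable the cookie for read caching.
 */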
void ceph_fscache_file_set_cookie(struct inode *inode, struct file *filp)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (!fscache_cookie_valid(ci->fscache))
		return;

	if (inode_is_open_for_write(inode)) {
		dout("fscache_file_set_cookie %p %p disabling cache\n",
		     inode, filp);
		fscache_disable_cookie(ci->fscache, &ci->i_vino, false);
		fscache_uncache_all_inode_pages(ci->fscache, inode);
	} else {
		fscache_enable_cookie(ci->fscache, &ci->i_vino, i_size_read(inode),
				      ceph_fscache_can_enable, inode);
		if (fscache_cookie_enabled(ci->fscache)) {
			dout("fscache_file_set_cookie %p %p enabling cache\n",
			     inode, filp);
		}
	}
}

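/*
 * Completion callback for fscache reads (single-page and readpages paths):
 * mark the page up to date on success and unlock it now that the read has
 * finished.
 */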
static void ceph_readpage_from_fscache_complete(struct page *page, void *data, int error)
{
	if (!error)
		SetPageUptodate(page);

	unlock_page(page);
}

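/*
 * Cached data is only trusted while the inode's fscache generation matches
 * its read cache generation (i_rdcache_gen); see
 * ceph_fscache_revalidate_cookie().
 */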
static inline bool cache_valid(struct ceph_inode_info *ci)
{
	return ci->i_fscache_gen == ci->i_rdcache_gen;
}

/* Attempt to read a page from fscache.
 *
 * This is called from the readpage path with the page already locked.  Do not
 * unlock the page here: on a successful submission the completion callback
 * unlocks it once the read has finished (or failed).
 */
int ceph_readpage_from_fscache(struct inode *inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!cache_valid(ci))
		return -ENOBUFS;

	ret = fscache_read_or_alloc_page(ci->fscache, page,
					 ceph_readpage_from_fscache_complete, NULL,
					 GFP_KERNEL);

	switch (ret) {
	case 0: /* Page found */
		dout("page read submitted\n");
		return 0;
	case -ENOBUFS: /* Pages were not found, and can't be */
	case -ENODATA: /* Pages were not found */
		dout("page/inode not in cache\n");
		return ret;
	default:
		dout("%s: unknown error ret = %i\n", __func__, ret);
		return ret;
	}
}

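/*
 * Try to satisfy a readpages request from fscache; pages that fscache cannot
 * provide are left on @pages (with @nr_pages updated) for the caller to
 * fetch from the OSDs.
 */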
int ceph_readpages_from_fscache(struct inode *inode,
				struct address_space *mapping,
				struct list_head *pages,
				unsigned *nr_pages)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!cache_valid(ci))
		return -ENOBUFS;

	ret = fscache_read_or_alloc_pages(ci->fscache, mapping, pages, nr_pages,
					  ceph_readpage_from_fscache_complete,
					  NULL, mapping_gfp_mask(mapping));

	switch (ret) {
	case 0: /* All pages found */
		dout("all-page read submitted\n");
		return 0;
	case -ENOBUFS: /* Some pages were not found, and can't be */
	case -ENODATA: /* Some pages were not found */
		dout("page/inode not in cache\n");
		return ret;
	default:
		dout("%s: unknown error ret = %i\n", __func__, ret);
		return ret;
	}
}

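/*
 * Push a freshly read page into fscache, but only if fscache has marked the
 * page as cacheable (PG_fscache) and the cache is still valid.  If the write
 * cannot be started, the page is uncached again.
 */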
void ceph_readpage_to_fscache(struct inode *inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!PageFsCache(page))
		return;

	if (!cache_valid(ci))
		return;

	ret = fscache_write_page(ci->fscache, page, i_size_read(inode),
				 GFP_KERNEL);
	if (ret)
		fscache_uncache_page(ci->fscache, page);
}

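/*
 * Called when a page is being invalidated: wait for any in-flight fscache
 * write to it to finish, then drop it from the cache.
 */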
void ceph_invalidate_fscache_page(struct inode *inode, struct page *page)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (!PageFsCache(page))
		return;

	fscache_wait_on_page_write(ci->fscache, page);
	fscache_uncache_page(ci->fscache, page);
}

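/*
 * Tear down the per-filesystem cookie at unmount: remove its tracking entry
 * from ceph_fscache_list and relinquish the fsid index cookie.
 */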
void ceph_fscache_unregister_fs(struct ceph_fs_client *fsc)
{
	if (fscache_cookie_valid(fsc->fscache)) {
		struct ceph_fscache_entry *ent;
		bool found = false;

		mutex_lock(&ceph_fscache_lock);
		list_for_each_entry(ent, &ceph_fscache_list, list) {
			if (ent->fscache == fsc->fscache) {
				list_del(&ent->list);
				kfree(ent);
				found = true;
				break;
			}
		}
		WARN_ON_ONCE(!found);
		mutex_unlock(&ceph_fscache_lock);

		__fscache_relinquish_cookie(fsc->fscache, NULL, false);
	}
	fsc->fscache = NULL;
}

/*
 * Caller should hold CEPH_CAP_FILE_{RD,CACHE}.
 */
void ceph_fscache_revalidate_cookie(struct ceph_inode_info *ci)
{
	if (cache_valid(ci))
		return;

	/* reuse i_truncate_mutex. There should be no pending
	 * truncate while the caller holds CEPH_CAP_FILE_RD */
	mutex_lock(&ci->i_truncate_mutex);
	if (!cache_valid(ci)) {
		if (fscache_check_consistency(ci->fscache, &ci->i_vino))
			fscache_invalidate(ci->fscache);
		spin_lock(&ci->i_ceph_lock);
		ci->i_fscache_gen = ci->i_rdcache_gen;
		spin_unlock(&ci->i_ceph_lock);
	}
	mutex_unlock(&ci->i_truncate_mutex);
}
365