/* xref: /openbmc/linux/fs/ceph/super.c (revision d2ba09c1) */

#include <linux/ceph/ceph_debug.h>

#include <linux/backing-dev.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/parser.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/statfs.h>
#include <linux/string.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>

/*
 * Ceph superblock operations
 *
 * Handle the basics of mounting, unmounting.
 */

/*
 * super ops
 */
static void ceph_put_super(struct super_block *s)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(s);

	dout("put_super\n");
	ceph_mdsc_close_sessions(fsc->mdsc);
}

static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
	struct ceph_monmap *monmap = fsc->client->monc.monmap;
	struct ceph_statfs st;
	u64 fsid;
	int err;
	u64 data_pool;

	if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
		data_pool = fsc->mdsc->mdsmap->m_data_pg_pools[0];
	} else {
		data_pool = CEPH_NOPOOL;
	}

	dout("statfs\n");
	err = ceph_monc_do_statfs(&fsc->client->monc, data_pool, &st);
	if (err < 0)
		return err;

	/* fill in kstatfs */
	buf->f_type = CEPH_SUPER_MAGIC;  /* ?? */

	/*
	 * express utilization in terms of large blocks to avoid
	 * overflow on 32-bit machines.
	 *
	 * NOTE: for the time being, we make bsize == frsize to humor
	 * not-yet-ancient versions of glibc that are broken.
	 * Someday, we will probably want to report a real block
	 * size...  whatever that may mean for a network file system!
	 */
	buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
	buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;

	/*
	 * By default use root quota for stats; fallback to overall filesystem
	 * usage if using 'noquotadf' mount option or if the root dir doesn't
	 * have max_bytes quota set.
	 */
	if (ceph_test_mount_opt(fsc, NOQUOTADF) ||
	    !ceph_quota_update_statfs(fsc, buf)) {
		buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
		buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
		buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
	}

	buf->f_files = le64_to_cpu(st.num_objects);
	buf->f_ffree = -1;
	buf->f_namelen = NAME_MAX;

	/* Must convert the fsid, for consistent values across arches */
	fsid = le64_to_cpu(*(__le64 *)(&monmap->fsid)) ^
	       le64_to_cpu(*((__le64 *)&monmap->fsid + 1));
	buf->f_fsid.val[0] = fsid & 0xffffffff;
	buf->f_fsid.val[1] = fsid >> 32;

	return 0;
}
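
/*
 * Worked example (illustrative numbers only, assuming CEPH_BLOCK_SHIFT
 * is 22, i.e. 4 MiB "blocks"): f_bsize = f_frsize = 4194304, and a
 * cluster reporting st.kb = 8388608 (8 GiB) ends up with
 * f_blocks = 8388608 >> (22 - 10) = 2048 four-megabyte blocks.
 */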


static int ceph_sync_fs(struct super_block *sb, int wait)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

	if (!wait) {
		dout("sync_fs (non-blocking)\n");
		ceph_flush_dirty_caps(fsc->mdsc);
		dout("sync_fs (non-blocking) done\n");
		return 0;
	}

	dout("sync_fs (blocking)\n");
	ceph_osdc_sync(&fsc->client->osdc);
	ceph_mdsc_sync(fsc->mdsc);
	dout("sync_fs (blocking) done\n");
	return 0;
}

/*
 * mount options
 */
enum {
	Opt_wsize,
	Opt_rsize,
	Opt_rasize,
	Opt_caps_wanted_delay_min,
	Opt_caps_wanted_delay_max,
	Opt_readdir_max_entries,
	Opt_readdir_max_bytes,
	Opt_congestion_kb,
	Opt_last_int,
	/* int args above */
	Opt_snapdirname,
	Opt_mds_namespace,
	Opt_fscache_uniq,
	Opt_last_string,
	/* string args above */
	Opt_dirstat,
	Opt_nodirstat,
	Opt_rbytes,
	Opt_norbytes,
	Opt_asyncreaddir,
	Opt_noasyncreaddir,
	Opt_dcache,
	Opt_nodcache,
	Opt_ino32,
	Opt_noino32,
	Opt_fscache,
	Opt_nofscache,
	Opt_poolperm,
	Opt_nopoolperm,
	Opt_require_active_mds,
	Opt_norequire_active_mds,
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	Opt_acl,
#endif
	Opt_noacl,
	Opt_quotadf,
	Opt_noquotadf,
};

static match_table_t fsopt_tokens = {
	{Opt_wsize, "wsize=%d"},
	{Opt_rsize, "rsize=%d"},
	{Opt_rasize, "rasize=%d"},
	{Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
	{Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
	{Opt_readdir_max_entries, "readdir_max_entries=%d"},
	{Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
	{Opt_congestion_kb, "write_congestion_kb=%d"},
	/* int args above */
	{Opt_snapdirname, "snapdirname=%s"},
	{Opt_mds_namespace, "mds_namespace=%s"},
	{Opt_fscache_uniq, "fsc=%s"},
	/* string args above */
	{Opt_dirstat, "dirstat"},
	{Opt_nodirstat, "nodirstat"},
	{Opt_rbytes, "rbytes"},
	{Opt_norbytes, "norbytes"},
	{Opt_asyncreaddir, "asyncreaddir"},
	{Opt_noasyncreaddir, "noasyncreaddir"},
	{Opt_dcache, "dcache"},
	{Opt_nodcache, "nodcache"},
	{Opt_ino32, "ino32"},
	{Opt_noino32, "noino32"},
	{Opt_fscache, "fsc"},
	{Opt_nofscache, "nofsc"},
	{Opt_poolperm, "poolperm"},
	{Opt_nopoolperm, "nopoolperm"},
	{Opt_require_active_mds, "require_active_mds"},
	{Opt_norequire_active_mds, "norequire_active_mds"},
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	{Opt_acl, "acl"},
#endif
	{Opt_noacl, "noacl"},
	{Opt_quotadf, "quotadf"},
	{Opt_noquotadf, "noquotadf"},
	{-1, NULL}
};
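
/*
 * Example (illustrative): a mount option string such as
 * "rsize=16777216,noasyncreaddir,mds_namespace=cephfs" is tokenized
 * against the table above; options that libceph itself does not
 * consume are handed to parse_fsopt_token() below through the
 * ceph_parse_options() callback set up in parse_mount_options().
 */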

static int parse_fsopt_token(char *c, void *private)
{
	struct ceph_mount_options *fsopt = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token((char *)c, fsopt_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_snapdirname:
		kfree(fsopt->snapdir_name);
		fsopt->snapdir_name = kstrndup(argstr[0].from,
					       argstr[0].to-argstr[0].from,
					       GFP_KERNEL);
		if (!fsopt->snapdir_name)
			return -ENOMEM;
		break;
	case Opt_mds_namespace:
		kfree(fsopt->mds_namespace);
		fsopt->mds_namespace = kstrndup(argstr[0].from,
						argstr[0].to-argstr[0].from,
						GFP_KERNEL);
		if (!fsopt->mds_namespace)
			return -ENOMEM;
		break;
	case Opt_fscache_uniq:
		kfree(fsopt->fscache_uniq);
		fsopt->fscache_uniq = kstrndup(argstr[0].from,
					       argstr[0].to-argstr[0].from,
					       GFP_KERNEL);
		if (!fsopt->fscache_uniq)
			return -ENOMEM;
		fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
		break;
		/* misc */
	case Opt_wsize:
		if (intval < PAGE_SIZE || intval > CEPH_MAX_WRITE_SIZE)
			return -EINVAL;
		fsopt->wsize = ALIGN(intval, PAGE_SIZE);
		break;
	case Opt_rsize:
		if (intval < PAGE_SIZE || intval > CEPH_MAX_READ_SIZE)
			return -EINVAL;
		fsopt->rsize = ALIGN(intval, PAGE_SIZE);
		break;
	case Opt_rasize:
		if (intval < 0)
			return -EINVAL;
		fsopt->rasize = ALIGN(intval + PAGE_SIZE - 1, PAGE_SIZE);
		break;
	case Opt_caps_wanted_delay_min:
		if (intval < 1)
			return -EINVAL;
		fsopt->caps_wanted_delay_min = intval;
		break;
	case Opt_caps_wanted_delay_max:
		if (intval < 1)
			return -EINVAL;
		fsopt->caps_wanted_delay_max = intval;
		break;
	case Opt_readdir_max_entries:
		if (intval < 1)
			return -EINVAL;
		fsopt->max_readdir = intval;
		break;
	case Opt_readdir_max_bytes:
		if (intval < PAGE_SIZE && intval != 0)
			return -EINVAL;
		fsopt->max_readdir_bytes = intval;
		break;
	case Opt_congestion_kb:
		if (intval < 1024) /* at least 1M */
			return -EINVAL;
		fsopt->congestion_kb = intval;
		break;
	case Opt_dirstat:
		fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
		break;
	case Opt_nodirstat:
		fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
		break;
	case Opt_rbytes:
		fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
		break;
	case Opt_norbytes:
		fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
		break;
	case Opt_asyncreaddir:
		fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
		break;
	case Opt_noasyncreaddir:
		fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
		break;
	case Opt_dcache:
		fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
		break;
	case Opt_nodcache:
		fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
		break;
	case Opt_ino32:
		fsopt->flags |= CEPH_MOUNT_OPT_INO32;
		break;
	case Opt_noino32:
		fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
		break;
	case Opt_fscache:
		fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
		kfree(fsopt->fscache_uniq);
		fsopt->fscache_uniq = NULL;
		break;
	case Opt_nofscache:
		fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
		kfree(fsopt->fscache_uniq);
		fsopt->fscache_uniq = NULL;
		break;
	case Opt_poolperm:
		fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM;
		break;
	case Opt_nopoolperm:
		fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
		break;
	case Opt_require_active_mds:
		fsopt->flags &= ~CEPH_MOUNT_OPT_MOUNTWAIT;
		break;
	case Opt_norequire_active_mds:
		fsopt->flags |= CEPH_MOUNT_OPT_MOUNTWAIT;
		break;
	case Opt_quotadf:
		fsopt->flags &= ~CEPH_MOUNT_OPT_NOQUOTADF;
		break;
	case Opt_noquotadf:
		fsopt->flags |= CEPH_MOUNT_OPT_NOQUOTADF;
		break;
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	case Opt_acl:
		fsopt->sb_flags |= SB_POSIXACL;
		break;
#endif
	case Opt_noacl:
		fsopt->sb_flags &= ~SB_POSIXACL;
		break;
	default:
		BUG_ON(token);
	}
	return 0;
}
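
/*
 * Illustrative walk through the dispatch above: "wsize=16777216"
 * matches Opt_wsize (an int token, so match_int() fills intval first)
 * and is stored as fsopt->wsize = ALIGN(16777216, PAGE_SIZE), while a
 * bare flag such as "nodcache" only clears CEPH_MOUNT_OPT_DCACHE.
 */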

static void destroy_mount_options(struct ceph_mount_options *args)
{
	dout("destroy_mount_options %p\n", args);
	kfree(args->snapdir_name);
	kfree(args->mds_namespace);
	kfree(args->server_path);
	kfree(args->fscache_uniq);
	kfree(args);
}

static int strcmp_null(const char *s1, const char *s2)
{
	if (!s1 && !s2)
		return 0;
	if (s1 && !s2)
		return -1;
	if (!s1 && s2)
		return 1;
	return strcmp(s1, s2);
}

static int compare_mount_options(struct ceph_mount_options *new_fsopt,
				 struct ceph_options *new_opt,
				 struct ceph_fs_client *fsc)
{
	struct ceph_mount_options *fsopt1 = new_fsopt;
	struct ceph_mount_options *fsopt2 = fsc->mount_options;
	int ofs = offsetof(struct ceph_mount_options, snapdir_name);
	int ret;

	ret = memcmp(fsopt1, fsopt2, ofs);
	if (ret)
		return ret;

	ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
	if (ret)
		return ret;
	ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
	if (ret)
		return ret;
	ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
	if (ret)
		return ret;
	ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
	if (ret)
		return ret;

	return ceph_compare_options(new_opt, fsc->client);
}

static int parse_mount_options(struct ceph_mount_options **pfsopt,
			       struct ceph_options **popt,
			       int flags, char *options,
			       const char *dev_name)
{
	struct ceph_mount_options *fsopt;
	const char *dev_name_end;
	int err;

	if (!dev_name || !*dev_name)
		return -EINVAL;

	fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
	if (!fsopt)
		return -ENOMEM;

	dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);

	fsopt->sb_flags = flags;
	fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;

	fsopt->wsize = CEPH_MAX_WRITE_SIZE;
	fsopt->rsize = CEPH_MAX_READ_SIZE;
	fsopt->rasize = CEPH_RASIZE_DEFAULT;
	fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
	if (!fsopt->snapdir_name) {
		err = -ENOMEM;
		goto out;
	}

	fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
	fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
	fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
	fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
	fsopt->congestion_kb = default_congestion_kb();

	/*
	 * Distinguish the server list from the path in "dev_name".
	 * Internally we do not include the leading '/' in the path.
	 *
	 * "dev_name" will look like:
	 *     <server_spec>[,<server_spec>...]:[<path>]
	 * where
	 *     <server_spec> is <ip>[:<port>]
	 *     <path> is optional, but if present must begin with '/'
	 */
	dev_name_end = strchr(dev_name, '/');
	if (dev_name_end) {
		if (strlen(dev_name_end) > 1) {
			fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
			if (!fsopt->server_path) {
				err = -ENOMEM;
				goto out;
			}
		}
	} else {
		dev_name_end = dev_name + strlen(dev_name);
	}
	err = -EINVAL;
	dev_name_end--;		/* back up to ':' separator */
	if (dev_name_end < dev_name || *dev_name_end != ':') {
		pr_err("device name is missing path (no : separator in %s)\n",
				dev_name);
		goto out;
	}
	dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
	if (fsopt->server_path)
		dout("server path '%s'\n", fsopt->server_path);

	*popt = ceph_parse_options(options, dev_name, dev_name_end,
				 parse_fsopt_token, (void *)fsopt);
	if (IS_ERR(*popt)) {
		err = PTR_ERR(*popt);
		goto out;
	}

	/* success */
	*pfsopt = fsopt;
	return 0;

out:
	destroy_mount_options(fsopt);
	return err;
}
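
/*
 * Example (hypothetical addresses): for
 *     dev_name = "192.168.0.1:6789,192.168.0.2:6789:/backups"
 * the first '/' marks the path, so fsopt->server_path becomes
 * "/backups" and the monitor list passed to ceph_parse_options() is
 * everything before the final ':'.  With a bare "...:/" or "...:",
 * server_path stays NULL and the filesystem root is used.
 */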

/**
 * ceph_show_options - Show mount options in /proc/mounts
 * @m: seq_file to write to
 * @root: root of that (sub)tree
 */
static int ceph_show_options(struct seq_file *m, struct dentry *root)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
	struct ceph_mount_options *fsopt = fsc->mount_options;
	size_t pos;
	int ret;

	/* a comma between MNT/MS and client options */
	seq_putc(m, ',');
	pos = m->count;

	ret = ceph_print_client_options(m, fsc->client);
	if (ret)
		return ret;

	/* retract our comma if no client options */
	if (m->count == pos)
		m->count--;

	if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
		seq_puts(m, ",dirstat");
	if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES))
		seq_puts(m, ",rbytes");
	if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
		seq_puts(m, ",noasyncreaddir");
	if ((fsopt->flags & CEPH_MOUNT_OPT_DCACHE) == 0)
		seq_puts(m, ",nodcache");
	if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) {
		seq_show_option(m, "fsc", fsopt->fscache_uniq);
	}
	if (fsopt->flags & CEPH_MOUNT_OPT_NOPOOLPERM)
		seq_puts(m, ",nopoolperm");
	if (fsopt->flags & CEPH_MOUNT_OPT_NOQUOTADF)
		seq_puts(m, ",noquotadf");

#ifdef CONFIG_CEPH_FS_POSIX_ACL
	if (fsopt->sb_flags & SB_POSIXACL)
		seq_puts(m, ",acl");
	else
		seq_puts(m, ",noacl");
#endif

	if (fsopt->mds_namespace)
		seq_show_option(m, "mds_namespace", fsopt->mds_namespace);
	if (fsopt->wsize)
		seq_printf(m, ",wsize=%d", fsopt->wsize);
	if (fsopt->rsize != CEPH_MAX_READ_SIZE)
		seq_printf(m, ",rsize=%d", fsopt->rsize);
	if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
		seq_printf(m, ",rasize=%d", fsopt->rasize);
	if (fsopt->congestion_kb != default_congestion_kb())
		seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
	if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
		seq_printf(m, ",caps_wanted_delay_min=%d",
			 fsopt->caps_wanted_delay_min);
	if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
		seq_printf(m, ",caps_wanted_delay_max=%d",
			   fsopt->caps_wanted_delay_max);
	if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
		seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
	if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
		seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
	if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
		seq_show_option(m, "snapdirname", fsopt->snapdir_name);

	return 0;
}
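
/*
 * Illustrative fragment appended to a /proc/mounts line by the
 * function above (values made up):
 *     ",acl,mds_namespace=cephfs,wsize=16777216,rasize=4194304"
 * Most numeric options are only shown when they differ from their
 * defaults; wsize is the exception and is printed whenever non-zero.
 */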

/*
 * handle any mon messages the standard library doesn't understand.
 * return error if we don't either.
 */
static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
{
	struct ceph_fs_client *fsc = client->private;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_MDS_MAP:
		ceph_mdsc_handle_mdsmap(fsc->mdsc, msg);
		return 0;
	case CEPH_MSG_FS_MAP_USER:
		ceph_mdsc_handle_fsmap(fsc->mdsc, msg);
		return 0;
	default:
		return -1;
	}
}

/*
 * create a new fs client
 */
static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
					struct ceph_options *opt)
{
	struct ceph_fs_client *fsc;
	int page_count;
	size_t size;
	int err = -ENOMEM;

	fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
	if (!fsc)
		return ERR_PTR(-ENOMEM);

	fsc->client = ceph_create_client(opt, fsc);
	if (IS_ERR(fsc->client)) {
		err = PTR_ERR(fsc->client);
		goto fail;
	}
	fsc->client->extra_mon_dispatch = extra_mon_dispatch;

	if (!fsopt->mds_namespace) {
		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
				   0, true);
	} else {
		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_FSMAP,
				   0, false);
	}

	fsc->mount_options = fsopt;

	fsc->sb = NULL;
	fsc->mount_state = CEPH_MOUNT_MOUNTING;

	atomic_long_set(&fsc->writeback_count, 0);

	err = -ENOMEM;
	/*
	 * The number of concurrent works can be high but they don't need
	 * to be processed in parallel, limit concurrency.
	 */
	fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
	if (!fsc->wb_wq)
		goto fail_client;
	fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
	if (!fsc->pg_inv_wq)
		goto fail_wb_wq;
	fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
	if (!fsc->trunc_wq)
		goto fail_pg_inv_wq;

	/* set up mempools */
	err = -ENOMEM;
	page_count = fsc->mount_options->wsize >> PAGE_SHIFT;
	size = sizeof (struct page *) * (page_count ? page_count : 1);
	fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
	if (!fsc->wb_pagevec_pool)
		goto fail_trunc_wq;

	/* caps */
	fsc->min_caps = fsopt->max_readdir;

	return fsc;

fail_trunc_wq:
	destroy_workqueue(fsc->trunc_wq);
fail_pg_inv_wq:
	destroy_workqueue(fsc->pg_inv_wq);
fail_wb_wq:
	destroy_workqueue(fsc->wb_wq);
fail_client:
	ceph_destroy_client(fsc->client);
fail:
	kfree(fsc);
	return ERR_PTR(err);
}
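
/*
 * Sizing note on the code above: wb_pagevec_pool preallocates 10
 * arrays of page pointers, each big enough for a maximally sized
 * write (wsize / PAGE_SIZE entries), the usual mempool arrangement so
 * writeback can keep making progress under memory pressure.  The
 * workqueues are created with max_active == 1 since, per the comment
 * above, their work items gain nothing from running in parallel.
 */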

static void destroy_fs_client(struct ceph_fs_client *fsc)
{
	dout("destroy_fs_client %p\n", fsc);

	destroy_workqueue(fsc->wb_wq);
	destroy_workqueue(fsc->pg_inv_wq);
	destroy_workqueue(fsc->trunc_wq);

	mempool_destroy(fsc->wb_pagevec_pool);

	destroy_mount_options(fsc->mount_options);

	ceph_destroy_client(fsc->client);

	kfree(fsc);
	dout("destroy_fs_client %p done\n", fsc);
}

/*
 * caches
 */
struct kmem_cache *ceph_inode_cachep;
struct kmem_cache *ceph_cap_cachep;
struct kmem_cache *ceph_cap_flush_cachep;
struct kmem_cache *ceph_dentry_cachep;
struct kmem_cache *ceph_file_cachep;
struct kmem_cache *ceph_dir_file_cachep;

static void ceph_inode_init_once(void *foo)
{
	struct ceph_inode_info *ci = foo;
	inode_init_once(&ci->vfs_inode);
}

static int __init init_caches(void)
{
	int error = -ENOMEM;

	ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
				      sizeof(struct ceph_inode_info),
				      __alignof__(struct ceph_inode_info),
				      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
				      SLAB_ACCOUNT, ceph_inode_init_once);
	if (!ceph_inode_cachep)
		return -ENOMEM;

	ceph_cap_cachep = KMEM_CACHE(ceph_cap, SLAB_MEM_SPREAD);
	if (!ceph_cap_cachep)
		goto bad_cap;
	ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush,
					   SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (!ceph_cap_flush_cachep)
		goto bad_cap_flush;

	ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
					SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (!ceph_dentry_cachep)
		goto bad_dentry;

	ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD);
	if (!ceph_file_cachep)
		goto bad_file;

	ceph_dir_file_cachep = KMEM_CACHE(ceph_dir_file_info, SLAB_MEM_SPREAD);
	if (!ceph_dir_file_cachep)
		goto bad_dir_file;

	error = ceph_fscache_register();
	if (error)
		goto bad_fscache;

	return 0;

bad_fscache:
	kmem_cache_destroy(ceph_dir_file_cachep);
bad_dir_file:
	kmem_cache_destroy(ceph_file_cachep);
bad_file:
	kmem_cache_destroy(ceph_dentry_cachep);
bad_dentry:
	kmem_cache_destroy(ceph_cap_flush_cachep);
bad_cap_flush:
	kmem_cache_destroy(ceph_cap_cachep);
bad_cap:
	kmem_cache_destroy(ceph_inode_cachep);
	return error;
}

static void destroy_caches(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();

	kmem_cache_destroy(ceph_inode_cachep);
	kmem_cache_destroy(ceph_cap_cachep);
	kmem_cache_destroy(ceph_cap_flush_cachep);
	kmem_cache_destroy(ceph_dentry_cachep);
	kmem_cache_destroy(ceph_file_cachep);
	kmem_cache_destroy(ceph_dir_file_cachep);

	ceph_fscache_unregister();
}


/*
 * ceph_umount_begin - initiate forced umount.  Tear down the mount,
 * skipping steps that may hang while waiting for server(s).
 */
static void ceph_umount_begin(struct super_block *sb)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);

	dout("ceph_umount_begin - starting forced umount\n");
	if (!fsc)
		return;
	fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
	ceph_mdsc_force_umount(fsc->mdsc);
	return;
}

static const struct super_operations ceph_super_ops = {
	.alloc_inode	= ceph_alloc_inode,
	.destroy_inode	= ceph_destroy_inode,
	.write_inode    = ceph_write_inode,
	.drop_inode	= ceph_drop_inode,
	.sync_fs        = ceph_sync_fs,
	.put_super	= ceph_put_super,
	.show_options   = ceph_show_options,
	.statfs		= ceph_statfs,
	.umount_begin   = ceph_umount_begin,
};

/*
 * Bootstrap mount by opening the root directory.  Note the mount
 * @started time from caller, and time out if this takes too long.
 */
static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
				       const char *path,
				       unsigned long started)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req = NULL;
	int err;
	struct dentry *root;

	/* open dir */
	dout("open_root_inode opening '%s'\n", path);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_path1 = kstrdup(path, GFP_NOFS);
	if (!req->r_path1) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}

	req->r_ino1.ino = CEPH_INO_ROOT;
	req->r_ino1.snap = CEPH_NOSNAP;
	req->r_started = started;
	req->r_timeout = fsc->client->options->mount_timeout;
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_num_caps = 2;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (err == 0) {
		struct inode *inode = req->r_target_inode;
		req->r_target_inode = NULL;
		dout("open_root_inode success\n");
		root = d_make_root(inode);
		if (!root) {
			root = ERR_PTR(-ENOMEM);
			goto out;
		}
		dout("open_root_inode success, root dentry is %p\n", root);
	} else {
		root = ERR_PTR(err);
	}
out:
	ceph_mdsc_put_request(req);
	return root;
}




/*
 * mount: join the ceph cluster, and open root directory.
 */
static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
{
	int err;
	unsigned long started = jiffies;  /* note the start time */
	struct dentry *root;

	dout("mount start %p\n", fsc);
	mutex_lock(&fsc->client->mount_mutex);

	if (!fsc->sb->s_root) {
		const char *path;
		err = __ceph_open_session(fsc->client, started);
		if (err < 0)
			goto out;

		/* setup fscache */
		if (fsc->mount_options->flags & CEPH_MOUNT_OPT_FSCACHE) {
			err = ceph_fscache_register_fs(fsc);
			if (err < 0)
				goto out;
		}

		if (!fsc->mount_options->server_path) {
			path = "";
			dout("mount opening path \\t\n");
		} else {
			path = fsc->mount_options->server_path + 1;
			dout("mount opening path %s\n", path);
		}

		err = ceph_fs_debugfs_init(fsc);
		if (err < 0)
			goto out;

		root = open_root_dentry(fsc, path, started);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}
		fsc->sb->s_root = dget(root);
	} else {
		root = dget(fsc->sb->s_root);
	}

	fsc->mount_state = CEPH_MOUNT_MOUNTED;
	dout("mount success\n");
	mutex_unlock(&fsc->client->mount_mutex);
	return root;

out:
	mutex_unlock(&fsc->client->mount_mutex);
	return ERR_PTR(err);
}

static int ceph_set_super(struct super_block *s, void *data)
{
	struct ceph_fs_client *fsc = data;
	int ret;

	dout("set_super %p data %p\n", s, data);

	s->s_flags = fsc->mount_options->sb_flags;
	s->s_maxbytes = 1ULL << 40;  /* temp value until we get mdsmap */

	s->s_xattr = ceph_xattr_handlers;
	s->s_fs_info = fsc;
	fsc->sb = s;

	s->s_op = &ceph_super_ops;
	s->s_d_op = &ceph_dentry_ops;
	s->s_export_op = &ceph_export_ops;

	s->s_time_gran = 1000;  /* 1000 ns == 1 us */

	ret = set_anon_super(s, NULL);  /* what is that second arg for? */
	if (ret != 0)
		goto fail;

	return ret;

fail:
	s->s_fs_info = NULL;
	fsc->sb = NULL;
	return ret;
}

/*
 * share superblock if same fs AND options
 */
static int ceph_compare_super(struct super_block *sb, void *data)
{
	struct ceph_fs_client *new = data;
	struct ceph_mount_options *fsopt = new->mount_options;
	struct ceph_options *opt = new->client->options;
	struct ceph_fs_client *other = ceph_sb_to_client(sb);

	dout("ceph_compare_super %p\n", sb);

	if (compare_mount_options(fsopt, opt, other)) {
		dout("monitor(s)/mount options don't match\n");
		return 0;
	}
	if ((opt->flags & CEPH_OPT_FSID) &&
	    ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
		dout("fsid doesn't match\n");
		return 0;
	}
	if (fsopt->sb_flags != other->mount_options->sb_flags) {
		dout("flags differ\n");
		return 0;
	}
	return 1;
}

/*
 * construct our own bdi so we can control readahead, etc.
 */
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

static int ceph_setup_bdi(struct super_block *sb, struct ceph_fs_client *fsc)
{
	int err;

	err = super_setup_bdi_name(sb, "ceph-%ld",
				   atomic_long_inc_return(&bdi_seq));
	if (err)
		return err;

	/* set ra_pages based on rasize mount option? */
	sb->s_bdi->ra_pages = fsc->mount_options->rasize >> PAGE_SHIFT;

	/* set io_pages based on max osd read size */
	sb->s_bdi->io_pages = fsc->mount_options->rsize >> PAGE_SHIFT;

	return 0;
}
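
/*
 * Illustrative numbers, assuming 4 KiB pages and the default rasize
 * of 8 MiB: ra_pages becomes 8388608 >> 12 = 2048 pages, so
 * sequential readers may have up to 8 MiB of readahead in flight.
 */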

static struct dentry *ceph_mount(struct file_system_type *fs_type,
		       int flags, const char *dev_name, void *data)
{
	struct super_block *sb;
	struct ceph_fs_client *fsc;
	struct dentry *res;
	int err;
	int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
	struct ceph_mount_options *fsopt = NULL;
	struct ceph_options *opt = NULL;

	dout("ceph_mount\n");

#ifdef CONFIG_CEPH_FS_POSIX_ACL
	flags |= SB_POSIXACL;
#endif
	err = parse_mount_options(&fsopt, &opt, flags, data, dev_name);
	if (err < 0) {
		res = ERR_PTR(err);
		goto out_final;
	}

	/* create client (which we may/may not use) */
	fsc = create_fs_client(fsopt, opt);
	if (IS_ERR(fsc)) {
		res = ERR_CAST(fsc);
		destroy_mount_options(fsopt);
		ceph_destroy_options(opt);
		goto out_final;
	}

	err = ceph_mdsc_init(fsc);
	if (err < 0) {
		res = ERR_PTR(err);
		goto out;
	}

	if (ceph_test_opt(fsc->client, NOSHARE))
		compare_super = NULL;
	sb = sget(fs_type, compare_super, ceph_set_super, flags, fsc);
	if (IS_ERR(sb)) {
		res = ERR_CAST(sb);
		goto out;
	}

	if (ceph_sb_to_client(sb) != fsc) {
		ceph_mdsc_destroy(fsc);
		destroy_fs_client(fsc);
		fsc = ceph_sb_to_client(sb);
		dout("get_sb got existing client %p\n", fsc);
	} else {
		dout("get_sb using new client %p\n", fsc);
		err = ceph_setup_bdi(sb, fsc);
		if (err < 0) {
			res = ERR_PTR(err);
			goto out_splat;
		}
	}

	res = ceph_real_mount(fsc);
	if (IS_ERR(res))
		goto out_splat;
	dout("root %p inode %p ino %llx.%llx\n", res,
	     d_inode(res), ceph_vinop(d_inode(res)));
	return res;

out_splat:
	ceph_mdsc_close_sessions(fsc->mdsc);
	deactivate_locked_super(sb);
	goto out_final;

out:
	ceph_mdsc_destroy(fsc);
	destroy_fs_client(fsc);
out_final:
	dout("ceph_mount fail %ld\n", PTR_ERR(res));
	return res;
}
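
/*
 * Roadmap of the mount path above: parse_mount_options() ->
 * create_fs_client() -> ceph_mdsc_init() -> sget(), which either
 * reuses a compatible superblock (ceph_compare_super()) or
 * initializes a new one (ceph_set_super()); new superblocks then get
 * a bdi via ceph_setup_bdi() before ceph_real_mount() opens the
 * session and the root dentry.
 */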

static void ceph_kill_sb(struct super_block *s)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(s);
	dev_t dev = s->s_dev;

	dout("kill_sb %p\n", s);

	ceph_mdsc_pre_umount(fsc->mdsc);
	generic_shutdown_super(s);

	fsc->client->extra_mon_dispatch = NULL;
	ceph_fs_debugfs_cleanup(fsc);

	ceph_fscache_unregister_fs(fsc);

	ceph_mdsc_destroy(fsc);

	destroy_fs_client(fsc);
	free_anon_bdev(dev);
}

static struct file_system_type ceph_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ceph",
	.mount		= ceph_mount,
	.kill_sb	= ceph_kill_sb,
	.fs_flags	= FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("ceph");
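
/*
 * Typical invocation (illustrative; addresses and credentials are
 * site specific):
 *     mount -t ceph 192.168.0.1:6789:/ /mnt/cephfs -o name=admin,fsc
 * The "ceph" name plus MODULE_ALIAS_FS() above is what lets mount(8)
 * trigger an automatic module load for "-t ceph".
 */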

static int __init init_ceph(void)
{
	int ret = init_caches();
	if (ret)
		goto out;

	ceph_flock_init();
	ceph_xattr_init();
	ret = register_filesystem(&ceph_fs_type);
	if (ret)
		goto out_xattr;

	pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);

	return 0;

out_xattr:
	ceph_xattr_exit();
	destroy_caches();
out:
	return ret;
}

static void __exit exit_ceph(void)
{
	dout("exit_ceph\n");
	unregister_filesystem(&ceph_fs_type);
	ceph_xattr_exit();
	destroy_caches();
}

module_init(init_ceph);
module_exit(exit_ceph);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
MODULE_DESCRIPTION("Ceph filesystem for Linux");
MODULE_LICENSE("GPL");