xref: /openbmc/linux/fs/ceph/super.c (revision 6d99a79c)
1 
2 #include <linux/ceph/ceph_debug.h>
3 
4 #include <linux/backing-dev.h>
5 #include <linux/ctype.h>
6 #include <linux/fs.h>
7 #include <linux/inet.h>
8 #include <linux/in6.h>
9 #include <linux/module.h>
10 #include <linux/mount.h>
11 #include <linux/parser.h>
12 #include <linux/sched.h>
13 #include <linux/seq_file.h>
14 #include <linux/slab.h>
15 #include <linux/statfs.h>
16 #include <linux/string.h>
17 
18 #include "super.h"
19 #include "mds_client.h"
20 #include "cache.h"
21 
22 #include <linux/ceph/ceph_features.h>
23 #include <linux/ceph/decode.h>
24 #include <linux/ceph/mon_client.h>
25 #include <linux/ceph/auth.h>
26 #include <linux/ceph/debugfs.h>
27 
28 /*
29  * Ceph superblock operations
30  *
31  * Handle the basics of mounting, unmounting.
32  */
33 
34 /*
35  * super ops
36  */
/*
 * put_super: called by the VFS while the superblock is being torn
 * down.  Close the MDS sessions here; the rest of the client teardown
 * happens later in ceph_kill_sb()/destroy_fs_client().
 */
static void ceph_put_super(struct super_block *s)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(s);

	dout("put_super\n");
	ceph_mdsc_close_sessions(fsc->mdsc);
}
44 
/*
 * statfs: fill in filesystem usage statistics, fetched from the
 * monitor.  Returns 0 or a negative errno from the mon request.
 */
static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
	struct ceph_mon_client *monc = &fsc->client->monc;
	struct ceph_statfs st;
	u64 fsid;
	int err;
	u64 data_pool;

	/*
	 * If the fs has exactly one data pool, pass it to the monitor;
	 * otherwise request whole-cluster stats (CEPH_NOPOOL).
	 * Presumably this lets the mon report per-pool usage — confirm
	 * against ceph_monc_do_statfs().
	 */
	if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
		data_pool = fsc->mdsc->mdsmap->m_data_pg_pools[0];
	} else {
		data_pool = CEPH_NOPOOL;
	}

	dout("statfs\n");
	err = ceph_monc_do_statfs(monc, data_pool, &st);
	if (err < 0)
		return err;

	/* fill in kstatfs */
	buf->f_type = CEPH_SUPER_MAGIC;  /* ?? */

	/*
	 * express utilization in terms of large blocks to avoid
	 * overflow on 32-bit machines.
	 *
	 * NOTE: for the time being, we make bsize == frsize to humor
	 * not-yet-ancient versions of glibc that are broken.
	 * Someday, we will probably want to report a real block
	 * size...  whatever that may mean for a network file system!
	 */
	buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
	buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;

	/*
	 * By default use root quota for stats; fallback to overall filesystem
	 * usage if using 'noquotadf' mount option or if the root dir doesn't
	 * have max_bytes quota set.
	 */
	if (ceph_test_mount_opt(fsc, NOQUOTADF) ||
	    !ceph_quota_update_statfs(fsc, buf)) {
		/* st.* counts are in KiB; scale to CEPH_BLOCK-sized units */
		buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
		buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
		buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
	}

	buf->f_files = le64_to_cpu(st.num_objects);
	buf->f_ffree = -1;
	buf->f_namelen = NAME_MAX;

	/* Must convert the fsid, for consistent values across arches */
	mutex_lock(&monc->mutex);
	/* fold the 128-bit fsid into 64 bits by xor'ing its two halves */
	fsid = le64_to_cpu(*(__le64 *)(&monc->monmap->fsid)) ^
	       le64_to_cpu(*((__le64 *)&monc->monmap->fsid + 1));
	mutex_unlock(&monc->mutex);

	buf->f_fsid.val[0] = fsid & 0xffffffff;
	buf->f_fsid.val[1] = fsid >> 32;

	return 0;
}
107 
108 
109 static int ceph_sync_fs(struct super_block *sb, int wait)
110 {
111 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
112 
113 	if (!wait) {
114 		dout("sync_fs (non-blocking)\n");
115 		ceph_flush_dirty_caps(fsc->mdsc);
116 		dout("sync_fs (non-blocking) done\n");
117 		return 0;
118 	}
119 
120 	dout("sync_fs (blocking)\n");
121 	ceph_osdc_sync(&fsc->client->osdc);
122 	ceph_mdsc_sync(fsc->mdsc);
123 	dout("sync_fs (blocking) done\n");
124 	return 0;
125 }
126 
127 /*
128  * mount options
129  */
/*
 * Mount option token ids.  Opt_last_int and Opt_last_string are
 * sentinels: tokens below Opt_last_int take an integer argument,
 * tokens between the sentinels take a string argument, and the rest
 * are bare flags (see parse_fsopt_token()).
 */
enum {
	Opt_wsize,
	Opt_rsize,
	Opt_rasize,
	Opt_caps_wanted_delay_min,
	Opt_caps_wanted_delay_max,
	Opt_readdir_max_entries,
	Opt_readdir_max_bytes,
	Opt_congestion_kb,
	Opt_last_int,
	/* int args above */
	Opt_snapdirname,
	Opt_mds_namespace,
	Opt_fscache_uniq,
	Opt_last_string,
	/* string args above */
	Opt_dirstat,
	Opt_nodirstat,
	Opt_rbytes,
	Opt_norbytes,
	Opt_asyncreaddir,
	Opt_noasyncreaddir,
	Opt_dcache,
	Opt_nodcache,
	Opt_ino32,
	Opt_noino32,
	Opt_fscache,
	Opt_nofscache,
	Opt_poolperm,
	Opt_nopoolperm,
	Opt_require_active_mds,
	Opt_norequire_active_mds,
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	Opt_acl,
#endif
	Opt_noacl,
	Opt_quotadf,
	Opt_noquotadf,
	Opt_copyfrom,
	Opt_nocopyfrom,
};
171 
/* Option-string patterns mapped to the token ids above ({-1, NULL}-terminated). */
static match_table_t fsopt_tokens = {
	{Opt_wsize, "wsize=%d"},
	{Opt_rsize, "rsize=%d"},
	{Opt_rasize, "rasize=%d"},
	{Opt_caps_wanted_delay_min, "caps_wanted_delay_min=%d"},
	{Opt_caps_wanted_delay_max, "caps_wanted_delay_max=%d"},
	{Opt_readdir_max_entries, "readdir_max_entries=%d"},
	{Opt_readdir_max_bytes, "readdir_max_bytes=%d"},
	{Opt_congestion_kb, "write_congestion_kb=%d"},
	/* int args above */
	{Opt_snapdirname, "snapdirname=%s"},
	{Opt_mds_namespace, "mds_namespace=%s"},
	{Opt_fscache_uniq, "fsc=%s"},
	/* string args above */
	{Opt_dirstat, "dirstat"},
	{Opt_nodirstat, "nodirstat"},
	{Opt_rbytes, "rbytes"},
	{Opt_norbytes, "norbytes"},
	{Opt_asyncreaddir, "asyncreaddir"},
	{Opt_noasyncreaddir, "noasyncreaddir"},
	{Opt_dcache, "dcache"},
	{Opt_nodcache, "nodcache"},
	{Opt_ino32, "ino32"},
	{Opt_noino32, "noino32"},
	{Opt_fscache, "fsc"},
	{Opt_nofscache, "nofsc"},
	{Opt_poolperm, "poolperm"},
	{Opt_nopoolperm, "nopoolperm"},
	{Opt_require_active_mds, "require_active_mds"},
	{Opt_norequire_active_mds, "norequire_active_mds"},
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	{Opt_acl, "acl"},
#endif
	{Opt_noacl, "noacl"},
	{Opt_quotadf, "quotadf"},
	{Opt_noquotadf, "noquotadf"},
	{Opt_copyfrom, "copyfrom"},
	{Opt_nocopyfrom, "nocopyfrom"},
	{-1, NULL}
};
212 
/*
 * Parse one fs-level mount option.  Invoked as a callback from
 * ceph_parse_options() (see parse_mount_options()) for tokens libceph
 * itself does not recognize.  Returns 0 on success, -EINVAL for an
 * unknown option or out-of-range value, -ENOMEM on allocation failure.
 */
static int parse_fsopt_token(char *c, void *private)
{
	struct ceph_mount_options *fsopt = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token((char *)c, fsopt_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	/* NOTE: intval is only initialized (and valid) for int tokens */
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	/* string options: free any previous value, then duplicate the arg */
	case Opt_snapdirname:
		kfree(fsopt->snapdir_name);
		fsopt->snapdir_name = kstrndup(argstr[0].from,
					       argstr[0].to-argstr[0].from,
					       GFP_KERNEL);
		if (!fsopt->snapdir_name)
			return -ENOMEM;
		break;
	case Opt_mds_namespace:
		kfree(fsopt->mds_namespace);
		fsopt->mds_namespace = kstrndup(argstr[0].from,
						argstr[0].to-argstr[0].from,
						GFP_KERNEL);
		if (!fsopt->mds_namespace)
			return -ENOMEM;
		break;
	case Opt_fscache_uniq:
		/* "fsc=<uniq>" both sets the tag and enables fscache */
		kfree(fsopt->fscache_uniq);
		fsopt->fscache_uniq = kstrndup(argstr[0].from,
					       argstr[0].to-argstr[0].from,
					       GFP_KERNEL);
		if (!fsopt->fscache_uniq)
			return -ENOMEM;
		fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
		break;
		/* misc */
	case Opt_wsize:
		/* at least one page, at most the OSD write limit */
		if (intval < (int)PAGE_SIZE || intval > CEPH_MAX_WRITE_SIZE)
			return -EINVAL;
		fsopt->wsize = ALIGN(intval, PAGE_SIZE);
		break;
	case Opt_rsize:
		if (intval < (int)PAGE_SIZE || intval > CEPH_MAX_READ_SIZE)
			return -EINVAL;
		fsopt->rsize = ALIGN(intval, PAGE_SIZE);
		break;
	case Opt_rasize:
		if (intval < 0)
			return -EINVAL;
		fsopt->rasize = ALIGN(intval, PAGE_SIZE);
		break;
	case Opt_caps_wanted_delay_min:
		if (intval < 1)
			return -EINVAL;
		fsopt->caps_wanted_delay_min = intval;
		break;
	case Opt_caps_wanted_delay_max:
		if (intval < 1)
			return -EINVAL;
		fsopt->caps_wanted_delay_max = intval;
		break;
	case Opt_readdir_max_entries:
		if (intval < 1)
			return -EINVAL;
		fsopt->max_readdir = intval;
		break;
	case Opt_readdir_max_bytes:
		/* 0 is allowed; otherwise at least one page */
		if (intval < (int)PAGE_SIZE && intval != 0)
			return -EINVAL;
		fsopt->max_readdir_bytes = intval;
		break;
	case Opt_congestion_kb:
		if (intval < 1024) /* at least 1M */
			return -EINVAL;
		fsopt->congestion_kb = intval;
		break;
	/* flag options: set or clear the corresponding CEPH_MOUNT_OPT_* bit */
	case Opt_dirstat:
		fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
		break;
	case Opt_nodirstat:
		fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
		break;
	case Opt_rbytes:
		fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
		break;
	case Opt_norbytes:
		fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
		break;
	case Opt_asyncreaddir:
		fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
		break;
	case Opt_noasyncreaddir:
		fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
		break;
	case Opt_dcache:
		fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
		break;
	case Opt_nodcache:
		fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
		break;
	case Opt_ino32:
		fsopt->flags |= CEPH_MOUNT_OPT_INO32;
		break;
	case Opt_noino32:
		fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
		break;
	case Opt_fscache:
		/* bare "fsc" enables fscache and drops any earlier uniq tag */
		fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
		kfree(fsopt->fscache_uniq);
		fsopt->fscache_uniq = NULL;
		break;
	case Opt_nofscache:
		fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
		kfree(fsopt->fscache_uniq);
		fsopt->fscache_uniq = NULL;
		break;
	case Opt_poolperm:
		fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM;
		break;
	case Opt_nopoolperm:
		fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
		break;
	case Opt_require_active_mds:
		fsopt->flags &= ~CEPH_MOUNT_OPT_MOUNTWAIT;
		break;
	case Opt_norequire_active_mds:
		fsopt->flags |= CEPH_MOUNT_OPT_MOUNTWAIT;
		break;
	case Opt_quotadf:
		fsopt->flags &= ~CEPH_MOUNT_OPT_NOQUOTADF;
		break;
	case Opt_noquotadf:
		fsopt->flags |= CEPH_MOUNT_OPT_NOQUOTADF;
		break;
	case Opt_copyfrom:
		fsopt->flags &= ~CEPH_MOUNT_OPT_NOCOPYFROM;
		break;
	case Opt_nocopyfrom:
		fsopt->flags |= CEPH_MOUNT_OPT_NOCOPYFROM;
		break;
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	case Opt_acl:
		fsopt->sb_flags |= SB_POSIXACL;
		break;
#endif
	case Opt_noacl:
		fsopt->sb_flags &= ~SB_POSIXACL;
		break;
	default:
		/* match_token() succeeded, so every token must be handled */
		BUG_ON(token);
	}
	return 0;
}
381 
/* Free a ceph_mount_options and every string it owns. */
static void destroy_mount_options(struct ceph_mount_options *args)
{
	dout("destroy_mount_options %p\n", args);
	kfree(args->snapdir_name);
	kfree(args->mds_namespace);
	kfree(args->server_path);
	kfree(args->fscache_uniq);
	kfree(args);
}
391 
/*
 * strcmp() that tolerates NULL: two NULLs compare equal, and a
 * non-NULL string orders before a NULL one (returns -1), while a
 * NULL string orders after a non-NULL one (returns 1).
 */
static int strcmp_null(const char *s1, const char *s2)
{
	if (s1 && s2)
		return strcmp(s1, s2);
	if (s1)
		return -1;
	return s2 ? 1 : 0;
}
402 
/*
 * Compare new mount options against an existing fs client's options.
 * Returns 0 when they match (superblock may be shared), non-zero
 * otherwise.
 */
static int compare_mount_options(struct ceph_mount_options *new_fsopt,
				 struct ceph_options *new_opt,
				 struct ceph_fs_client *fsc)
{
	struct ceph_mount_options *fsopt1 = new_fsopt;
	struct ceph_mount_options *fsopt2 = fsc->mount_options;
	int ofs = offsetof(struct ceph_mount_options, snapdir_name);
	int ret;

	/*
	 * memcmp() the fixed-size fields that precede snapdir_name,
	 * then compare the string members individually.  NOTE(review):
	 * the memcmp also covers padding bytes in that range; both
	 * structs come from kzalloc() in parse_mount_options(), so the
	 * padding is zeroed.
	 */
	ret = memcmp(fsopt1, fsopt2, ofs);
	if (ret)
		return ret;

	ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
	if (ret)
		return ret;
	ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
	if (ret)
		return ret;
	ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
	if (ret)
		return ret;
	ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
	if (ret)
		return ret;

	/* finally, compare the libceph-level options (monitors, etc.) */
	return ceph_compare_options(new_opt, fsc->client);
}
431 
/*
 * Parse the device name and option string for a mount.  On success,
 * returns 0 and hands ownership of a newly allocated
 * ceph_mount_options (*pfsopt) and ceph_options (*popt) to the
 * caller; on failure returns a negative errno and frees everything
 * allocated here.
 */
static int parse_mount_options(struct ceph_mount_options **pfsopt,
			       struct ceph_options **popt,
			       int flags, char *options,
			       const char *dev_name)
{
	struct ceph_mount_options *fsopt;
	const char *dev_name_end;
	int err;

	if (!dev_name || !*dev_name)
		return -EINVAL;

	fsopt = kzalloc(sizeof(*fsopt), GFP_KERNEL);
	if (!fsopt)
		return -ENOMEM;

	dout("parse_mount_options %p, dev_name '%s'\n", fsopt, dev_name);

	/* defaults; overridden below by parse_fsopt_token() callbacks */
	fsopt->sb_flags = flags;
	fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;

	fsopt->wsize = CEPH_MAX_WRITE_SIZE;
	fsopt->rsize = CEPH_MAX_READ_SIZE;
	fsopt->rasize = CEPH_RASIZE_DEFAULT;
	fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
	if (!fsopt->snapdir_name) {
		err = -ENOMEM;
		goto out;
	}

	fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
	fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
	fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
	fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
	fsopt->congestion_kb = default_congestion_kb();

	/*
	 * Distinguish the server list from the path in "dev_name".
	 * Internally we do not include the leading '/' in the path.
	 *
	 * "dev_name" will look like:
	 *     <server_spec>[,<server_spec>...]:[<path>]
	 * where
	 *     <server_spec> is <ip>[:<port>]
	 *     <path> is optional, but if present must begin with '/'
	 */
	dev_name_end = strchr(dev_name, '/');
	if (dev_name_end) {
		/* skip over just "/": a bare root path is stored as NULL */
		if (strlen(dev_name_end) > 1) {
			fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
			if (!fsopt->server_path) {
				err = -ENOMEM;
				goto out;
			}
		}
	} else {
		dev_name_end = dev_name + strlen(dev_name);
	}
	err = -EINVAL;
	/* the char before '/' (or before the string's end) must be ':' */
	dev_name_end--;		/* back up to ':' separator */
	if (dev_name_end < dev_name || *dev_name_end != ':') {
		pr_err("device name is missing path (no : separator in %s)\n",
				dev_name);
		goto out;
	}
	dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
	if (fsopt->server_path)
		dout("server path '%s'\n", fsopt->server_path);

	/* libceph parses its options and calls us back for the fs ones */
	*popt = ceph_parse_options(options, dev_name, dev_name_end,
				 parse_fsopt_token, (void *)fsopt);
	if (IS_ERR(*popt)) {
		err = PTR_ERR(*popt);
		goto out;
	}

	/* success */
	*pfsopt = fsopt;
	return 0;

out:
	destroy_mount_options(fsopt);
	return err;
}
516 
517 /**
518  * ceph_show_options - Show mount options in /proc/mounts
519  * @m: seq_file to write to
520  * @root: root of that (sub)tree
521  */
static int ceph_show_options(struct seq_file *m, struct dentry *root)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
	struct ceph_mount_options *fsopt = fsc->mount_options;
	size_t pos;
	int ret;

	/* a comma between MNT/MS and client options */
	seq_putc(m, ',');
	pos = m->count;

	/* libceph-level options (monitors, auth, etc.) first */
	ret = ceph_print_client_options(m, fsc->client);
	if (ret)
		return ret;

	/* retract our comma if no client options */
	if (m->count == pos)
		m->count--;

	/* boolean flag options */
	if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
		seq_puts(m, ",dirstat");
	if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES))
		seq_puts(m, ",rbytes");
	if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
		seq_puts(m, ",noasyncreaddir");
	if ((fsopt->flags & CEPH_MOUNT_OPT_DCACHE) == 0)
		seq_puts(m, ",nodcache");
	if (fsopt->flags & CEPH_MOUNT_OPT_INO32)
		seq_puts(m, ",ino32");
	if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) {
		/* fscache_uniq may be NULL here (bare "fsc" option) */
		seq_show_option(m, "fsc", fsopt->fscache_uniq);
	}
	if (fsopt->flags & CEPH_MOUNT_OPT_NOPOOLPERM)
		seq_puts(m, ",nopoolperm");
	if (fsopt->flags & CEPH_MOUNT_OPT_NOQUOTADF)
		seq_puts(m, ",noquotadf");

#ifdef CONFIG_CEPH_FS_POSIX_ACL
	if (fsopt->sb_flags & SB_POSIXACL)
		seq_puts(m, ",acl");
	else
		seq_puts(m, ",noacl");
#endif

	if (fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM)
		seq_puts(m, ",nocopyfrom");

	if (fsopt->mds_namespace)
		seq_show_option(m, "mds_namespace", fsopt->mds_namespace);

	/* numeric options: only shown when they differ from the defaults
	 * established in parse_mount_options() */
	if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
		seq_printf(m, ",wsize=%d", fsopt->wsize);
	if (fsopt->rsize != CEPH_MAX_READ_SIZE)
		seq_printf(m, ",rsize=%d", fsopt->rsize);
	if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
		seq_printf(m, ",rasize=%d", fsopt->rasize);
	if (fsopt->congestion_kb != default_congestion_kb())
		seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
	if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
		seq_printf(m, ",caps_wanted_delay_min=%d",
			 fsopt->caps_wanted_delay_min);
	if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
		seq_printf(m, ",caps_wanted_delay_max=%d",
			   fsopt->caps_wanted_delay_max);
	if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
		seq_printf(m, ",readdir_max_entries=%d", fsopt->max_readdir);
	if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
		seq_printf(m, ",readdir_max_bytes=%d", fsopt->max_readdir_bytes);
	if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
		seq_show_option(m, "snapdirname", fsopt->snapdir_name);

	return 0;
}
594 
595 /*
596  * handle any mon messages the standard library doesn't understand.
597  * return error if we don't either.
598  */
599 static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
600 {
601 	struct ceph_fs_client *fsc = client->private;
602 	int type = le16_to_cpu(msg->hdr.type);
603 
604 	switch (type) {
605 	case CEPH_MSG_MDS_MAP:
606 		ceph_mdsc_handle_mdsmap(fsc->mdsc, msg);
607 		return 0;
608 	case CEPH_MSG_FS_MAP_USER:
609 		ceph_mdsc_handle_fsmap(fsc->mdsc, msg);
610 		return 0;
611 	default:
612 		return -1;
613 	}
614 }
615 
616 /*
617  * create a new fs client
618  *
619  * Success or not, this function consumes @fsopt and @opt.
620  */
static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
					struct ceph_options *opt)
{
	struct ceph_fs_client *fsc;
	int page_count;
	size_t size;
	int err;

	fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
	if (!fsc) {
		err = -ENOMEM;
		goto fail;
	}

	fsc->client = ceph_create_client(opt, fsc);
	if (IS_ERR(fsc->client)) {
		err = PTR_ERR(fsc->client);
		goto fail;
	}
	opt = NULL; /* fsc->client now owns this */

	fsc->client->extra_mon_dispatch = extra_mon_dispatch;
	fsc->client->osdc.abort_on_full = true;

	/*
	 * Subscribe to either the mdsmap directly or, when a named
	 * mds_namespace was requested, to the fsmap (from which the
	 * right mdsmap is derived later).
	 */
	if (!fsopt->mds_namespace) {
		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
				   0, true);
	} else {
		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_FSMAP,
				   0, false);
	}

	/* fsc owns fsopt from here on (freed in destroy_fs_client()) */
	fsc->mount_options = fsopt;

	fsc->sb = NULL;
	fsc->mount_state = CEPH_MOUNT_MOUNTING;

	atomic_long_set(&fsc->writeback_count, 0);

	err = -ENOMEM;
	/*
	 * The number of concurrent works can be high but they don't need
	 * to be processed in parallel, limit concurrency.
	 */
	fsc->wb_wq = alloc_workqueue("ceph-writeback", 0, 1);
	if (!fsc->wb_wq)
		goto fail_client;
	fsc->pg_inv_wq = alloc_workqueue("ceph-pg-invalid", 0, 1);
	if (!fsc->pg_inv_wq)
		goto fail_wb_wq;
	fsc->trunc_wq = alloc_workqueue("ceph-trunc", 0, 1);
	if (!fsc->trunc_wq)
		goto fail_pg_inv_wq;

	/* set up mempools */
	err = -ENOMEM;
	/* one page-vector per max-sized write (at least one page) */
	page_count = fsc->mount_options->wsize >> PAGE_SHIFT;
	size = sizeof (struct page *) * (page_count ? page_count : 1);
	fsc->wb_pagevec_pool = mempool_create_kmalloc_pool(10, size);
	if (!fsc->wb_pagevec_pool)
		goto fail_trunc_wq;

	/* caps */
	fsc->min_caps = fsopt->max_readdir;

	return fsc;

	/* unwind in reverse order of acquisition */
fail_trunc_wq:
	destroy_workqueue(fsc->trunc_wq);
fail_pg_inv_wq:
	destroy_workqueue(fsc->pg_inv_wq);
fail_wb_wq:
	destroy_workqueue(fsc->wb_wq);
fail_client:
	ceph_destroy_client(fsc->client);
fail:
	kfree(fsc);
	/* we promised to consume both @opt and @fsopt, success or not */
	if (opt)
		ceph_destroy_options(opt);
	destroy_mount_options(fsopt);
	return ERR_PTR(err);
}
703 
/* Drain all per-fs workqueues; called from ceph_kill_sb() before
 * generic_shutdown_super(). */
static void flush_fs_workqueues(struct ceph_fs_client *fsc)
{
	flush_workqueue(fsc->wb_wq);
	flush_workqueue(fsc->pg_inv_wq);
	flush_workqueue(fsc->trunc_wq);
}
710 
/* Tear down everything create_fs_client() built, including the mount
 * options and the libceph client it owns. */
static void destroy_fs_client(struct ceph_fs_client *fsc)
{
	dout("destroy_fs_client %p\n", fsc);

	destroy_workqueue(fsc->wb_wq);
	destroy_workqueue(fsc->pg_inv_wq);
	destroy_workqueue(fsc->trunc_wq);

	mempool_destroy(fsc->wb_pagevec_pool);

	destroy_mount_options(fsc->mount_options);

	ceph_destroy_client(fsc->client);

	kfree(fsc);
	/* NOTE(review): fsc is already freed here; the dout only prints
	 * the stale pointer value and never dereferences it */
	dout("destroy_fs_client %p done\n", fsc);
}
728 
729 /*
730  * caches
731  */
/* slab caches for frequently allocated objects; created in
 * init_caches(), destroyed in destroy_caches() */
struct kmem_cache *ceph_inode_cachep;
struct kmem_cache *ceph_cap_cachep;
struct kmem_cache *ceph_cap_flush_cachep;
struct kmem_cache *ceph_dentry_cachep;
struct kmem_cache *ceph_file_cachep;
struct kmem_cache *ceph_dir_file_cachep;
738 
/* slab constructor: runs once when a slab object is first created,
 * not on every allocation */
static void ceph_inode_init_once(void *foo)
{
	struct ceph_inode_info *ci = foo;
	inode_init_once(&ci->vfs_inode);
}
744 
/*
 * Create all slab caches and register with fscache.  On any failure,
 * unwind the caches created so far and return -ENOMEM (or the
 * fscache registration error).
 */
static int __init init_caches(void)
{
	int error = -ENOMEM;

	ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
				      sizeof(struct ceph_inode_info),
				      __alignof__(struct ceph_inode_info),
				      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
				      SLAB_ACCOUNT, ceph_inode_init_once);
	if (!ceph_inode_cachep)
		return -ENOMEM;

	ceph_cap_cachep = KMEM_CACHE(ceph_cap, SLAB_MEM_SPREAD);
	if (!ceph_cap_cachep)
		goto bad_cap;
	ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush,
					   SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (!ceph_cap_flush_cachep)
		goto bad_cap_flush;

	ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
					SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
	if (!ceph_dentry_cachep)
		goto bad_dentry;

	ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD);
	if (!ceph_file_cachep)
		goto bad_file;

	ceph_dir_file_cachep = KMEM_CACHE(ceph_dir_file_info, SLAB_MEM_SPREAD);
	if (!ceph_dir_file_cachep)
		goto bad_dir_file;

	error = ceph_fscache_register();
	if (error)
		goto bad_fscache;

	return 0;

	/* error unwind, in reverse order of creation */
bad_fscache:
	kmem_cache_destroy(ceph_dir_file_cachep);
bad_dir_file:
	kmem_cache_destroy(ceph_file_cachep);
bad_file:
	kmem_cache_destroy(ceph_dentry_cachep);
bad_dentry:
	kmem_cache_destroy(ceph_cap_flush_cachep);
bad_cap_flush:
	kmem_cache_destroy(ceph_cap_cachep);
bad_cap:
	kmem_cache_destroy(ceph_inode_cachep);
	return error;
}
798 
/* Tear down everything init_caches() created. */
static void destroy_caches(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();

	kmem_cache_destroy(ceph_inode_cachep);
	kmem_cache_destroy(ceph_cap_cachep);
	kmem_cache_destroy(ceph_cap_flush_cachep);
	kmem_cache_destroy(ceph_dentry_cachep);
	kmem_cache_destroy(ceph_file_cachep);
	kmem_cache_destroy(ceph_dir_file_cachep);

	ceph_fscache_unregister();
}
816 
817 
818 /*
819  * ceph_umount_begin - initiate forced umount.  Tear down the
820  * mount, skipping steps that may hang while waiting for server(s).
821  */
822 static void ceph_umount_begin(struct super_block *sb)
823 {
824 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
825 
826 	dout("ceph_umount_begin - starting forced umount\n");
827 	if (!fsc)
828 		return;
829 	fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
830 	ceph_osdc_abort_requests(&fsc->client->osdc, -EIO);
831 	ceph_mdsc_force_umount(fsc->mdsc);
832 	return;
833 }
834 
/* superblock operations; installed on s->s_op in ceph_set_super() */
static const struct super_operations ceph_super_ops = {
	.alloc_inode	= ceph_alloc_inode,
	.destroy_inode	= ceph_destroy_inode,
	.write_inode    = ceph_write_inode,
	.drop_inode	= ceph_drop_inode,
	.sync_fs        = ceph_sync_fs,
	.put_super	= ceph_put_super,
	.show_options   = ceph_show_options,
	.statfs		= ceph_statfs,
	.umount_begin   = ceph_umount_begin,
};
846 
847 /*
848  * Bootstrap mount by opening the root directory.  Note the mount
849  * @started time from caller, and time out if this takes too long.
850  */
static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
				       const char *path,
				       unsigned long started)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req = NULL;
	int err;
	struct dentry *root;

	/* open dir: issue a GETATTR on @path relative to the root ino */
	dout("open_root_inode opening '%s'\n", path);
	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
	if (IS_ERR(req))
		return ERR_CAST(req);
	req->r_path1 = kstrdup(path, GFP_NOFS);
	if (!req->r_path1) {
		root = ERR_PTR(-ENOMEM);
		goto out;
	}

	req->r_ino1.ino = CEPH_INO_ROOT;
	req->r_ino1.snap = CEPH_NOSNAP;
	req->r_started = started;
	/* time out relative to @started, per the caller's mount deadline */
	req->r_timeout = fsc->client->options->mount_timeout;
	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
	req->r_num_caps = 2;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (err == 0) {
		struct inode *inode = req->r_target_inode;
		/* steal the inode reference from the request */
		req->r_target_inode = NULL;
		dout("open_root_inode success\n");
		/* d_make_root consumes the inode ref even on failure */
		root = d_make_root(inode);
		if (!root) {
			root = ERR_PTR(-ENOMEM);
			goto out;
		}
		dout("open_root_inode success, root dentry is %p\n", root);
	} else {
		root = ERR_PTR(err);
	}
out:
	ceph_mdsc_put_request(req);
	return root;
}
895 
896 
897 
898 
899 /*
900  * mount: join the ceph cluster, and open root directory.
901  */
static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc)
{
	int err;
	unsigned long started = jiffies;  /* note the start time */
	struct dentry *root;

	dout("mount start %p\n", fsc);
	mutex_lock(&fsc->client->mount_mutex);

	/* first mount of this superblock? (sget() may have returned an
	 * already-mounted sb, in which case s_root is set) */
	if (!fsc->sb->s_root) {
		const char *path;
		err = __ceph_open_session(fsc->client, started);
		if (err < 0)
			goto out;

		/* setup fscache */
		if (fsc->mount_options->flags & CEPH_MOUNT_OPT_FSCACHE) {
			err = ceph_fscache_register_fs(fsc);
			if (err < 0)
				goto out;
		}

		/* server_path includes a leading '/', which we skip */
		if (!fsc->mount_options->server_path) {
			path = "";
			dout("mount opening path \\t\n");
		} else {
			path = fsc->mount_options->server_path + 1;
			dout("mount opening path %s\n", path);
		}

		err = ceph_fs_debugfs_init(fsc);
		if (err < 0)
			goto out;

		root = open_root_dentry(fsc, path, started);
		if (IS_ERR(root)) {
			err = PTR_ERR(root);
			goto out;
		}
		fsc->sb->s_root = dget(root);
	} else {
		root = dget(fsc->sb->s_root);
	}

	fsc->mount_state = CEPH_MOUNT_MOUNTED;
	dout("mount success\n");
	mutex_unlock(&fsc->client->mount_mutex);
	return root;

out:
	mutex_unlock(&fsc->client->mount_mutex);
	return ERR_PTR(err);
}
955 
/*
 * sget() "set" callback: initialize a freshly allocated superblock
 * and tie it to our fs client.  Returns 0 or a negative errno.
 */
static int ceph_set_super(struct super_block *s, void *data)
{
	struct ceph_fs_client *fsc = data;
	int ret;

	dout("set_super %p data %p\n", s, data);

	s->s_flags = fsc->mount_options->sb_flags;
	s->s_maxbytes = MAX_LFS_FILESIZE;

	s->s_xattr = ceph_xattr_handlers;
	/* link sb and fsc both ways */
	s->s_fs_info = fsc;
	fsc->sb = s;
	fsc->max_file_size = 1ULL << 40; /* temp value until we get mdsmap */

	s->s_op = &ceph_super_ops;
	s->s_d_op = &ceph_dentry_ops;
	s->s_export_op = &ceph_export_ops;

	s->s_time_gran = 1000;  /* 1000 ns == 1 us */

	ret = set_anon_super(s, NULL);  /* what is that second arg for? */
	if (ret != 0)
		goto fail;

	return ret;

fail:
	/* undo the sb<->fsc linkage established above */
	s->s_fs_info = NULL;
	fsc->sb = NULL;
	return ret;
}
988 
989 /*
990  * share superblock if same fs AND options
991  */
/*
 * sget() "test" callback: returns 1 when an existing superblock has
 * the same cluster AND the same mount options and may therefore be
 * shared, 0 otherwise.
 */
static int ceph_compare_super(struct super_block *sb, void *data)
{
	struct ceph_fs_client *new = data;
	struct ceph_mount_options *fsopt = new->mount_options;
	struct ceph_options *opt = new->client->options;
	struct ceph_fs_client *other = ceph_sb_to_client(sb);

	dout("ceph_compare_super %p\n", sb);

	if (compare_mount_options(fsopt, opt, other)) {
		dout("monitor(s)/mount options don't match\n");
		return 0;
	}
	if ((opt->flags & CEPH_OPT_FSID) &&
	    ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
		dout("fsid doesn't match\n");
		return 0;
	}
	if (fsopt->sb_flags != other->mount_options->sb_flags) {
		dout("flags differ\n");
		return 0;
	}
	return 1;
}
1016 
1017 /*
1018  * construct our own bdi so we can control readahead, etc.
1019  */
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);	/* next suffix for the "ceph-%ld" bdi name */
1021 
/* Create the per-sb backing_dev_info and size its readahead/IO windows
 * from the rasize/rsize mount options.  Returns 0 or a negative errno. */
static int ceph_setup_bdi(struct super_block *sb, struct ceph_fs_client *fsc)
{
	int err;

	err = super_setup_bdi_name(sb, "ceph-%ld",
				   atomic_long_inc_return(&bdi_seq));
	if (err)
		return err;

	/* set ra_pages based on rasize mount option? */
	sb->s_bdi->ra_pages = fsc->mount_options->rasize >> PAGE_SHIFT;

	/* set io_pages based on max osd read size */
	sb->s_bdi->io_pages = fsc->mount_options->rsize >> PAGE_SHIFT;

	return 0;
}
1039 
static struct dentry *ceph_mount(struct file_system_type *fs_type,
		       int flags, const char *dev_name, void *data)
{
	struct super_block *sb;
	struct ceph_fs_client *fsc;
	struct dentry *res;
	int err;
	int (*compare_super)(struct super_block *, void *) = ceph_compare_super;
	struct ceph_mount_options *fsopt = NULL;
	struct ceph_options *opt = NULL;

	dout("ceph_mount\n");

#ifdef CONFIG_CEPH_FS_POSIX_ACL
	flags |= SB_POSIXACL;
#endif
	err = parse_mount_options(&fsopt, &opt, flags, data, dev_name);
	if (err < 0) {
		res = ERR_PTR(err);
		goto out_final;
	}

	/* create client (which we may/may not use) */
	/* NOTE: fsopt and opt are consumed by create_fs_client() either way */
	fsc = create_fs_client(fsopt, opt);
	if (IS_ERR(fsc)) {
		res = ERR_CAST(fsc);
		goto out_final;
	}

	err = ceph_mdsc_init(fsc);
	if (err < 0) {
		res = ERR_PTR(err);
		goto out;
	}

	/* "noshare" disables superblock sharing entirely */
	if (ceph_test_opt(fsc->client, NOSHARE))
		compare_super = NULL;
	sb = sget(fs_type, compare_super, ceph_set_super, flags, fsc);
	if (IS_ERR(sb)) {
		res = ERR_CAST(sb);
		goto out;
	}

	if (ceph_sb_to_client(sb) != fsc) {
		/* sget() matched an existing sb: drop our speculative
		 * client and use the one already attached */
		ceph_mdsc_destroy(fsc);
		destroy_fs_client(fsc);
		fsc = ceph_sb_to_client(sb);
		dout("get_sb got existing client %p\n", fsc);
	} else {
		dout("get_sb using new client %p\n", fsc);
		err = ceph_setup_bdi(sb, fsc);
		if (err < 0) {
			res = ERR_PTR(err);
			goto out_splat;
		}
	}

	res = ceph_real_mount(fsc);
	if (IS_ERR(res))
		goto out_splat;
	dout("root %p inode %p ino %llx.%llx\n", res,
	     d_inode(res), ceph_vinop(d_inode(res)));
	return res;

out_splat:
	/* sb exists and owns fsc here; deactivate tears both down */
	ceph_mdsc_close_sessions(fsc->mdsc);
	deactivate_locked_super(sb);
	goto out_final;

out:
	ceph_mdsc_destroy(fsc);
	destroy_fs_client(fsc);
out_final:
	dout("ceph_mount fail %ld\n", PTR_ERR(res));
	return res;
}
1116 
/* kill_sb: full superblock teardown, including the fs client. */
static void ceph_kill_sb(struct super_block *s)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(s);
	/* remember the anon dev so we can release it after shutdown */
	dev_t dev = s->s_dev;

	dout("kill_sb %p\n", s);

	ceph_mdsc_pre_umount(fsc->mdsc);
	flush_fs_workqueues(fsc);

	generic_shutdown_super(s);

	fsc->client->extra_mon_dispatch = NULL;
	ceph_fs_debugfs_cleanup(fsc);

	ceph_fscache_unregister_fs(fsc);

	ceph_mdsc_destroy(fsc);

	destroy_fs_client(fsc);
	free_anon_bdev(dev);
}
1139 
/* FS_RENAME_DOES_D_MOVE: the fs calls d_move() on rename itself —
 * see Documentation/filesystems for the flag's contract */
static struct file_system_type ceph_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "ceph",
	.mount		= ceph_mount,
	.kill_sb	= ceph_kill_sb,
	.fs_flags	= FS_RENAME_DOES_D_MOVE,
};
MODULE_ALIAS_FS("ceph");
1148 
/* Module init: caches first, then flock/xattr setup, then register
 * the filesystem; unwinds on failure. */
static int __init init_ceph(void)
{
	int ret = init_caches();
	if (ret)
		goto out;

	ceph_flock_init();
	ceph_xattr_init();
	ret = register_filesystem(&ceph_fs_type);
	if (ret)
		goto out_xattr;

	pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);

	return 0;

out_xattr:
	ceph_xattr_exit();
	destroy_caches();
out:
	return ret;
}
1171 
/* Module exit: undo init_ceph() in reverse order. */
static void __exit exit_ceph(void)
{
	dout("exit_ceph\n");
	unregister_filesystem(&ceph_fs_type);
	ceph_xattr_exit();
	destroy_caches();
}
1179 
module_init(init_ceph);
module_exit(exit_ceph);

/* module metadata */
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
MODULE_DESCRIPTION("Ceph filesystem for Linux");
MODULE_LICENSE("GPL");
1188