xref: /openbmc/linux/fs/ceph/super.c (revision 2b076054)
1 // SPDX-License-Identifier: GPL-2.0-only
2 
3 #include <linux/ceph/ceph_debug.h>
4 
5 #include <linux/backing-dev.h>
6 #include <linux/ctype.h>
7 #include <linux/fs.h>
8 #include <linux/inet.h>
9 #include <linux/in6.h>
10 #include <linux/module.h>
11 #include <linux/mount.h>
12 #include <linux/fs_context.h>
13 #include <linux/fs_parser.h>
14 #include <linux/sched.h>
15 #include <linux/seq_file.h>
16 #include <linux/slab.h>
17 #include <linux/statfs.h>
18 #include <linux/string.h>
19 
20 #include "super.h"
21 #include "mds_client.h"
22 #include "cache.h"
23 
24 #include <linux/ceph/ceph_features.h>
25 #include <linux/ceph/decode.h>
26 #include <linux/ceph/mon_client.h>
27 #include <linux/ceph/auth.h>
28 #include <linux/ceph/debugfs.h>
29 
30 static DEFINE_SPINLOCK(ceph_fsc_lock);
31 static LIST_HEAD(ceph_fsc_list);
32 
33 /*
34  * Ceph superblock operations
35  *
36  * Handle the basics of mounting, unmounting.
37  */
38 
39 /*
40  * super ops
41  */
42 static void ceph_put_super(struct super_block *s)
43 {
44 	struct ceph_fs_client *fsc = ceph_sb_to_client(s);
45 
46 	dout("put_super\n");
47 	ceph_mdsc_close_sessions(fsc->mdsc);
48 }
49 
50 static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
51 {
52 	struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
53 	struct ceph_mon_client *monc = &fsc->client->monc;
54 	struct ceph_statfs st;
55 	u64 fsid;
56 	int err;
57 	u64 data_pool;
58 
59 	if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
60 		data_pool = fsc->mdsc->mdsmap->m_data_pg_pools[0];
61 	} else {
62 		data_pool = CEPH_NOPOOL;
63 	}
64 
65 	dout("statfs\n");
66 	err = ceph_monc_do_statfs(monc, data_pool, &st);
67 	if (err < 0)
68 		return err;
69 
70 	/* fill in kstatfs */
71 	buf->f_type = CEPH_SUPER_MAGIC;  /* ?? */
72 
73 	/*
74 	 * express utilization in terms of large blocks to avoid
75 	 * overflow on 32-bit machines.
76 	 *
77 	 * NOTE: for the time being, we make bsize == frsize to humor
78 	 * not-yet-ancient versions of glibc that are broken.
79 	 * Someday, we will probably want to report a real block
80 	 * size...  whatever that may mean for a network file system!
81 	 */
82 	buf->f_bsize = 1 << CEPH_BLOCK_SHIFT;
83 	buf->f_frsize = 1 << CEPH_BLOCK_SHIFT;
84 
85 	/*
86 	 * By default use root quota for stats; fallback to overall filesystem
87 	 * usage if using 'noquotadf' mount option or if the root dir doesn't
88 	 * have max_bytes quota set.
89 	 */
90 	if (ceph_test_mount_opt(fsc, NOQUOTADF) ||
91 	    !ceph_quota_update_statfs(fsc, buf)) {
92 		buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10);
93 		buf->f_bfree = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
94 		buf->f_bavail = le64_to_cpu(st.kb_avail) >> (CEPH_BLOCK_SHIFT-10);
95 	}
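	/*
	 * Illustrative arithmetic, assuming CEPH_BLOCK_SHIFT is 22 (4 MiB
	 * blocks) as defined in super.h: the shift above is 22 - 10 = 12,
	 * so a cluster reporting st.kb = 8388608 (8 GiB in 1 KiB units)
	 * shows up as f_blocks = 8388608 >> 12 = 2048 blocks of 4 MiB.
	 */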
96 
97 	buf->f_files = le64_to_cpu(st.num_objects);
98 	buf->f_ffree = -1;
99 	buf->f_namelen = NAME_MAX;
100 
101 	/* Must convert the fsid, for consistent values across arches */
102 	mutex_lock(&monc->mutex);
103 	fsid = le64_to_cpu(*(__le64 *)(&monc->monmap->fsid)) ^
104 	       le64_to_cpu(*((__le64 *)&monc->monmap->fsid + 1));
105 	mutex_unlock(&monc->mutex);
106 
107 	buf->f_fsid = u64_to_fsid(fsid);
108 
109 	return 0;
110 }
111 
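/*
 * Sync the filesystem.  A non-blocking call (wait == 0) only kicks off the
 * flush of dirty caps to the MDS; a blocking call additionally waits for
 * in-flight OSD writes and MDS requests to complete.
 */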
112 static int ceph_sync_fs(struct super_block *sb, int wait)
113 {
114 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
115 
116 	if (!wait) {
117 		dout("sync_fs (non-blocking)\n");
118 		ceph_flush_dirty_caps(fsc->mdsc);
119 		dout("sync_fs (non-blocking) done\n");
120 		return 0;
121 	}
122 
123 	dout("sync_fs (blocking)\n");
124 	ceph_osdc_sync(&fsc->client->osdc);
125 	ceph_mdsc_sync(fsc->mdsc);
126 	dout("sync_fs (blocking) done\n");
127 	return 0;
128 }
129 
130 /*
131  * mount options
132  */
133 enum {
134 	Opt_wsize,
135 	Opt_rsize,
136 	Opt_rasize,
137 	Opt_caps_wanted_delay_min,
138 	Opt_caps_wanted_delay_max,
139 	Opt_caps_max,
140 	Opt_readdir_max_entries,
141 	Opt_readdir_max_bytes,
142 	Opt_congestion_kb,
143 	/* int args above */
144 	Opt_snapdirname,
145 	Opt_mds_namespace,
146 	Opt_recover_session,
147 	Opt_source,
148 	/* string args above */
149 	Opt_dirstat,
150 	Opt_rbytes,
151 	Opt_asyncreaddir,
152 	Opt_dcache,
153 	Opt_ino32,
154 	Opt_fscache,
155 	Opt_poolperm,
156 	Opt_require_active_mds,
157 	Opt_acl,
158 	Opt_quotadf,
159 	Opt_copyfrom,
160 	Opt_wsync,
161 };
162 
163 enum ceph_recover_session_mode {
164 	ceph_recover_session_no,
165 	ceph_recover_session_clean
166 };
167 
168 static const struct constant_table ceph_param_recover[] = {
169 	{ "no",		ceph_recover_session_no },
170 	{ "clean",	ceph_recover_session_clean },
171 	{}
172 };
173 
174 static const struct fs_parameter_spec ceph_mount_parameters[] = {
175 	fsparam_flag_no ("acl",				Opt_acl),
176 	fsparam_flag_no ("asyncreaddir",		Opt_asyncreaddir),
177 	fsparam_s32	("caps_max",			Opt_caps_max),
178 	fsparam_u32	("caps_wanted_delay_max",	Opt_caps_wanted_delay_max),
179 	fsparam_u32	("caps_wanted_delay_min",	Opt_caps_wanted_delay_min),
180 	fsparam_u32	("write_congestion_kb",		Opt_congestion_kb),
181 	fsparam_flag_no ("copyfrom",			Opt_copyfrom),
182 	fsparam_flag_no ("dcache",			Opt_dcache),
183 	fsparam_flag_no ("dirstat",			Opt_dirstat),
184 	fsparam_flag_no	("fsc",				Opt_fscache), // fsc|nofsc
185 	fsparam_string	("fsc",				Opt_fscache), // fsc=...
186 	fsparam_flag_no ("ino32",			Opt_ino32),
187 	fsparam_string	("mds_namespace",		Opt_mds_namespace),
188 	fsparam_flag_no ("poolperm",			Opt_poolperm),
189 	fsparam_flag_no ("quotadf",			Opt_quotadf),
190 	fsparam_u32	("rasize",			Opt_rasize),
191 	fsparam_flag_no ("rbytes",			Opt_rbytes),
192 	fsparam_u32	("readdir_max_bytes",		Opt_readdir_max_bytes),
193 	fsparam_u32	("readdir_max_entries",		Opt_readdir_max_entries),
194 	fsparam_enum	("recover_session",		Opt_recover_session, ceph_param_recover),
195 	fsparam_flag_no ("require_active_mds",		Opt_require_active_mds),
196 	fsparam_u32	("rsize",			Opt_rsize),
197 	fsparam_string	("snapdirname",			Opt_snapdirname),
198 	fsparam_string	("source",			Opt_source),
199 	fsparam_u32	("wsize",			Opt_wsize),
200 	fsparam_flag_no	("wsync",			Opt_wsync),
201 	{}
202 };
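/*
 * Illustrative example (values made up, not taken from this file): the
 * table above corresponds to mount(8) invocations such as
 *
 *   mount -t ceph 192.168.0.1:6789,192.168.0.2:6789:/ /mnt/cephfs \
 *         -o wsize=16777216,noasyncreaddir,recover_session=clean
 *
 * Generic libceph options (e.g. name= and secret=) are not listed here;
 * they are handled by ceph_parse_param() in ceph_parse_mount_param() below.
 */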
203 
204 struct ceph_parse_opts_ctx {
205 	struct ceph_options		*copts;
206 	struct ceph_mount_options	*opts;
207 };
208 
209 /*
210  * Remove adjacent slashes and then the trailing slash, unless it is
211  * the only remaining character.
212  *
213  * E.g. "//dir1////dir2///" --> "/dir1/dir2", "///" --> "/".
214  */
215 static void canonicalize_path(char *path)
216 {
217 	int i, j = 0;
218 
219 	for (i = 0; path[i] != '\0'; i++) {
220 		if (path[i] != '/' || j < 1 || path[j - 1] != '/')
221 			path[j++] = path[i];
222 	}
223 
224 	if (j > 1 && path[j - 1] == '/')
225 		j--;
226 	path[j] = '\0';
227 }
228 
229 /*
230  * Parse the source parameter.  Distinguish the server list from the path.
231  *
232  * The source will look like:
233  *     <server_spec>[,<server_spec>...]:[<path>]
234  * where
235  *     <server_spec> is <ip>[:<port>]
236  *     <path> is optional, but if present must begin with '/'
237  */
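/*
 * For instance (illustrative addresses), a source of
 *     "192.168.0.1:6789,192.168.0.2:6789:/mydir"
 * yields two monitor addresses for libceph and a server_path of "/mydir",
 * while "192.168.0.1:6789:" leaves server_path unset and mounts the root.
 */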
238 static int ceph_parse_source(struct fs_parameter *param, struct fs_context *fc)
239 {
240 	struct ceph_parse_opts_ctx *pctx = fc->fs_private;
241 	struct ceph_mount_options *fsopt = pctx->opts;
242 	char *dev_name = param->string, *dev_name_end;
243 	int ret;
244 
245 	dout("%s '%s'\n", __func__, dev_name);
246 	if (!dev_name || !*dev_name)
247 		return invalfc(fc, "Empty source");
248 
249 	dev_name_end = strchr(dev_name, '/');
250 	if (dev_name_end) {
251 		/*
252 		 * The server_path will contain the full path string passed in
253 		 * from userland, including the leading '/'.
254 		 */
255 		kfree(fsopt->server_path);
256 		fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
257 		if (!fsopt->server_path)
258 			return -ENOMEM;
259 
260 		canonicalize_path(fsopt->server_path);
261 	} else {
262 		dev_name_end = dev_name + strlen(dev_name);
263 	}
264 
265 	dev_name_end--;		/* back up to ':' separator */
266 	if (dev_name_end < dev_name || *dev_name_end != ':')
267 		return invalfc(fc, "No path or : separator in source");
268 
269 	dout("device name '%.*s'\n", (int)(dev_name_end - dev_name), dev_name);
270 	if (fsopt->server_path)
271 		dout("server path '%s'\n", fsopt->server_path);
272 
273 	ret = ceph_parse_mon_ips(param->string, dev_name_end - dev_name,
274 				 pctx->copts, fc->log.log);
275 	if (ret)
276 		return ret;
277 
278 	fc->source = param->string;
279 	param->string = NULL;
280 	return 0;
281 }
282 
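/*
 * Parse a single mount parameter.  Generic libceph options are tried first
 * via ceph_parse_param(); only when that returns -ENOPARAM (key not known
 * there) is the key matched against the CephFS-specific table above.
 */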
283 static int ceph_parse_mount_param(struct fs_context *fc,
284 				  struct fs_parameter *param)
285 {
286 	struct ceph_parse_opts_ctx *pctx = fc->fs_private;
287 	struct ceph_mount_options *fsopt = pctx->opts;
288 	struct fs_parse_result result;
289 	unsigned int mode;
290 	int token, ret;
291 
292 	ret = ceph_parse_param(param, pctx->copts, fc->log.log);
293 	if (ret != -ENOPARAM)
294 		return ret;
295 
296 	token = fs_parse(fc, ceph_mount_parameters, param, &result);
297 	dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
298 	if (token < 0)
299 		return token;
300 
301 	switch (token) {
302 	case Opt_snapdirname:
303 		kfree(fsopt->snapdir_name);
304 		fsopt->snapdir_name = param->string;
305 		param->string = NULL;
306 		break;
307 	case Opt_mds_namespace:
308 		kfree(fsopt->mds_namespace);
309 		fsopt->mds_namespace = param->string;
310 		param->string = NULL;
311 		break;
312 	case Opt_recover_session:
313 		mode = result.uint_32;
314 		if (mode == ceph_recover_session_no)
315 			fsopt->flags &= ~CEPH_MOUNT_OPT_CLEANRECOVER;
316 		else if (mode == ceph_recover_session_clean)
317 			fsopt->flags |= CEPH_MOUNT_OPT_CLEANRECOVER;
318 		else
319 			BUG();
320 		break;
321 	case Opt_source:
322 		if (fc->source)
323 			return invalfc(fc, "Multiple sources specified");
324 		return ceph_parse_source(param, fc);
325 	case Opt_wsize:
326 		if (result.uint_32 < PAGE_SIZE ||
327 		    result.uint_32 > CEPH_MAX_WRITE_SIZE)
328 			goto out_of_range;
329 		fsopt->wsize = ALIGN(result.uint_32, PAGE_SIZE);
330 		break;
331 	case Opt_rsize:
332 		if (result.uint_32 < PAGE_SIZE ||
333 		    result.uint_32 > CEPH_MAX_READ_SIZE)
334 			goto out_of_range;
335 		fsopt->rsize = ALIGN(result.uint_32, PAGE_SIZE);
336 		break;
337 	case Opt_rasize:
338 		fsopt->rasize = ALIGN(result.uint_32, PAGE_SIZE);
339 		break;
340 	case Opt_caps_wanted_delay_min:
341 		if (result.uint_32 < 1)
342 			goto out_of_range;
343 		fsopt->caps_wanted_delay_min = result.uint_32;
344 		break;
345 	case Opt_caps_wanted_delay_max:
346 		if (result.uint_32 < 1)
347 			goto out_of_range;
348 		fsopt->caps_wanted_delay_max = result.uint_32;
349 		break;
350 	case Opt_caps_max:
351 		if (result.int_32 < 0)
352 			goto out_of_range;
353 		fsopt->caps_max = result.int_32;
354 		break;
355 	case Opt_readdir_max_entries:
356 		if (result.uint_32 < 1)
357 			goto out_of_range;
358 		fsopt->max_readdir = result.uint_32;
359 		break;
360 	case Opt_readdir_max_bytes:
361 		if (result.uint_32 < PAGE_SIZE && result.uint_32 != 0)
362 			goto out_of_range;
363 		fsopt->max_readdir_bytes = result.uint_32;
364 		break;
365 	case Opt_congestion_kb:
366 		if (result.uint_32 < 1024) /* at least 1M */
367 			goto out_of_range;
368 		fsopt->congestion_kb = result.uint_32;
369 		break;
370 	case Opt_dirstat:
371 		if (!result.negated)
372 			fsopt->flags |= CEPH_MOUNT_OPT_DIRSTAT;
373 		else
374 			fsopt->flags &= ~CEPH_MOUNT_OPT_DIRSTAT;
375 		break;
376 	case Opt_rbytes:
377 		if (!result.negated)
378 			fsopt->flags |= CEPH_MOUNT_OPT_RBYTES;
379 		else
380 			fsopt->flags &= ~CEPH_MOUNT_OPT_RBYTES;
381 		break;
382 	case Opt_asyncreaddir:
383 		if (!result.negated)
384 			fsopt->flags &= ~CEPH_MOUNT_OPT_NOASYNCREADDIR;
385 		else
386 			fsopt->flags |= CEPH_MOUNT_OPT_NOASYNCREADDIR;
387 		break;
388 	case Opt_dcache:
389 		if (!result.negated)
390 			fsopt->flags |= CEPH_MOUNT_OPT_DCACHE;
391 		else
392 			fsopt->flags &= ~CEPH_MOUNT_OPT_DCACHE;
393 		break;
394 	case Opt_ino32:
395 		if (!result.negated)
396 			fsopt->flags |= CEPH_MOUNT_OPT_INO32;
397 		else
398 			fsopt->flags &= ~CEPH_MOUNT_OPT_INO32;
399 		break;
400 
401 	case Opt_fscache:
402 #ifdef CONFIG_CEPH_FSCACHE
403 		kfree(fsopt->fscache_uniq);
404 		fsopt->fscache_uniq = NULL;
405 		if (result.negated) {
406 			fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
407 		} else {
408 			fsopt->flags |= CEPH_MOUNT_OPT_FSCACHE;
409 			fsopt->fscache_uniq = param->string;
410 			param->string = NULL;
411 		}
412 		break;
413 #else
414 		return invalfc(fc, "fscache support is disabled");
415 #endif
416 	case Opt_poolperm:
417 		if (!result.negated)
418 			fsopt->flags &= ~CEPH_MOUNT_OPT_NOPOOLPERM;
419 		else
420 			fsopt->flags |= CEPH_MOUNT_OPT_NOPOOLPERM;
421 		break;
422 	case Opt_require_active_mds:
423 		if (!result.negated)
424 			fsopt->flags &= ~CEPH_MOUNT_OPT_MOUNTWAIT;
425 		else
426 			fsopt->flags |= CEPH_MOUNT_OPT_MOUNTWAIT;
427 		break;
428 	case Opt_quotadf:
429 		if (!result.negated)
430 			fsopt->flags &= ~CEPH_MOUNT_OPT_NOQUOTADF;
431 		else
432 			fsopt->flags |= CEPH_MOUNT_OPT_NOQUOTADF;
433 		break;
434 	case Opt_copyfrom:
435 		if (!result.negated)
436 			fsopt->flags &= ~CEPH_MOUNT_OPT_NOCOPYFROM;
437 		else
438 			fsopt->flags |= CEPH_MOUNT_OPT_NOCOPYFROM;
439 		break;
440 	case Opt_acl:
441 		if (!result.negated) {
442 #ifdef CONFIG_CEPH_FS_POSIX_ACL
443 			fc->sb_flags |= SB_POSIXACL;
444 #else
445 			return invalfc(fc, "POSIX ACL support is disabled");
446 #endif
447 		} else {
448 			fc->sb_flags &= ~SB_POSIXACL;
449 		}
450 		break;
451 	case Opt_wsync:
452 		if (!result.negated)
453 			fsopt->flags &= ~CEPH_MOUNT_OPT_ASYNC_DIROPS;
454 		else
455 			fsopt->flags |= CEPH_MOUNT_OPT_ASYNC_DIROPS;
456 		break;
457 	default:
458 		BUG();
459 	}
460 	return 0;
461 
462 out_of_range:
463 	return invalfc(fc, "%s out of range", param->key);
464 }
465 
466 static void destroy_mount_options(struct ceph_mount_options *args)
467 {
468 	dout("destroy_mount_options %p\n", args);
469 	if (!args)
470 		return;
471 
472 	kfree(args->snapdir_name);
473 	kfree(args->mds_namespace);
474 	kfree(args->server_path);
475 	kfree(args->fscache_uniq);
476 	kfree(args);
477 }
478 
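/*
 * strcmp() that tolerates NULL pointers: two NULLs compare equal, and a
 * NULL string sorts after a non-NULL one.  The callers below only care
 * whether the result is zero.
 */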
479 static int strcmp_null(const char *s1, const char *s2)
480 {
481 	if (!s1 && !s2)
482 		return 0;
483 	if (s1 && !s2)
484 		return -1;
485 	if (!s1 && s2)
486 		return 1;
487 	return strcmp(s1, s2);
488 }
489 
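/*
 * Compare mount options when deciding whether an existing superblock can be
 * shared.  The flat integer/flag members of struct ceph_mount_options (all
 * fields laid out before snapdir_name) are compared with a single memcmp();
 * the string members are then compared one by one.
 */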
490 static int compare_mount_options(struct ceph_mount_options *new_fsopt,
491 				 struct ceph_options *new_opt,
492 				 struct ceph_fs_client *fsc)
493 {
494 	struct ceph_mount_options *fsopt1 = new_fsopt;
495 	struct ceph_mount_options *fsopt2 = fsc->mount_options;
496 	int ofs = offsetof(struct ceph_mount_options, snapdir_name);
497 	int ret;
498 
499 	ret = memcmp(fsopt1, fsopt2, ofs);
500 	if (ret)
501 		return ret;
502 
503 	ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
504 	if (ret)
505 		return ret;
506 
507 	ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
508 	if (ret)
509 		return ret;
510 
511 	ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
512 	if (ret)
513 		return ret;
514 
515 	ret = strcmp_null(fsopt1->fscache_uniq, fsopt2->fscache_uniq);
516 	if (ret)
517 		return ret;
518 
519 	return ceph_compare_options(new_opt, fsc->client);
520 }
521 
522 /**
523  * ceph_show_options - Show mount options in /proc/mounts
524  * @m: seq_file to write to
525  * @root: root of that (sub)tree
526  */
527 static int ceph_show_options(struct seq_file *m, struct dentry *root)
528 {
529 	struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
530 	struct ceph_mount_options *fsopt = fsc->mount_options;
531 	size_t pos;
532 	int ret;
533 
534 	/* a comma between MNT/MS and client options */
535 	seq_putc(m, ',');
536 	pos = m->count;
537 
538 	ret = ceph_print_client_options(m, fsc->client, false);
539 	if (ret)
540 		return ret;
541 
542 	/* retract our comma if no client options */
543 	if (m->count == pos)
544 		m->count--;
545 
546 	if (fsopt->flags & CEPH_MOUNT_OPT_DIRSTAT)
547 		seq_puts(m, ",dirstat");
548 	if ((fsopt->flags & CEPH_MOUNT_OPT_RBYTES))
549 		seq_puts(m, ",rbytes");
550 	if (fsopt->flags & CEPH_MOUNT_OPT_NOASYNCREADDIR)
551 		seq_puts(m, ",noasyncreaddir");
552 	if ((fsopt->flags & CEPH_MOUNT_OPT_DCACHE) == 0)
553 		seq_puts(m, ",nodcache");
554 	if (fsopt->flags & CEPH_MOUNT_OPT_INO32)
555 		seq_puts(m, ",ino32");
556 	if (fsopt->flags & CEPH_MOUNT_OPT_FSCACHE) {
557 		seq_show_option(m, "fsc", fsopt->fscache_uniq);
558 	}
559 	if (fsopt->flags & CEPH_MOUNT_OPT_NOPOOLPERM)
560 		seq_puts(m, ",nopoolperm");
561 	if (fsopt->flags & CEPH_MOUNT_OPT_NOQUOTADF)
562 		seq_puts(m, ",noquotadf");
563 
564 #ifdef CONFIG_CEPH_FS_POSIX_ACL
565 	if (root->d_sb->s_flags & SB_POSIXACL)
566 		seq_puts(m, ",acl");
567 	else
568 		seq_puts(m, ",noacl");
569 #endif
570 
571 	if ((fsopt->flags & CEPH_MOUNT_OPT_NOCOPYFROM) == 0)
572 		seq_puts(m, ",copyfrom");
573 
574 	if (fsopt->mds_namespace)
575 		seq_show_option(m, "mds_namespace", fsopt->mds_namespace);
576 
577 	if (fsopt->flags & CEPH_MOUNT_OPT_CLEANRECOVER)
578 		seq_show_option(m, "recover_session", "clean");
579 
580 	if (fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS)
581 		seq_puts(m, ",nowsync");
582 
583 	if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
584 		seq_printf(m, ",wsize=%u", fsopt->wsize);
585 	if (fsopt->rsize != CEPH_MAX_READ_SIZE)
586 		seq_printf(m, ",rsize=%u", fsopt->rsize);
587 	if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
588 		seq_printf(m, ",rasize=%u", fsopt->rasize);
589 	if (fsopt->congestion_kb != default_congestion_kb())
590 		seq_printf(m, ",write_congestion_kb=%u", fsopt->congestion_kb);
591 	if (fsopt->caps_max)
592 		seq_printf(m, ",caps_max=%d", fsopt->caps_max);
593 	if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
594 		seq_printf(m, ",caps_wanted_delay_min=%u",
595 			 fsopt->caps_wanted_delay_min);
596 	if (fsopt->caps_wanted_delay_max != CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT)
597 		seq_printf(m, ",caps_wanted_delay_max=%u",
598 			   fsopt->caps_wanted_delay_max);
599 	if (fsopt->max_readdir != CEPH_MAX_READDIR_DEFAULT)
600 		seq_printf(m, ",readdir_max_entries=%u", fsopt->max_readdir);
601 	if (fsopt->max_readdir_bytes != CEPH_MAX_READDIR_BYTES_DEFAULT)
602 		seq_printf(m, ",readdir_max_bytes=%u", fsopt->max_readdir_bytes);
603 	if (strcmp(fsopt->snapdir_name, CEPH_SNAPDIRNAME_DEFAULT))
604 		seq_show_option(m, "snapdirname", fsopt->snapdir_name);
605 
606 	return 0;
607 }
608 
609 /*
610  * Handle any monitor messages that the core libceph code doesn't
611  * understand; return an error if we don't understand them either.
612  */
613 static int extra_mon_dispatch(struct ceph_client *client, struct ceph_msg *msg)
614 {
615 	struct ceph_fs_client *fsc = client->private;
616 	int type = le16_to_cpu(msg->hdr.type);
617 
618 	switch (type) {
619 	case CEPH_MSG_MDS_MAP:
620 		ceph_mdsc_handle_mdsmap(fsc->mdsc, msg);
621 		return 0;
622 	case CEPH_MSG_FS_MAP_USER:
623 		ceph_mdsc_handle_fsmap(fsc->mdsc, msg);
624 		return 0;
625 	default:
626 		return -1;
627 	}
628 }
629 
630 /*
631  * create a new fs client
632  *
633  * Success or not, this function consumes @fsopt and @opt.
634  */
635 static struct ceph_fs_client *create_fs_client(struct ceph_mount_options *fsopt,
636 					struct ceph_options *opt)
637 {
638 	struct ceph_fs_client *fsc;
639 	int err;
640 
641 	fsc = kzalloc(sizeof(*fsc), GFP_KERNEL);
642 	if (!fsc) {
643 		err = -ENOMEM;
644 		goto fail;
645 	}
646 
647 	fsc->client = ceph_create_client(opt, fsc);
648 	if (IS_ERR(fsc->client)) {
649 		err = PTR_ERR(fsc->client);
650 		goto fail;
651 	}
652 	opt = NULL; /* fsc->client now owns this */
653 
654 	fsc->client->extra_mon_dispatch = extra_mon_dispatch;
655 	ceph_set_opt(fsc->client, ABORT_ON_FULL);
656 
657 	if (!fsopt->mds_namespace) {
658 		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
659 				   0, true);
660 	} else {
661 		ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_FSMAP,
662 				   0, false);
663 	}
664 
665 	fsc->mount_options = fsopt;
666 
667 	fsc->sb = NULL;
668 	fsc->mount_state = CEPH_MOUNT_MOUNTING;
669 	fsc->filp_gen = 1;
670 	fsc->have_copy_from2 = true;
671 
672 	atomic_long_set(&fsc->writeback_count, 0);
673 
674 	err = -ENOMEM;
675 	/*
676 	 * The number of concurrent works can be high but they don't need
677 	 * to be processed in parallel, limit concurrency.
678 	 */
679 	fsc->inode_wq = alloc_workqueue("ceph-inode", WQ_UNBOUND, 0);
680 	if (!fsc->inode_wq)
681 		goto fail_client;
682 	fsc->cap_wq = alloc_workqueue("ceph-cap", 0, 1);
683 	if (!fsc->cap_wq)
684 		goto fail_inode_wq;
685 
686 	spin_lock(&ceph_fsc_lock);
687 	list_add_tail(&fsc->metric_wakeup, &ceph_fsc_list);
688 	spin_unlock(&ceph_fsc_lock);
689 
690 	return fsc;
691 
692 fail_inode_wq:
693 	destroy_workqueue(fsc->inode_wq);
694 fail_client:
695 	ceph_destroy_client(fsc->client);
696 fail:
697 	kfree(fsc);
698 	if (opt)
699 		ceph_destroy_options(opt);
700 	destroy_mount_options(fsopt);
701 	return ERR_PTR(err);
702 }
703 
704 static void flush_fs_workqueues(struct ceph_fs_client *fsc)
705 {
706 	flush_workqueue(fsc->inode_wq);
707 	flush_workqueue(fsc->cap_wq);
708 }
709 
710 static void destroy_fs_client(struct ceph_fs_client *fsc)
711 {
712 	dout("destroy_fs_client %p\n", fsc);
713 
714 	spin_lock(&ceph_fsc_lock);
715 	list_del(&fsc->metric_wakeup);
716 	spin_unlock(&ceph_fsc_lock);
717 
718 	ceph_mdsc_destroy(fsc);
719 	destroy_workqueue(fsc->inode_wq);
720 	destroy_workqueue(fsc->cap_wq);
721 
722 	destroy_mount_options(fsc->mount_options);
723 
724 	ceph_destroy_client(fsc->client);
725 
726 	kfree(fsc);
727 	dout("destroy_fs_client %p done\n", fsc);
728 }
729 
730 /*
731  * caches
732  */
733 struct kmem_cache *ceph_inode_cachep;
734 struct kmem_cache *ceph_cap_cachep;
735 struct kmem_cache *ceph_cap_flush_cachep;
736 struct kmem_cache *ceph_dentry_cachep;
737 struct kmem_cache *ceph_file_cachep;
738 struct kmem_cache *ceph_dir_file_cachep;
739 struct kmem_cache *ceph_mds_request_cachep;
740 mempool_t *ceph_wb_pagevec_pool;
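/*
 * These slab caches back the per-inode, capability, dentry and file private
 * structures; the mempool provides a guaranteed-progress fallback when
 * allocating page vectors for writeback under memory pressure.
 */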
741 
742 static void ceph_inode_init_once(void *foo)
743 {
744 	struct ceph_inode_info *ci = foo;
745 	inode_init_once(&ci->vfs_inode);
746 }
747 
748 static int __init init_caches(void)
749 {
750 	int error = -ENOMEM;
751 
752 	ceph_inode_cachep = kmem_cache_create("ceph_inode_info",
753 				      sizeof(struct ceph_inode_info),
754 				      __alignof__(struct ceph_inode_info),
755 				      SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|
756 				      SLAB_ACCOUNT, ceph_inode_init_once);
757 	if (!ceph_inode_cachep)
758 		return -ENOMEM;
759 
760 	ceph_cap_cachep = KMEM_CACHE(ceph_cap, SLAB_MEM_SPREAD);
761 	if (!ceph_cap_cachep)
762 		goto bad_cap;
763 	ceph_cap_flush_cachep = KMEM_CACHE(ceph_cap_flush,
764 					   SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
765 	if (!ceph_cap_flush_cachep)
766 		goto bad_cap_flush;
767 
768 	ceph_dentry_cachep = KMEM_CACHE(ceph_dentry_info,
769 					SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD);
770 	if (!ceph_dentry_cachep)
771 		goto bad_dentry;
772 
773 	ceph_file_cachep = KMEM_CACHE(ceph_file_info, SLAB_MEM_SPREAD);
774 	if (!ceph_file_cachep)
775 		goto bad_file;
776 
777 	ceph_dir_file_cachep = KMEM_CACHE(ceph_dir_file_info, SLAB_MEM_SPREAD);
778 	if (!ceph_dir_file_cachep)
779 		goto bad_dir_file;
780 
781 	ceph_mds_request_cachep = KMEM_CACHE(ceph_mds_request, SLAB_MEM_SPREAD);
782 	if (!ceph_mds_request_cachep)
783 		goto bad_mds_req;
784 
785 	ceph_wb_pagevec_pool = mempool_create_kmalloc_pool(10, (CEPH_MAX_WRITE_SIZE >> PAGE_SHIFT) * sizeof(struct page *));
786 	if (!ceph_wb_pagevec_pool)
787 		goto bad_pagevec_pool;
788 
789 	error = ceph_fscache_register();
790 	if (error)
791 		goto bad_fscache;
792 
793 	return 0;
794 
795 bad_fscache:
796 	mempool_destroy(ceph_wb_pagevec_pool);
797 bad_pagevec_pool:
798 	kmem_cache_destroy(ceph_mds_request_cachep);
799 bad_mds_req:
800 	kmem_cache_destroy(ceph_dir_file_cachep);
801 bad_dir_file:
802 	kmem_cache_destroy(ceph_file_cachep);
803 bad_file:
804 	kmem_cache_destroy(ceph_dentry_cachep);
805 bad_dentry:
806 	kmem_cache_destroy(ceph_cap_flush_cachep);
807 bad_cap_flush:
808 	kmem_cache_destroy(ceph_cap_cachep);
809 bad_cap:
810 	kmem_cache_destroy(ceph_inode_cachep);
811 	return error;
812 }
813 
814 static void destroy_caches(void)
815 {
816 	/*
817 	 * Make sure all delayed rcu free inodes are flushed before we
818 	 * destroy cache.
819 	 */
820 	rcu_barrier();
821 
822 	kmem_cache_destroy(ceph_inode_cachep);
823 	kmem_cache_destroy(ceph_cap_cachep);
824 	kmem_cache_destroy(ceph_cap_flush_cachep);
825 	kmem_cache_destroy(ceph_dentry_cachep);
826 	kmem_cache_destroy(ceph_file_cachep);
827 	kmem_cache_destroy(ceph_dir_file_cachep);
828 	kmem_cache_destroy(ceph_mds_request_cachep);
829 	mempool_destroy(ceph_wb_pagevec_pool);
830 
831 	ceph_fscache_unregister();
832 }
833 
834 /*
835  * ceph_umount_begin - initiate forced umount.  Tear down the
836  * mount, skipping steps that may hang while waiting for server(s).
837  */
838 static void ceph_umount_begin(struct super_block *sb)
839 {
840 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
841 
842 	dout("ceph_umount_begin - starting forced umount\n");
843 	if (!fsc)
844 		return;
845 	fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
846 	ceph_osdc_abort_requests(&fsc->client->osdc, -EIO);
847 	ceph_mdsc_force_umount(fsc->mdsc);
848 	fsc->filp_gen++; // invalidate open files
849 }
850 
851 static const struct super_operations ceph_super_ops = {
852 	.alloc_inode	= ceph_alloc_inode,
853 	.free_inode	= ceph_free_inode,
854 	.write_inode    = ceph_write_inode,
855 	.drop_inode	= generic_delete_inode,
856 	.evict_inode	= ceph_evict_inode,
857 	.sync_fs        = ceph_sync_fs,
858 	.put_super	= ceph_put_super,
859 	.show_options   = ceph_show_options,
860 	.statfs		= ceph_statfs,
861 	.umount_begin   = ceph_umount_begin,
862 };
863 
864 /*
865  * Bootstrap mount by opening the root directory.  Note the mount
866  * @started time from caller, and time out if this takes too long.
867  */
868 static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
869 				       const char *path,
870 				       unsigned long started)
871 {
872 	struct ceph_mds_client *mdsc = fsc->mdsc;
873 	struct ceph_mds_request *req = NULL;
874 	int err;
875 	struct dentry *root;
876 
877 	/* open dir */
878 	dout("open_root_inode opening '%s'\n", path);
879 	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
880 	if (IS_ERR(req))
881 		return ERR_CAST(req);
882 	req->r_path1 = kstrdup(path, GFP_NOFS);
883 	if (!req->r_path1) {
884 		root = ERR_PTR(-ENOMEM);
885 		goto out;
886 	}
887 
888 	req->r_ino1.ino = CEPH_INO_ROOT;
889 	req->r_ino1.snap = CEPH_NOSNAP;
890 	req->r_started = started;
891 	req->r_timeout = fsc->client->options->mount_timeout;
892 	req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
893 	req->r_num_caps = 2;
894 	err = ceph_mdsc_do_request(mdsc, NULL, req);
895 	if (err == 0) {
896 		struct inode *inode = req->r_target_inode;
897 		req->r_target_inode = NULL;
898 		dout("open_root_inode success\n");
899 		root = d_make_root(inode);
900 		if (!root) {
901 			root = ERR_PTR(-ENOMEM);
902 			goto out;
903 		}
904 		dout("open_root_inode success, root dentry is %p\n", root);
905 	} else {
906 		root = ERR_PTR(err);
907 	}
908 out:
909 	ceph_mdsc_put_request(req);
910 	return root;
911 }
912 
913 /*
914  * mount: join the ceph cluster, and open root directory.
915  */
916 static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
917 				      struct fs_context *fc)
918 {
919 	int err;
920 	unsigned long started = jiffies;  /* note the start time */
921 	struct dentry *root;
922 
923 	dout("mount start %p\n", fsc);
924 	mutex_lock(&fsc->client->mount_mutex);
925 
926 	if (!fsc->sb->s_root) {
927 		const char *path = fsc->mount_options->server_path ?
928 				     fsc->mount_options->server_path + 1 : "";
929 
930 		err = __ceph_open_session(fsc->client, started);
931 		if (err < 0)
932 			goto out;
933 
934 		/* setup fscache */
935 		if (fsc->mount_options->flags & CEPH_MOUNT_OPT_FSCACHE) {
936 			err = ceph_fscache_register_fs(fsc, fc);
937 			if (err < 0)
938 				goto out;
939 		}
940 
941 		dout("mount opening path '%s'\n", path);
942 
943 		ceph_fs_debugfs_init(fsc);
944 
945 		root = open_root_dentry(fsc, path, started);
946 		if (IS_ERR(root)) {
947 			err = PTR_ERR(root);
948 			goto out;
949 		}
950 		fsc->sb->s_root = dget(root);
951 	} else {
952 		root = dget(fsc->sb->s_root);
953 	}
954 
955 	fsc->mount_state = CEPH_MOUNT_MOUNTED;
956 	dout("mount success\n");
957 	mutex_unlock(&fsc->client->mount_mutex);
958 	return root;
959 
960 out:
961 	mutex_unlock(&fsc->client->mount_mutex);
962 	return ERR_PTR(err);
963 }
964 
965 static int ceph_set_super(struct super_block *s, struct fs_context *fc)
966 {
967 	struct ceph_fs_client *fsc = s->s_fs_info;
968 	int ret;
969 
970 	dout("set_super %p\n", s);
971 
972 	s->s_maxbytes = MAX_LFS_FILESIZE;
973 
974 	s->s_xattr = ceph_xattr_handlers;
975 	fsc->sb = s;
976 	fsc->max_file_size = 1ULL << 40; /* temp value until we get mdsmap */
977 
978 	s->s_op = &ceph_super_ops;
979 	s->s_d_op = &ceph_dentry_ops;
980 	s->s_export_op = &ceph_export_ops;
981 
982 	s->s_time_gran = 1;
983 	s->s_time_min = 0;
984 	s->s_time_max = U32_MAX;
985 
986 	ret = set_anon_super_fc(s, fc);
987 	if (ret != 0)
988 		fsc->sb = NULL;
989 	return ret;
990 }
991 
992 /*
993  * share superblock if same fs AND options
994  */
995 static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
996 {
997 	struct ceph_fs_client *new = fc->s_fs_info;
998 	struct ceph_mount_options *fsopt = new->mount_options;
999 	struct ceph_options *opt = new->client->options;
1000 	struct ceph_fs_client *other = ceph_sb_to_client(sb);
1001 
1002 	dout("ceph_compare_super %p\n", sb);
1003 
1004 	if (compare_mount_options(fsopt, opt, other)) {
1005 		dout("monitor(s)/mount options don't match\n");
1006 		return 0;
1007 	}
1008 	if ((opt->flags & CEPH_OPT_FSID) &&
1009 	    ceph_fsid_compare(&opt->fsid, &other->client->fsid)) {
1010 		dout("fsid doesn't match\n");
1011 		return 0;
1012 	}
1013 	if (fc->sb_flags != (sb->s_flags & ~SB_BORN)) {
1014 		dout("flags differ\n");
1015 		return 0;
1016 	}
1017 	return 1;
1018 }
1019 
1020 /*
1021  * construct our own bdi so we can control readahead, etc.
1022  */
1023 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
1024 
1025 static int ceph_setup_bdi(struct super_block *sb, struct ceph_fs_client *fsc)
1026 {
1027 	int err;
1028 
1029 	err = super_setup_bdi_name(sb, "ceph-%ld",
1030 				   atomic_long_inc_return(&bdi_seq));
1031 	if (err)
1032 		return err;
1033 
1034 	/* set ra_pages based on rasize mount option? */
1035 	sb->s_bdi->ra_pages = fsc->mount_options->rasize >> PAGE_SHIFT;
1036 
1037 	/* set io_pages based on max osd read size */
1038 	sb->s_bdi->io_pages = fsc->mount_options->rsize >> PAGE_SHIFT;
1039 
1040 	return 0;
1041 }
1042 
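/*
 * Create (or find) the superblock for a mount request.  A new fs client is
 * always created first; sget_fc() then either adopts it for a brand new
 * superblock or drops it in favour of an existing compatible one (unless
 * the libceph "noshare" option disables superblock sharing).
 */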
1043 static int ceph_get_tree(struct fs_context *fc)
1044 {
1045 	struct ceph_parse_opts_ctx *pctx = fc->fs_private;
1046 	struct super_block *sb;
1047 	struct ceph_fs_client *fsc;
1048 	struct dentry *res;
1049 	int (*compare_super)(struct super_block *, struct fs_context *) =
1050 		ceph_compare_super;
1051 	int err;
1052 
1053 	dout("ceph_get_tree\n");
1054 
1055 	if (!fc->source)
1056 		return invalfc(fc, "No source");
1057 
1058 	/* create client (which we may/may not use) */
1059 	fsc = create_fs_client(pctx->opts, pctx->copts);
1060 	pctx->opts = NULL;
1061 	pctx->copts = NULL;
1062 	if (IS_ERR(fsc)) {
1063 		err = PTR_ERR(fsc);
1064 		goto out_final;
1065 	}
1066 
1067 	err = ceph_mdsc_init(fsc);
1068 	if (err < 0)
1069 		goto out;
1070 
1071 	if (ceph_test_opt(fsc->client, NOSHARE))
1072 		compare_super = NULL;
1073 
1074 	fc->s_fs_info = fsc;
1075 	sb = sget_fc(fc, compare_super, ceph_set_super);
1076 	fc->s_fs_info = NULL;
1077 	if (IS_ERR(sb)) {
1078 		err = PTR_ERR(sb);
1079 		goto out;
1080 	}
1081 
1082 	if (ceph_sb_to_client(sb) != fsc) {
1083 		destroy_fs_client(fsc);
1084 		fsc = ceph_sb_to_client(sb);
1085 		dout("get_sb got existing client %p\n", fsc);
1086 	} else {
1087 		dout("get_sb using new client %p\n", fsc);
1088 		err = ceph_setup_bdi(sb, fsc);
1089 		if (err < 0)
1090 			goto out_splat;
1091 	}
1092 
1093 	res = ceph_real_mount(fsc, fc);
1094 	if (IS_ERR(res)) {
1095 		err = PTR_ERR(res);
1096 		goto out_splat;
1097 	}
1098 	dout("root %p inode %p ino %llx.%llx\n", res,
1099 	     d_inode(res), ceph_vinop(d_inode(res)));
1100 	fc->root = fsc->sb->s_root;
1101 	return 0;
1102 
1103 out_splat:
1104 	if (!ceph_mdsmap_is_cluster_available(fsc->mdsc->mdsmap)) {
1105 		pr_info("No mds server is up or the cluster is laggy\n");
1106 		err = -EHOSTUNREACH;
1107 	}
1108 
1109 	ceph_mdsc_close_sessions(fsc->mdsc);
1110 	deactivate_locked_super(sb);
1111 	goto out_final;
1112 
1113 out:
1114 	destroy_fs_client(fsc);
1115 out_final:
1116 	dout("ceph_get_tree fail %d\n", err);
1117 	return err;
1118 }
1119 
1120 static void ceph_free_fc(struct fs_context *fc)
1121 {
1122 	struct ceph_parse_opts_ctx *pctx = fc->fs_private;
1123 
1124 	if (pctx) {
1125 		destroy_mount_options(pctx->opts);
1126 		ceph_destroy_options(pctx->copts);
1127 		kfree(pctx);
1128 	}
1129 }
1130 
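/*
 * Remount.  Only the wsync/nowsync toggle (CEPH_MOUNT_OPT_ASYNC_DIROPS) is
 * applied to the live mount here; other options are parsed but not changed.
 */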
1131 static int ceph_reconfigure_fc(struct fs_context *fc)
1132 {
1133 	struct ceph_parse_opts_ctx *pctx = fc->fs_private;
1134 	struct ceph_mount_options *fsopt = pctx->opts;
1135 	struct ceph_fs_client *fsc = ceph_sb_to_client(fc->root->d_sb);
1136 
1137 	if (fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS)
1138 		ceph_set_mount_opt(fsc, ASYNC_DIROPS);
1139 	else
1140 		ceph_clear_mount_opt(fsc, ASYNC_DIROPS);
1141 
1142 	sync_filesystem(fc->root->d_sb);
1143 	return 0;
1144 }
1145 
1146 static const struct fs_context_operations ceph_context_ops = {
1147 	.free		= ceph_free_fc,
1148 	.parse_param	= ceph_parse_mount_param,
1149 	.get_tree	= ceph_get_tree,
1150 	.reconfigure	= ceph_reconfigure_fc,
1151 };
1152 
1153 /*
1154  * Set up the filesystem mount context.
1155  */
1156 static int ceph_init_fs_context(struct fs_context *fc)
1157 {
1158 	struct ceph_parse_opts_ctx *pctx;
1159 	struct ceph_mount_options *fsopt;
1160 
1161 	pctx = kzalloc(sizeof(*pctx), GFP_KERNEL);
1162 	if (!pctx)
1163 		return -ENOMEM;
1164 
1165 	pctx->copts = ceph_alloc_options();
1166 	if (!pctx->copts)
1167 		goto nomem;
1168 
1169 	pctx->opts = kzalloc(sizeof(*pctx->opts), GFP_KERNEL);
1170 	if (!pctx->opts)
1171 		goto nomem;
1172 
1173 	fsopt = pctx->opts;
1174 	fsopt->flags = CEPH_MOUNT_OPT_DEFAULT;
1175 
1176 	fsopt->wsize = CEPH_MAX_WRITE_SIZE;
1177 	fsopt->rsize = CEPH_MAX_READ_SIZE;
1178 	fsopt->rasize = CEPH_RASIZE_DEFAULT;
1179 	fsopt->snapdir_name = kstrdup(CEPH_SNAPDIRNAME_DEFAULT, GFP_KERNEL);
1180 	if (!fsopt->snapdir_name)
1181 		goto nomem;
1182 
1183 	fsopt->caps_wanted_delay_min = CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT;
1184 	fsopt->caps_wanted_delay_max = CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT;
1185 	fsopt->max_readdir = CEPH_MAX_READDIR_DEFAULT;
1186 	fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
1187 	fsopt->congestion_kb = default_congestion_kb();
1188 
1189 #ifdef CONFIG_CEPH_FS_POSIX_ACL
1190 	fc->sb_flags |= SB_POSIXACL;
1191 #endif
1192 
1193 	fc->fs_private = pctx;
1194 	fc->ops = &ceph_context_ops;
1195 	return 0;
1196 
1197 nomem:
1198 	destroy_mount_options(pctx->opts);
1199 	ceph_destroy_options(pctx->copts);
1200 	kfree(pctx);
1201 	return -ENOMEM;
1202 }
1203 
1204 static void ceph_kill_sb(struct super_block *s)
1205 {
1206 	struct ceph_fs_client *fsc = ceph_sb_to_client(s);
1207 
1208 	dout("kill_sb %p\n", s);
1209 
1210 	ceph_mdsc_pre_umount(fsc->mdsc);
1211 	flush_fs_workqueues(fsc);
1212 
1213 	kill_anon_super(s);
1214 
1215 	fsc->client->extra_mon_dispatch = NULL;
1216 	ceph_fs_debugfs_cleanup(fsc);
1217 
1218 	ceph_fscache_unregister_fs(fsc);
1219 
1220 	destroy_fs_client(fsc);
1221 }
1222 
1223 static struct file_system_type ceph_fs_type = {
1224 	.owner		= THIS_MODULE,
1225 	.name		= "ceph",
1226 	.init_fs_context = ceph_init_fs_context,
1227 	.kill_sb	= ceph_kill_sb,
1228 	.fs_flags	= FS_RENAME_DOES_D_MOVE,
1229 };
1230 MODULE_ALIAS_FS("ceph");
1231 
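/*
 * Force-umount the mount and reset the client address and all mon/OSD
 * connections so the client can rejoin the cluster, typically after it has
 * been blocklisted and "recover_session=clean" is in effect.
 */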
1232 int ceph_force_reconnect(struct super_block *sb)
1233 {
1234 	struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
1235 	int err = 0;
1236 
1237 	ceph_umount_begin(sb);
1238 
1239 	/* Make sure all page caches get invalidated.
1240 	 * see remove_session_caps_cb() */
1241 	flush_workqueue(fsc->inode_wq);
1242 
1243 	/* In case we were blocklisted. This also resets
1244 	 * all mon/osd connections. */
1245 	ceph_reset_client_addr(fsc->client);
1246 
1247 	ceph_osdc_clear_abort_err(&fsc->client->osdc);
1248 
1249 	fsc->blocklisted = false;
1250 	fsc->mount_state = CEPH_MOUNT_MOUNTED;
1251 
1252 	if (sb->s_root) {
1253 		err = __ceph_do_getattr(d_inode(sb->s_root), NULL,
1254 					CEPH_STAT_CAP_INODE, true);
1255 	}
1256 	return err;
1257 }
1258 
1259 static int __init init_ceph(void)
1260 {
1261 	int ret = init_caches();
1262 	if (ret)
1263 		goto out;
1264 
1265 	ceph_flock_init();
1266 	ret = register_filesystem(&ceph_fs_type);
1267 	if (ret)
1268 		goto out_caches;
1269 
1270 	pr_info("loaded (mds proto %d)\n", CEPH_MDSC_PROTOCOL);
1271 
1272 	return 0;
1273 
1274 out_caches:
1275 	destroy_caches();
1276 out:
1277 	return ret;
1278 }
1279 
1280 static void __exit exit_ceph(void)
1281 {
1282 	dout("exit_ceph\n");
1283 	unregister_filesystem(&ceph_fs_type);
1284 	destroy_caches();
1285 }
1286 
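/*
 * Setter for the disable_send_metrics module parameter below.  When metric
 * sending is (re-)enabled, wake up the metric work on every mounted fs
 * client so that reports resume promptly.
 */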
1287 static int param_set_metrics(const char *val, const struct kernel_param *kp)
1288 {
1289 	struct ceph_fs_client *fsc;
1290 	int ret;
1291 
1292 	ret = param_set_bool(val, kp);
1293 	if (ret) {
1294 		pr_err("Failed to parse sending metrics switch value '%s'\n",
1295 		       val);
1296 		return ret;
1297 	} else if (!disable_send_metrics) {
1298 		// wake up all the mds clients
1299 		spin_lock(&ceph_fsc_lock);
1300 		list_for_each_entry(fsc, &ceph_fsc_list, metric_wakeup) {
1301 			metric_schedule_delayed(&fsc->mdsc->metric);
1302 		}
1303 		spin_unlock(&ceph_fsc_lock);
1304 	}
1305 
1306 	return 0;
1307 }
1308 
1309 static const struct kernel_param_ops param_ops_metrics = {
1310 	.set = param_set_metrics,
1311 	.get = param_get_bool,
1312 };
1313 
1314 bool disable_send_metrics = false;
1315 module_param_cb(disable_send_metrics, &param_ops_metrics, &disable_send_metrics, 0644);
1316 MODULE_PARM_DESC(disable_send_metrics, "Enable sending perf metrics to ceph cluster (default: on)");
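/*
 * Example (illustrative, relying on the standard module parameter sysfs
 * layout): the switch can be flipped at runtime with
 *   echo 1 > /sys/module/ceph/parameters/disable_send_metrics
 * A value of 1 stops the periodic metric reports; the default of 0 keeps
 * them enabled.
 */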
1317 
1318 module_init(init_ceph);
1319 module_exit(exit_ceph);
1320 
1321 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
1322 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
1323 MODULE_AUTHOR("Patience Warnick <patience@newdream.net>");
1324 MODULE_DESCRIPTION("Ceph filesystem for Linux");
1325 MODULE_LICENSE("GPL");
1326