// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/gfs2_ondisk.h>
#include <linux/blkdev.h>

#include "gfs2.h"
#include "incore.h"
#include "sys.h"
#include "super.h"
#include "glock.h"
#include "quota.h"
#include "util.h"
#include "glops.h"
#include "recovery.h"

struct gfs2_attr {
	struct attribute attr;
	ssize_t (*show)(struct gfs2_sbd *, char *);
	ssize_t (*store)(struct gfs2_sbd *, const char *, size_t);
};

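/*
 * Generic sysfs dispatchers: recover the owning gfs2_sbd from the embedded
 * kobject and hand the read or write off to the per-attribute show/store
 * handler, if one is defined.
 */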
static ssize_t gfs2_attr_show(struct kobject *kobj, struct attribute *attr,
			      char *buf)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
	return a->show ? a->show(sdp, buf) : 0;
}

static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t len)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
	return a->store ? a->store(sdp, buf, len) : len;
}

static const struct sysfs_ops gfs2_attr_ops = {
	.show = gfs2_attr_show,
	.store = gfs2_attr_store,
};


static struct kset *gfs2_kset;

static ssize_t id_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u:%u\n",
			MAJOR(sdp->sd_vfs->s_dev), MINOR(sdp->sd_vfs->s_dev));
}

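/*
 * Report the SDF_* superblock flags and assorted log/journal counters,
 * one "Name: value" line per field.
 */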
static ssize_t status_show(struct gfs2_sbd *sdp, char *buf)
{
	unsigned long f = sdp->sd_flags;
	ssize_t s;

	s = snprintf(buf, PAGE_SIZE,
		     "Journal Checked: %d\n"
		     "Journal Live: %d\n"
		     "Journal ID: %d\n"
		     "Spectator: %d\n"
		     "Withdrawn: %d\n"
		     "No barriers: %d\n"
		     "No recovery: %d\n"
		     "Demote: %d\n"
		     "No Journal ID: %d\n"
		     "Mounted RO: %d\n"
		     "RO Recovery: %d\n"
		     "Skip DLM Unlock: %d\n"
		     "Force AIL Flush: %d\n"
		     "FS Freeze Initiator: %d\n"
		     "FS Frozen: %d\n"
		     "Withdrawing: %d\n"
		     "Withdraw In Prog: %d\n"
		     "Remote Withdraw: %d\n"
		     "Withdraw Recovery: %d\n"
		     "Deactivating: %d\n"
		     "sd_log_error: %d\n"
		     "sd_log_flush_lock: %d\n"
		     "sd_log_num_revoke: %u\n"
		     "sd_log_in_flight: %d\n"
		     "sd_log_blks_needed: %d\n"
		     "sd_log_blks_free: %d\n"
		     "sd_log_flush_head: %d\n"
		     "sd_log_flush_tail: %d\n"
		     "sd_log_blks_reserved: %d\n"
		     "sd_log_revokes_available: %d\n"
		     "sd_log_pinned: %d\n"
		     "sd_log_thresh1: %d\n"
		     "sd_log_thresh2: %d\n",
		     test_bit(SDF_JOURNAL_CHECKED, &f),
		     test_bit(SDF_JOURNAL_LIVE, &f),
		     (sdp->sd_jdesc ? sdp->sd_jdesc->jd_jid : 0),
		     (sdp->sd_args.ar_spectator ? 1 : 0),
		     test_bit(SDF_WITHDRAWN, &f),
		     test_bit(SDF_NOBARRIERS, &f),
		     test_bit(SDF_NORECOVERY, &f),
		     test_bit(SDF_DEMOTE, &f),
		     test_bit(SDF_NOJOURNALID, &f),
		     (sb_rdonly(sdp->sd_vfs) ? 1 : 0),
		     test_bit(SDF_RORECOVERY, &f),
		     test_bit(SDF_SKIP_DLM_UNLOCK, &f),
		     test_bit(SDF_FORCE_AIL_FLUSH, &f),
		     test_bit(SDF_FREEZE_INITIATOR, &f),
		     test_bit(SDF_FROZEN, &f),
		     test_bit(SDF_WITHDRAWING, &f),
		     test_bit(SDF_WITHDRAW_IN_PROG, &f),
		     test_bit(SDF_REMOTE_WITHDRAW, &f),
		     test_bit(SDF_WITHDRAW_RECOVERY, &f),
		     test_bit(SDF_KILL, &f),
		     sdp->sd_log_error,
		     rwsem_is_locked(&sdp->sd_log_flush_lock),
		     sdp->sd_log_num_revoke,
		     atomic_read(&sdp->sd_log_in_flight),
		     atomic_read(&sdp->sd_log_blks_needed),
		     atomic_read(&sdp->sd_log_blks_free),
		     sdp->sd_log_flush_head,
		     sdp->sd_log_flush_tail,
		     sdp->sd_log_blks_reserved,
		     atomic_read(&sdp->sd_log_revokes_available),
		     atomic_read(&sdp->sd_log_pinned),
		     atomic_read(&sdp->sd_log_thresh1),
		     atomic_read(&sdp->sd_log_thresh2));
	return s;
}

static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
}

static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
{
	struct super_block *s = sdp->sd_vfs;

	buf[0] = '\0';
	if (uuid_is_null(&s->s_uuid))
		return 0;
	return snprintf(buf, PAGE_SIZE, "%pUB\n", &s->s_uuid);
}

static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
{
	struct super_block *sb = sdp->sd_vfs;
	int frozen = (sb->s_writers.frozen == SB_UNFROZEN) ? 0 : 1;

	return snprintf(buf, PAGE_SIZE, "%d\n", frozen);
}

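/*
 * Writing 1 freezes the filesystem via freeze_super(); writing 0 thaws it
 * again via thaw_super(). Any other value is rejected with -EINVAL.
 */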
static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	int error, n;

	error = kstrtoint(buf, 0, &n);
	if (error)
		return error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (n) {
	case 0:
		error = thaw_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE);
		break;
	case 1:
		error = freeze_super(sdp->sd_vfs, FREEZE_HOLDER_USERSPACE);
		break;
	default:
		return -EINVAL;
	}

	if (error) {
		fs_warn(sdp, "freeze %d error %d\n", n, error);
		return error;
	}

	return len;
}

static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
{
	unsigned int b = gfs2_withdrawing_or_withdrawn(sdp);
	return snprintf(buf, PAGE_SIZE, "%u\n", b);
}

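/* Writing 1 forces the filesystem to withdraw from the cluster. */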
static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	int error, val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtoint(buf, 0, &val);
	if (error)
		return error;

	if (val != 1)
		return -EINVAL;

	gfs2_lm(sdp, "withdrawing from cluster at user's request\n");
	gfs2_withdraw(sdp);

	return len;
}

static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
				 size_t len)
{
	int error, val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtoint(buf, 0, &val);
	if (error)
		return error;

	if (val != 1)
		return -EINVAL;

	gfs2_statfs_sync(sdp->sd_vfs, 0);
	return len;
}

static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
				size_t len)
{
	int error, val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtoint(buf, 0, &val);
	if (error)
		return error;

	if (val != 1)
		return -EINVAL;

	gfs2_quota_sync(sdp->sd_vfs, 0);
	return len;
}

static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
					size_t len)
{
	struct kqid qid;
	int error;
	u32 id;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtou32(buf, 0, &id);
	if (error)
		return error;

	qid = make_kqid(current_user_ns(), USRQUOTA, id);
	if (!qid_valid(qid))
		return -EINVAL;

	error = gfs2_quota_refresh(sdp, qid);
	return error ? error : len;
}

static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
					 size_t len)
{
	struct kqid qid;
	int error;
	u32 id;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtou32(buf, 0, &id);
	if (error)
		return error;

	qid = make_kqid(current_user_ns(), GRPQUOTA, id);
	if (!qid_valid(qid))
		return -EINVAL;

	error = gfs2_quota_refresh(sdp, qid);
	return error ? error : len;
}

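/*
 * Issue a demote request against a glock, as if a remote node had asked
 * for it. The input format is "<type>:<number> <mode>", where mode is
 * one of EX, CW/DF or PR/SH.
 */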
static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	struct gfs2_glock *gl;
	const struct gfs2_glock_operations *glops;
	unsigned int glmode;
	unsigned int gltype;
	unsigned long long glnum;
	char mode[16];
	int rv;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	rv = sscanf(buf, "%u:%llu %15s", &gltype, &glnum,
		    mode);
	if (rv != 3)
		return -EINVAL;

	if (strcmp(mode, "EX") == 0)
		glmode = LM_ST_UNLOCKED;
	else if ((strcmp(mode, "CW") == 0) || (strcmp(mode, "DF") == 0))
		glmode = LM_ST_DEFERRED;
	else if ((strcmp(mode, "PR") == 0) || (strcmp(mode, "SH") == 0))
		glmode = LM_ST_SHARED;
	else
		return -EINVAL;

	if (gltype > LM_TYPE_JOURNAL)
		return -EINVAL;
	if (gltype == LM_TYPE_NONDISK && glnum == GFS2_FREEZE_LOCK)
		glops = &gfs2_freeze_glops;
	else
		glops = gfs2_glops_list[gltype];
	if (glops == NULL)
		return -EINVAL;
	if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
		fs_info(sdp, "demote interface used\n");
	rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl);
	if (rv)
		return rv;
	gfs2_glock_cb(gl, glmode);
	gfs2_glock_put(gl);
	return len;
}


#define GFS2_ATTR(name, mode, show, store) \
static struct gfs2_attr gfs2_attr_##name = __ATTR(name, mode, show, store)

GFS2_ATTR(id, 0444, id_show, NULL);
GFS2_ATTR(fsname, 0444, fsname_show, NULL);
GFS2_ATTR(uuid, 0444, uuid_show, NULL);
GFS2_ATTR(freeze, 0644, freeze_show, freeze_store);
GFS2_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
GFS2_ATTR(statfs_sync, 0200, NULL, statfs_sync_store);
GFS2_ATTR(quota_sync, 0200, NULL, quota_sync_store);
GFS2_ATTR(quota_refresh_user, 0200, NULL, quota_refresh_user_store);
GFS2_ATTR(quota_refresh_group, 0200, NULL, quota_refresh_group_store);
GFS2_ATTR(demote_rq, 0200, NULL, demote_rq_store);
GFS2_ATTR(status, 0400, status_show, NULL);

static struct attribute *gfs2_attrs[] = {
	&gfs2_attr_id.attr,
	&gfs2_attr_fsname.attr,
	&gfs2_attr_uuid.attr,
	&gfs2_attr_freeze.attr,
	&gfs2_attr_withdraw.attr,
	&gfs2_attr_statfs_sync.attr,
	&gfs2_attr_quota_sync.attr,
	&gfs2_attr_quota_refresh_user.attr,
	&gfs2_attr_quota_refresh_group.attr,
	&gfs2_attr_demote_rq.attr,
	&gfs2_attr_status.attr,
	NULL,
};
ATTRIBUTE_GROUPS(gfs2);

static void gfs2_sbd_release(struct kobject *kobj)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);

	complete(&sdp->sd_kobj_unregister);
}

static struct kobj_type gfs2_ktype = {
	.release = gfs2_sbd_release,
	.default_groups = gfs2_groups,
	.sysfs_ops = &gfs2_attr_ops,
};


/*
 * lock_module. Originally from lock_dlm
 */

static ssize_t proto_name_show(struct gfs2_sbd *sdp, char *buf)
{
	const struct lm_lockops *ops = sdp->sd_lockstruct.ls_ops;
	return sprintf(buf, "%s\n", ops->lm_proto_name);
}

static ssize_t block_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	ssize_t ret;
	int val = 0;

	if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))
		val = 1;
	ret = sprintf(buf, "%d\n", val);
	return ret;
}

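/*
 * Writing 1 sets DFL_BLOCK_LOCKS in the lockstruct recover flags;
 * writing 0 clears it again and thaws the glocks.
 */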
static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int ret, val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val == 1)
		set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	else if (val == 0) {
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		smp_mb__after_atomic();
		gfs2_glock_thaw(sdp);
	} else {
		return -EINVAL;
	}
	return len;
}

static ssize_t wdack_show(struct gfs2_sbd *sdp, char *buf)
{
	int val = completion_done(&sdp->sd_wdack) ? 1 : 0;

	return sprintf(buf, "%d\n", val);
}

static ssize_t wdack_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	int ret, val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if ((val == 1) &&
	    !strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm"))
		complete(&sdp->sd_wdack);
	else
		return -EINVAL;
	return len;
}

static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", ls->ls_first);
}

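/*
 * Record whether this node is the first mounter (ls_first). Only allowed
 * before the journal ID has been assigned, never for spectator mounts,
 * and only for lock protocols that implement lm_mount.
 */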
static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	unsigned first;
	int rv;

	rv = sscanf(buf, "%u", &first);
	if (rv != 1 || first > 1)
		return -EINVAL;
	rv = wait_for_completion_killable(&sdp->sd_locking_init);
	if (rv)
		return rv;
	spin_lock(&sdp->sd_jindex_spin);
	rv = -EBUSY;
	if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
		goto out;
	rv = -EINVAL;
	if (sdp->sd_args.ar_spectator)
		goto out;
	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
		goto out;
	sdp->sd_lockstruct.ls_first = first;
	rv = 0;
out:
	spin_unlock(&sdp->sd_jindex_spin);
	return rv ? rv : len;
}

static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", !!test_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags));
}

int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid)
{
	struct gfs2_jdesc *jd;
	int rv;

	/* Wait for our primary journal to be initialized */
	wait_for_completion(&sdp->sd_journal_ready);

	spin_lock(&sdp->sd_jindex_spin);
	rv = -EBUSY;
	/**
	 * If we're a spectator, we use journal0, but it's not really ours.
	 * So we need to wait for its recovery too. If we skip it we'd never
	 * queue work to the recovery workqueue, and so its completion would
	 * never clear the DFL_BLOCK_LOCKS flag, so all our locks would
	 * permanently stop working.
	 */
	if (!sdp->sd_jdesc)
		goto out;
	if (sdp->sd_jdesc->jd_jid == jid && !sdp->sd_args.ar_spectator)
		goto out;
	rv = -ENOENT;
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (jd->jd_jid != jid && !sdp->sd_args.ar_spectator)
			continue;
		rv = gfs2_recover_journal(jd, false);
		break;
	}
out:
	spin_unlock(&sdp->sd_jindex_spin);
	return rv;
}

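/*
 * Writing a journal ID here kicks off recovery of that journal, unless
 * SDF_NORECOVERY is set, in which case -ESHUTDOWN is returned.
 */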
static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	unsigned jid;
	int rv;

	rv = sscanf(buf, "%u", &jid);
	if (rv != 1)
		return -EINVAL;

	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		rv = -ESHUTDOWN;
		goto out;
	}

	rv = gfs2_recover_set(sdp, jid);
out:
	return rv ? rv : len;
}

static ssize_t recover_done_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", ls->ls_recover_jid_done);
}

static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", ls->ls_recover_jid_status);
}

static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
{
	return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
}

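/*
 * Assign the journal ID supplied by user space during mount and wake up
 * anyone waiting on SDF_NOJOURNALID.
 */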
static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	int jid;
	int rv;

	rv = sscanf(buf, "%d", &jid);
	if (rv != 1)
		return -EINVAL;
	rv = wait_for_completion_killable(&sdp->sd_locking_init);
	if (rv)
		return rv;
	spin_lock(&sdp->sd_jindex_spin);
	rv = -EINVAL;
	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
		goto out;
	rv = -EBUSY;
	if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
		goto out;
	rv = 0;
	if (sdp->sd_args.ar_spectator && jid > 0)
		rv = jid = -EINVAL;
	sdp->sd_lockstruct.ls_jid = jid;
	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	smp_mb__after_atomic();
	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
out:
	spin_unlock(&sdp->sd_jindex_spin);
	return rv ? rv : len;
}

#define GDLM_ATTR(_name,_mode,_show,_store) \
static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)

GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
GDLM_ATTR(block, 0644, block_show, block_store);
GDLM_ATTR(withdraw, 0644, wdack_show, wdack_store);
GDLM_ATTR(jid, 0644, jid_show, jid_store);
GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store);
GDLM_ATTR(first_done, 0444, first_done_show, NULL);
GDLM_ATTR(recover, 0600, NULL, recover_store);
GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);

static struct attribute *lock_module_attrs[] = {
	&gdlm_attr_proto_name.attr,
	&gdlm_attr_block.attr,
	&gdlm_attr_withdraw.attr,
	&gdlm_attr_jid.attr,
	&gdlm_attr_first.attr,
	&gdlm_attr_first_done.attr,
	&gdlm_attr_recover.attr,
	&gdlm_attr_recover_done.attr,
	&gdlm_attr_recover_status.attr,
	NULL,
};

/*
 * get and set struct gfs2_tune fields
 */

static ssize_t quota_scale_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u %u\n",
			sdp->sd_tune.gt_quota_scale_num,
			sdp->sd_tune.gt_quota_scale_den);
}

static ssize_t quota_scale_store(struct gfs2_sbd *sdp, const char *buf,
				 size_t len)
{
	struct gfs2_tune *gt = &sdp->sd_tune;
	unsigned int x, y;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (sscanf(buf, "%u %u", &x, &y) != 2 || !y)
		return -EINVAL;

	spin_lock(&gt->gt_spin);
	gt->gt_quota_scale_num = x;
	gt->gt_quota_scale_den = y;
	spin_unlock(&gt->gt_spin);
	return len;
}

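/*
 * Common store helper for the tunables below: parse an unsigned integer
 * and assign it to the given gfs2_tune field under gt_spin.
 */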
static ssize_t tune_set(struct gfs2_sbd *sdp, unsigned int *field,
			int check_zero, const char *buf, size_t len)
{
	struct gfs2_tune *gt = &sdp->sd_tune;
	unsigned int x;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtouint(buf, 0, &x);
	if (error)
		return error;

	if (check_zero && !x)
		return -EINVAL;

	spin_lock(&gt->gt_spin);
	*field = x;
	spin_unlock(&gt->gt_spin);
	return len;
}

#define TUNE_ATTR_3(name, show, store) \
static struct gfs2_attr tune_attr_##name = __ATTR(name, 0644, show, store)

#define TUNE_ATTR_2(name, store) \
static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
{ \
	return snprintf(buf, PAGE_SIZE, "%u\n", sdp->sd_tune.gt_##name); \
} \
TUNE_ATTR_3(name, name##_show, store)

#define TUNE_ATTR(name, check_zero) \
static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
{ \
	return tune_set(sdp, &sdp->sd_tune.gt_##name, check_zero, buf, len); \
} \
TUNE_ATTR_2(name, name##_store)

TUNE_ATTR(quota_warn_period, 0);
TUNE_ATTR(quota_quantum, 0);
TUNE_ATTR(max_readahead, 0);
TUNE_ATTR(complain_secs, 0);
TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);

static struct attribute *tune_attrs[] = {
	&tune_attr_quota_warn_period.attr,
	&tune_attr_quota_quantum.attr,
	&tune_attr_max_readahead.attr,
	&tune_attr_complain_secs.attr,
	&tune_attr_statfs_slow.attr,
	&tune_attr_statfs_quantum.attr,
	&tune_attr_quota_scale.attr,
	&tune_attr_new_files_jdata.attr,
	NULL,
};

static const struct attribute_group tune_group = {
	.name = "tune",
	.attrs = tune_attrs,
};

static const struct attribute_group lock_module_group = {
	.name = "lock_module",
	.attrs = lock_module_attrs,
};

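/*
 * Create the sysfs directory /sys/fs/gfs2/<sd_table_name> for this mount,
 * populate the "tune" and "lock_module" groups, link "device" to the
 * backing block device and announce it all with a KOBJ_ADD uevent.
 */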
int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
{
	struct super_block *sb = sdp->sd_vfs;
	int error;
	char ro[20];
	char spectator[20];
	char *envp[] = { ro, spectator, NULL };

	sprintf(ro, "RDONLY=%d", sb_rdonly(sb));
	sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);

	init_completion(&sdp->sd_kobj_unregister);
	sdp->sd_kobj.kset = gfs2_kset;
	error = kobject_init_and_add(&sdp->sd_kobj, &gfs2_ktype, NULL,
				     "%s", sdp->sd_table_name);
	if (error)
		goto fail_reg;

	error = sysfs_create_group(&sdp->sd_kobj, &tune_group);
	if (error)
		goto fail_reg;

	error = sysfs_create_group(&sdp->sd_kobj, &lock_module_group);
	if (error)
		goto fail_tune;

	error = sysfs_create_link(&sdp->sd_kobj,
				  &disk_to_dev(sb->s_bdev->bd_disk)->kobj,
				  "device");
	if (error)
		goto fail_lock_module;

	kobject_uevent_env(&sdp->sd_kobj, KOBJ_ADD, envp);
	return 0;

fail_lock_module:
	sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
fail_tune:
	sysfs_remove_group(&sdp->sd_kobj, &tune_group);
fail_reg:
	fs_err(sdp, "error %d adding sysfs files\n", error);
	kobject_put(&sdp->sd_kobj);
	wait_for_completion(&sdp->sd_kobj_unregister);
	sb->s_fs_info = NULL;
	return error;
}

void gfs2_sys_fs_del(struct gfs2_sbd *sdp)
{
	sysfs_remove_link(&sdp->sd_kobj, "device");
	sysfs_remove_group(&sdp->sd_kobj, &tune_group);
	sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
	kobject_put(&sdp->sd_kobj);
	wait_for_completion(&sdp->sd_kobj_unregister);
}

static int gfs2_uevent(const struct kobject *kobj, struct kobj_uevent_env *env)
{
	const struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	const struct super_block *s = sdp->sd_vfs;

	add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
	add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
		add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid);
	if (!uuid_is_null(&s->s_uuid))
		add_uevent_var(env, "UUID=%pUB", &s->s_uuid);
	return 0;
}

static const struct kset_uevent_ops gfs2_uevent_ops = {
	.uevent = gfs2_uevent,
};

int gfs2_sys_init(void)
{
	gfs2_kset = kset_create_and_add("gfs2", &gfs2_uevent_ops, fs_kobj);
	if (!gfs2_kset)
		return -ENOMEM;
	return 0;
}

void gfs2_sys_uninit(void)
{
	kset_unregister(gfs2_kset);
}