// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/gfs2_ondisk.h>
#include <linux/genhd.h>

#include "gfs2.h"
#include "incore.h"
#include "sys.h"
#include "super.h"
#include "glock.h"
#include "quota.h"
#include "util.h"
#include "glops.h"
#include "recovery.h"

struct gfs2_attr {
        struct attribute attr;
        ssize_t (*show)(struct gfs2_sbd *, char *);
        ssize_t (*store)(struct gfs2_sbd *, const char *, size_t);
};

static ssize_t gfs2_attr_show(struct kobject *kobj, struct attribute *attr,
                              char *buf)
{
        struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
        struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
        return a->show ? a->show(sdp, buf) : 0;
}

static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
                               const char *buf, size_t len)
{
        struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
        struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
        return a->store ? a->store(sdp, buf, len) : len;
}

static const struct sysfs_ops gfs2_attr_ops = {
        .show = gfs2_attr_show,
        .store = gfs2_attr_store,
};


static struct kset *gfs2_kset;

static ssize_t id_show(struct gfs2_sbd *sdp, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%u:%u\n",
                        MAJOR(sdp->sd_vfs->s_dev), MINOR(sdp->sd_vfs->s_dev));
}

static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
}

static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
{
        struct super_block *s = sdp->sd_vfs;

        buf[0] = '\0';
        if (uuid_is_null(&s->s_uuid))
                return 0;
        return snprintf(buf, PAGE_SIZE, "%pUB\n", &s->s_uuid);
}

static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
{
        struct super_block *sb = sdp->sd_vfs;
        int frozen = (sb->s_writers.frozen == SB_UNFROZEN) ? 0 : 1;

        return snprintf(buf, PAGE_SIZE, "%d\n", frozen);
}

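/*
 * Writing 1 to the freeze file freezes the filesystem via freeze_super(),
 * writing 0 thaws it via thaw_super(); any other value is rejected.
 * For example (assuming a hypothetical locktable name "mycluster:myfs"):
 *   echo 1 > /sys/fs/gfs2/mycluster:myfs/freeze
 */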
static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        int error, n;

        error = kstrtoint(buf, 0, &n);
        if (error)
                return error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        switch (n) {
        case 0:
                error = thaw_super(sdp->sd_vfs);
                break;
        case 1:
                error = freeze_super(sdp->sd_vfs);
                break;
        default:
                return -EINVAL;
        }

        if (error) {
                fs_warn(sdp, "freeze %d error %d\n", n, error);
                return error;
        }

        return len;
}

static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
{
        unsigned int b = test_bit(SDF_WITHDRAWN, &sdp->sd_flags);
        return snprintf(buf, PAGE_SIZE, "%u\n", b);
}

static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        int error, val;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtoint(buf, 0, &val);
        if (error)
                return error;

        if (val != 1)
                return -EINVAL;

        gfs2_lm_withdraw(sdp, "withdrawing from cluster at user's request\n");

        return len;
}

static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
                                 size_t len)
{
        int error, val;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtoint(buf, 0, &val);
        if (error)
                return error;

        if (val != 1)
                return -EINVAL;

        gfs2_statfs_sync(sdp->sd_vfs, 0);
        return len;
}

static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
                                size_t len)
{
        int error, val;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtoint(buf, 0, &val);
        if (error)
                return error;

        if (val != 1)
                return -EINVAL;

        gfs2_quota_sync(sdp->sd_vfs, 0);
        return len;
}

static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
                                        size_t len)
{
        struct kqid qid;
        int error;
        u32 id;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtou32(buf, 0, &id);
        if (error)
                return error;

        qid = make_kqid(current_user_ns(), USRQUOTA, id);
        if (!qid_valid(qid))
                return -EINVAL;

        error = gfs2_quota_refresh(sdp, qid);
        return error ? error : len;
}

static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
                                         size_t len)
{
        struct kqid qid;
        int error;
        u32 id;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtou32(buf, 0, &id);
        if (error)
                return error;

        qid = make_kqid(current_user_ns(), GRPQUOTA, id);
        if (!qid_valid(qid))
                return -EINVAL;

        error = gfs2_quota_refresh(sdp, qid);
        return error ? error : len;
}

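/*
 * demote_rq accepts a request of the form "<gltype>:<glnum> <mode>",
 * where <mode> is one of EX, CW, DF, PR or SH, and issues the
 * corresponding demote callback on that glock.  A hypothetical example:
 *   echo "2:8388608 EX" > /sys/fs/gfs2/<locktable>/demote_rq
 */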
static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        struct gfs2_glock *gl;
        const struct gfs2_glock_operations *glops;
        unsigned int glmode;
        unsigned int gltype;
        unsigned long long glnum;
        char mode[16];
        int rv;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        rv = sscanf(buf, "%u:%llu %15s", &gltype, &glnum,
                    mode);
        if (rv != 3)
                return -EINVAL;

        if (strcmp(mode, "EX") == 0)
                glmode = LM_ST_UNLOCKED;
        else if ((strcmp(mode, "CW") == 0) || (strcmp(mode, "DF") == 0))
                glmode = LM_ST_DEFERRED;
        else if ((strcmp(mode, "PR") == 0) || (strcmp(mode, "SH") == 0))
                glmode = LM_ST_SHARED;
        else
                return -EINVAL;

        if (gltype > LM_TYPE_JOURNAL)
                return -EINVAL;
        if (gltype == LM_TYPE_NONDISK && glnum == GFS2_FREEZE_LOCK)
                glops = &gfs2_freeze_glops;
        else
                glops = gfs2_glops_list[gltype];
        if (glops == NULL)
                return -EINVAL;
        if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
                fs_info(sdp, "demote interface used\n");
        rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl);
        if (rv)
                return rv;
        gfs2_glock_cb(gl, glmode);
        gfs2_glock_put(gl);
        return len;
}


#define GFS2_ATTR(name, mode, show, store) \
static struct gfs2_attr gfs2_attr_##name = __ATTR(name, mode, show, store)

GFS2_ATTR(id, 0444, id_show, NULL);
GFS2_ATTR(fsname, 0444, fsname_show, NULL);
GFS2_ATTR(uuid, 0444, uuid_show, NULL);
GFS2_ATTR(freeze, 0644, freeze_show, freeze_store);
GFS2_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
GFS2_ATTR(statfs_sync, 0200, NULL, statfs_sync_store);
GFS2_ATTR(quota_sync, 0200, NULL, quota_sync_store);
GFS2_ATTR(quota_refresh_user, 0200, NULL, quota_refresh_user_store);
GFS2_ATTR(quota_refresh_group, 0200, NULL, quota_refresh_group_store);
GFS2_ATTR(demote_rq, 0200, NULL, demote_rq_store);

static struct attribute *gfs2_attrs[] = {
        &gfs2_attr_id.attr,
        &gfs2_attr_fsname.attr,
        &gfs2_attr_uuid.attr,
        &gfs2_attr_freeze.attr,
        &gfs2_attr_withdraw.attr,
        &gfs2_attr_statfs_sync.attr,
        &gfs2_attr_quota_sync.attr,
        &gfs2_attr_quota_refresh_user.attr,
        &gfs2_attr_quota_refresh_group.attr,
        &gfs2_attr_demote_rq.attr,
        NULL,
};
ATTRIBUTE_GROUPS(gfs2);

static void gfs2_sbd_release(struct kobject *kobj)
{
        struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);

        free_sbd(sdp);
}

static struct kobj_type gfs2_ktype = {
        .release = gfs2_sbd_release,
        .default_groups = gfs2_groups,
        .sysfs_ops = &gfs2_attr_ops,
};

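/*
 * The attributes above are the default groups of gfs2_ktype, so they
 * appear directly in the per-filesystem directory created by
 * gfs2_sys_fs_add() below, i.e. /sys/fs/gfs2/<locktable>/.
 */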

/*
 * lock_module. Originally from lock_dlm
 */

static ssize_t proto_name_show(struct gfs2_sbd *sdp, char *buf)
{
        const struct lm_lockops *ops = sdp->sd_lockstruct.ls_ops;
        return sprintf(buf, "%s\n", ops->lm_proto_name);
}

static ssize_t block_show(struct gfs2_sbd *sdp, char *buf)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        ssize_t ret;
        int val = 0;

        if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))
                val = 1;
        ret = sprintf(buf, "%d\n", val);
        return ret;
}

static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        int ret, val;

        ret = kstrtoint(buf, 0, &val);
        if (ret)
                return ret;

        if (val == 1)
                set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
        else if (val == 0) {
                clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
                smp_mb__after_atomic();
                gfs2_glock_thaw(sdp);
        } else {
                return -EINVAL;
        }
        return len;
}

static ssize_t wdack_show(struct gfs2_sbd *sdp, char *buf)
{
        int val = completion_done(&sdp->sd_wdack) ? 1 : 0;

        return sprintf(buf, "%d\n", val);
}

static ssize_t wdack_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        int ret, val;

        ret = kstrtoint(buf, 0, &val);
        if (ret)
                return ret;

        if ((val == 1) &&
            !strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm"))
                complete(&sdp->sd_wdack);
        else
                return -EINVAL;
        return len;
}

static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sprintf(buf, "%d\n", ls->ls_first);
}

static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        unsigned first;
        int rv;

        rv = sscanf(buf, "%u", &first);
        if (rv != 1 || first > 1)
                return -EINVAL;
        rv = wait_for_completion_killable(&sdp->sd_locking_init);
        if (rv)
                return rv;
        spin_lock(&sdp->sd_jindex_spin);
        rv = -EBUSY;
        if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
                goto out;
        rv = -EINVAL;
        if (sdp->sd_args.ar_spectator)
                goto out;
        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
                goto out;
        sdp->sd_lockstruct.ls_first = first;
        rv = 0;
out:
        spin_unlock(&sdp->sd_jindex_spin);
        return rv ? rv : len;
}

static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sprintf(buf, "%d\n", !!test_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags));
}

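/*
 * gfs2_recover_set - request recovery of the journal with journal id @jid
 *
 * Returns 0 on success, -EBUSY if @jid is this node's own (non-spectator)
 * journal, or -ENOENT if no journal with that jid is known.  Also used by
 * recover_store() below.
 */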
int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid)
{
        struct gfs2_jdesc *jd;
        int rv;

        /* Wait for our primary journal to be initialized */
        wait_for_completion(&sdp->sd_journal_ready);

        spin_lock(&sdp->sd_jindex_spin);
        rv = -EBUSY;
        /*
         * If we're a spectator, we use journal0, but it's not really ours.
         * So we need to wait for its recovery too. If we skip it we'd never
         * queue work to the recovery workqueue, and so its completion would
         * never clear the DFL_BLOCK_LOCKS flag, so all our locks would
         * permanently stop working.
         */
        if (sdp->sd_jdesc->jd_jid == jid && !sdp->sd_args.ar_spectator)
                goto out;
        rv = -ENOENT;
        list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                if (jd->jd_jid != jid && !sdp->sd_args.ar_spectator)
                        continue;
                rv = gfs2_recover_journal(jd, false);
                break;
        }
out:
        spin_unlock(&sdp->sd_jindex_spin);
        return rv;
}

static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        unsigned jid;
        int rv;

        rv = sscanf(buf, "%u", &jid);
        if (rv != 1)
                return -EINVAL;

        if (test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
                rv = -ESHUTDOWN;
                goto out;
        }

        rv = gfs2_recover_set(sdp, jid);
out:
        return rv ? rv : len;
}

static ssize_t recover_done_show(struct gfs2_sbd *sdp, char *buf)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sprintf(buf, "%d\n", ls->ls_recover_jid_done);
}

static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf)
{
        struct lm_lockstruct *ls = &sdp->sd_lockstruct;
        return sprintf(buf, "%d\n", ls->ls_recover_jid_status);
}

static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
{
        return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
}

static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
        int jid;
        int rv;

        rv = sscanf(buf, "%d", &jid);
        if (rv != 1)
                return -EINVAL;
        rv = wait_for_completion_killable(&sdp->sd_locking_init);
        if (rv)
                return rv;
        spin_lock(&sdp->sd_jindex_spin);
        rv = -EINVAL;
        if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
                goto out;
        rv = -EBUSY;
        if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
                goto out;
        rv = 0;
        if (sdp->sd_args.ar_spectator && jid > 0)
                rv = jid = -EINVAL;
        sdp->sd_lockstruct.ls_jid = jid;
        clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
        smp_mb__after_atomic();
        wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
out:
        spin_unlock(&sdp->sd_jindex_spin);
        return rv ? rv : len;
}

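/*
 * The attributes below form the lock_module/ group and are normally
 * driven by the cluster mount/recovery infrastructure rather than by
 * hand; for instance,
 *   echo <jid> > /sys/fs/gfs2/<locktable>/lock_module/recover
 * requests recovery of the journal with that jid.
 */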
#define GDLM_ATTR(_name,_mode,_show,_store) \
static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)

GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
GDLM_ATTR(block, 0644, block_show, block_store);
GDLM_ATTR(withdraw, 0644, wdack_show, wdack_store);
GDLM_ATTR(jid, 0644, jid_show, jid_store);
GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store);
GDLM_ATTR(first_done, 0444, first_done_show, NULL);
GDLM_ATTR(recover, 0600, NULL, recover_store);
GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);

static struct attribute *lock_module_attrs[] = {
        &gdlm_attr_proto_name.attr,
        &gdlm_attr_block.attr,
        &gdlm_attr_withdraw.attr,
        &gdlm_attr_jid.attr,
        &gdlm_attr_first.attr,
        &gdlm_attr_first_done.attr,
        &gdlm_attr_recover.attr,
        &gdlm_attr_recover_done.attr,
        &gdlm_attr_recover_status.attr,
        NULL,
};

/*
 * get and set struct gfs2_tune fields
 */

static ssize_t quota_scale_show(struct gfs2_sbd *sdp, char *buf)
{
        return snprintf(buf, PAGE_SIZE, "%u %u\n",
                        sdp->sd_tune.gt_quota_scale_num,
                        sdp->sd_tune.gt_quota_scale_den);
}

static ssize_t quota_scale_store(struct gfs2_sbd *sdp, const char *buf,
                                 size_t len)
{
        struct gfs2_tune *gt = &sdp->sd_tune;
        unsigned int x, y;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (sscanf(buf, "%u %u", &x, &y) != 2 || !y)
                return -EINVAL;

        spin_lock(&gt->gt_spin);
        gt->gt_quota_scale_num = x;
        gt->gt_quota_scale_den = y;
        spin_unlock(&gt->gt_spin);
        return len;
}

static ssize_t tune_set(struct gfs2_sbd *sdp, unsigned int *field,
                        int check_zero, const char *buf, size_t len)
{
        struct gfs2_tune *gt = &sdp->sd_tune;
        unsigned int x;
        int error;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        error = kstrtouint(buf, 0, &x);
        if (error)
                return error;

        if (check_zero && !x)
                return -EINVAL;

        spin_lock(&gt->gt_spin);
        *field = x;
        spin_unlock(&gt->gt_spin);
        return len;
}

#define TUNE_ATTR_3(name, show, store) \
static struct gfs2_attr tune_attr_##name = __ATTR(name, 0644, show, store)

#define TUNE_ATTR_2(name, store) \
static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
{ \
        return snprintf(buf, PAGE_SIZE, "%u\n", sdp->sd_tune.gt_##name); \
} \
TUNE_ATTR_3(name, name##_show, store)

#define TUNE_ATTR(name, check_zero) \
static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
{ \
        return tune_set(sdp, &sdp->sd_tune.gt_##name, check_zero, buf, len); \
} \
TUNE_ATTR_2(name, name##_store)

TUNE_ATTR(quota_warn_period, 0);
TUNE_ATTR(quota_quantum, 0);
TUNE_ATTR(max_readahead, 0);
TUNE_ATTR(complain_secs, 0);
TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);

static struct attribute *tune_attrs[] = {
        &tune_attr_quota_warn_period.attr,
        &tune_attr_quota_quantum.attr,
        &tune_attr_max_readahead.attr,
        &tune_attr_complain_secs.attr,
        &tune_attr_statfs_slow.attr,
        &tune_attr_statfs_quantum.attr,
        &tune_attr_quota_scale.attr,
        &tune_attr_new_files_jdata.attr,
        NULL,
};

static const struct attribute_group tune_group = {
        .name = "tune",
        .attrs = tune_attrs,
};

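/*
 * Both named groups are registered by gfs2_sys_fs_add() below and show
 * up as the tune/ and lock_module/ subdirectories of the per-filesystem
 * sysfs directory.
 */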

static const struct attribute_group lock_module_group = {
        .name = "lock_module",
        .attrs = lock_module_attrs,
};

int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
{
        struct super_block *sb = sdp->sd_vfs;
        int error;
        char ro[20];
        char spectator[20];
        char *envp[] = { ro, spectator, NULL };

        sprintf(ro, "RDONLY=%d", sb_rdonly(sb));
        sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);

        sdp->sd_kobj.kset = gfs2_kset;
        error = kobject_init_and_add(&sdp->sd_kobj, &gfs2_ktype, NULL,
                                     "%s", sdp->sd_table_name);
        if (error)
                goto fail_reg;

        error = sysfs_create_group(&sdp->sd_kobj, &tune_group);
        if (error)
                goto fail_reg;

        error = sysfs_create_group(&sdp->sd_kobj, &lock_module_group);
        if (error)
                goto fail_tune;

        error = sysfs_create_link(&sdp->sd_kobj,
                                  &disk_to_dev(sb->s_bdev->bd_disk)->kobj,
                                  "device");
        if (error)
                goto fail_lock_module;

        kobject_uevent_env(&sdp->sd_kobj, KOBJ_ADD, envp);
        return 0;

fail_lock_module:
        sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
fail_tune:
        sysfs_remove_group(&sdp->sd_kobj, &tune_group);
fail_reg:
        fs_err(sdp, "error %d adding sysfs files\n", error);
        kobject_put(&sdp->sd_kobj);
        sb->s_fs_info = NULL;
        return error;
}

void gfs2_sys_fs_del(struct gfs2_sbd *sdp)
{
        sysfs_remove_link(&sdp->sd_kobj, "device");
        sysfs_remove_group(&sdp->sd_kobj, &tune_group);
        sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
        kobject_put(&sdp->sd_kobj);
}

static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
                       struct kobj_uevent_env *env)
{
        struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
        struct super_block *s = sdp->sd_vfs;

        add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
        add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
        if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
                add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid);
        if (!uuid_is_null(&s->s_uuid))
                add_uevent_var(env, "UUID=%pUB", &s->s_uuid);
        return 0;
}

static const struct kset_uevent_ops gfs2_uevent_ops = {
        .uevent = gfs2_uevent,
};

int gfs2_sys_init(void)
{
        gfs2_kset = kset_create_and_add("gfs2", &gfs2_uevent_ops, fs_kobj);
        if (!gfs2_kset)
                return -ENOMEM;
        return 0;
}

void gfs2_sys_uninit(void)
{
        kset_unregister(gfs2_kset);
}