// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/uaccess.h>
#include <linux/gfs2_ondisk.h>
#include <linux/genhd.h>

#include "gfs2.h"
#include "incore.h"
#include "sys.h"
#include "super.h"
#include "glock.h"
#include "quota.h"
#include "util.h"
#include "glops.h"
#include "recovery.h"

struct gfs2_attr {
	struct attribute attr;
	ssize_t (*show)(struct gfs2_sbd *, char *);
	ssize_t (*store)(struct gfs2_sbd *, const char *, size_t);
};

static ssize_t gfs2_attr_show(struct kobject *kobj, struct attribute *attr,
			      char *buf)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
	return a->show ? a->show(sdp, buf) : 0;
}

static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t len)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
	return a->store ? a->store(sdp, buf, len) : len;
}

static const struct sysfs_ops gfs2_attr_ops = {
	.show = gfs2_attr_show,
	.store = gfs2_attr_store,
};


static struct kset *gfs2_kset;

static ssize_t id_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u:%u\n",
			MAJOR(sdp->sd_vfs->s_dev), MINOR(sdp->sd_vfs->s_dev));
}

static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
}

static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
{
	struct super_block *s = sdp->sd_vfs;

	buf[0] = '\0';
	if (uuid_is_null(&s->s_uuid))
		return 0;
	return snprintf(buf, PAGE_SIZE, "%pUB\n", &s->s_uuid);
}

static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
{
	struct super_block *sb = sdp->sd_vfs;
	int frozen = (sb->s_writers.frozen == SB_UNFROZEN) ? 0 : 1;

	return snprintf(buf, PAGE_SIZE, "%d\n", frozen);
}

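/*
 * Writing 1 freezes the filesystem via freeze_super(); writing 0 thaws it
 * via thaw_super().  Any error is logged with fs_warn() and returned to the
 * writer, e.g. "echo 1 > /sys/fs/gfs2/<locktable>/freeze" (the kobject is
 * named after sd_table_name, see gfs2_sys_fs_add() below).
 */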
static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	int error, n;

	error = kstrtoint(buf, 0, &n);
	if (error)
		return error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (n) {
	case 0:
		error = thaw_super(sdp->sd_vfs);
		break;
	case 1:
		error = freeze_super(sdp->sd_vfs);
		break;
	default:
		return -EINVAL;
	}

	if (error) {
		fs_warn(sdp, "freeze %d error %d\n", n, error);
		return error;
	}

	return len;
}

static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
{
	unsigned int b = gfs2_withdrawn(sdp);
	return snprintf(buf, PAGE_SIZE, "%u\n", b);
}

static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	int error, val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtoint(buf, 0, &val);
	if (error)
		return error;

	if (val != 1)
		return -EINVAL;

	gfs2_lm(sdp, "withdrawing from cluster at user's request\n");
	gfs2_withdraw(sdp);

	return len;
}

static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
				 size_t len)
{
	int error, val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtoint(buf, 0, &val);
	if (error)
		return error;

	if (val != 1)
		return -EINVAL;

	gfs2_statfs_sync(sdp->sd_vfs, 0);
	return len;
}

static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
				size_t len)
{
	int error, val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtoint(buf, 0, &val);
	if (error)
		return error;

	if (val != 1)
		return -EINVAL;

	gfs2_quota_sync(sdp->sd_vfs, 0);
	return len;
}

static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
					size_t len)
{
	struct kqid qid;
	int error;
	u32 id;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtou32(buf, 0, &id);
	if (error)
		return error;

	qid = make_kqid(current_user_ns(), USRQUOTA, id);
	if (!qid_valid(qid))
		return -EINVAL;

	error = gfs2_quota_refresh(sdp, qid);
	return error ? error : len;
}

static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
					 size_t len)
{
	struct kqid qid;
	int error;
	u32 id;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtou32(buf, 0, &id);
	if (error)
		return error;

	qid = make_kqid(current_user_ns(), GRPQUOTA, id);
	if (!qid_valid(qid))
		return -EINVAL;

	error = gfs2_quota_refresh(sdp, qid);
	return error ? error : len;
}

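/*
 * demote_rq expects "<glock type>:<glock number> <mode>", parsed with
 * sscanf() below.  "EX" demotes the glock to unlocked, "CW"/"DF" to
 * deferred and "PR"/"SH" to shared; anything else is rejected with -EINVAL.
 */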
static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	struct gfs2_glock *gl;
	const struct gfs2_glock_operations *glops;
	unsigned int glmode;
	unsigned int gltype;
	unsigned long long glnum;
	char mode[16];
	int rv;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	rv = sscanf(buf, "%u:%llu %15s", &gltype, &glnum,
		    mode);
	if (rv != 3)
		return -EINVAL;

	if (strcmp(mode, "EX") == 0)
		glmode = LM_ST_UNLOCKED;
	else if ((strcmp(mode, "CW") == 0) || (strcmp(mode, "DF") == 0))
		glmode = LM_ST_DEFERRED;
	else if ((strcmp(mode, "PR") == 0) || (strcmp(mode, "SH") == 0))
		glmode = LM_ST_SHARED;
	else
		return -EINVAL;

	if (gltype > LM_TYPE_JOURNAL)
		return -EINVAL;
	if (gltype == LM_TYPE_NONDISK && glnum == GFS2_FREEZE_LOCK)
		glops = &gfs2_freeze_glops;
	else
		glops = gfs2_glops_list[gltype];
	if (glops == NULL)
		return -EINVAL;
	if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
		fs_info(sdp, "demote interface used\n");
	rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl);
	if (rv)
		return rv;
	gfs2_glock_cb(gl, glmode);
	gfs2_glock_put(gl);
	return len;
}


#define GFS2_ATTR(name, mode, show, store) \
static struct gfs2_attr gfs2_attr_##name = __ATTR(name, mode, show, store)

GFS2_ATTR(id, 0444, id_show, NULL);
GFS2_ATTR(fsname, 0444, fsname_show, NULL);
GFS2_ATTR(uuid, 0444, uuid_show, NULL);
GFS2_ATTR(freeze, 0644, freeze_show, freeze_store);
GFS2_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
GFS2_ATTR(statfs_sync, 0200, NULL, statfs_sync_store);
GFS2_ATTR(quota_sync, 0200, NULL, quota_sync_store);
GFS2_ATTR(quota_refresh_user, 0200, NULL, quota_refresh_user_store);
GFS2_ATTR(quota_refresh_group, 0200, NULL, quota_refresh_group_store);
GFS2_ATTR(demote_rq, 0200, NULL, demote_rq_store);

static struct attribute *gfs2_attrs[] = {
	&gfs2_attr_id.attr,
	&gfs2_attr_fsname.attr,
	&gfs2_attr_uuid.attr,
	&gfs2_attr_freeze.attr,
	&gfs2_attr_withdraw.attr,
	&gfs2_attr_statfs_sync.attr,
	&gfs2_attr_quota_sync.attr,
	&gfs2_attr_quota_refresh_user.attr,
	&gfs2_attr_quota_refresh_group.attr,
	&gfs2_attr_demote_rq.attr,
	NULL,
};
ATTRIBUTE_GROUPS(gfs2);

static void gfs2_sbd_release(struct kobject *kobj)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);

	complete(&sdp->sd_kobj_unregister);
}

static struct kobj_type gfs2_ktype = {
	.release = gfs2_sbd_release,
	.default_groups = gfs2_groups,
	.sysfs_ops = &gfs2_attr_ops,
};


/*
 * lock_module. Originally from lock_dlm
 */

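/*
 * These attributes are placed in the lock_module/ subdirectory (see
 * lock_module_group below).  They are typically driven by the cluster's
 * user-space helpers (e.g. dlm_controld/gfs_controld) during mount,
 * first-mounter negotiation and journal recovery rather than written by
 * hand.
 */
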
static ssize_t proto_name_show(struct gfs2_sbd *sdp, char *buf)
{
	const struct lm_lockops *ops = sdp->sd_lockstruct.ls_ops;
	return sprintf(buf, "%s\n", ops->lm_proto_name);
}

static ssize_t block_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	ssize_t ret;
	int val = 0;

	if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))
		val = 1;
	ret = sprintf(buf, "%d\n", val);
	return ret;
}

static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	int ret, val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if (val == 1)
		set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	else if (val == 0) {
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		smp_mb__after_atomic();
		gfs2_glock_thaw(sdp);
	} else {
		return -EINVAL;
	}
	return len;
}

static ssize_t wdack_show(struct gfs2_sbd *sdp, char *buf)
{
	int val = completion_done(&sdp->sd_wdack) ? 1 : 0;

	return sprintf(buf, "%d\n", val);
}

static ssize_t wdack_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	int ret, val;

	ret = kstrtoint(buf, 0, &val);
	if (ret)
		return ret;

	if ((val == 1) &&
	    !strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm"))
		complete(&sdp->sd_wdack);
	else
		return -EINVAL;
	return len;
}

static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", ls->ls_first);
}

static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	unsigned first;
	int rv;

	rv = sscanf(buf, "%u", &first);
	if (rv != 1 || first > 1)
		return -EINVAL;
	rv = wait_for_completion_killable(&sdp->sd_locking_init);
	if (rv)
		return rv;
	spin_lock(&sdp->sd_jindex_spin);
	rv = -EBUSY;
	if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
		goto out;
	rv = -EINVAL;
	if (sdp->sd_args.ar_spectator)
		goto out;
	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
		goto out;
	sdp->sd_lockstruct.ls_first = first;
	rv = 0;
out:
	spin_unlock(&sdp->sd_jindex_spin);
	return rv ? rv : len;
}

static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", !!test_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags));
}

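/*
 * gfs2_recover_set - request recovery of the journal with the given jid
 *
 * Returns 0 if recovery has been requested, -EBUSY if jid refers to our own
 * journal (and we are not a spectator), or -ENOENT if no matching journal
 * is known.
 */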
int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid)
{
	struct gfs2_jdesc *jd;
	int rv;

	/* Wait for our primary journal to be initialized */
	wait_for_completion(&sdp->sd_journal_ready);

	spin_lock(&sdp->sd_jindex_spin);
	rv = -EBUSY;
	/*
	 * If we're a spectator, we use journal0, but it's not really ours.
	 * So we need to wait for its recovery too. If we skip it we'd never
	 * queue work to the recovery workqueue, and so its completion would
	 * never clear the DFL_BLOCK_LOCKS flag, so all our locks would
	 * permanently stop working.
	 */
	if (!sdp->sd_jdesc)
		goto out;
	if (sdp->sd_jdesc->jd_jid == jid && !sdp->sd_args.ar_spectator)
		goto out;
	rv = -ENOENT;
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (jd->jd_jid != jid && !sdp->sd_args.ar_spectator)
			continue;
		rv = gfs2_recover_journal(jd, false);
		break;
	}
out:
	spin_unlock(&sdp->sd_jindex_spin);
	return rv;
}

static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	unsigned jid;
	int rv;

	rv = sscanf(buf, "%u", &jid);
	if (rv != 1)
		return -EINVAL;

	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		rv = -ESHUTDOWN;
		goto out;
	}

	rv = gfs2_recover_set(sdp, jid);
out:
	return rv ? rv : len;
}

static ssize_t recover_done_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", ls->ls_recover_jid_done);
}

static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", ls->ls_recover_jid_status);
}

static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
{
	return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
}

static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	int jid;
	int rv;

	rv = sscanf(buf, "%d", &jid);
	if (rv != 1)
		return -EINVAL;
	rv = wait_for_completion_killable(&sdp->sd_locking_init);
	if (rv)
		return rv;
	spin_lock(&sdp->sd_jindex_spin);
	rv = -EINVAL;
	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
		goto out;
	rv = -EBUSY;
	if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
		goto out;
	rv = 0;
	if (sdp->sd_args.ar_spectator && jid > 0)
		rv = jid = -EINVAL;
	sdp->sd_lockstruct.ls_jid = jid;
	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	smp_mb__after_atomic();
	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
out:
	spin_unlock(&sdp->sd_jindex_spin);
	return rv ? rv : len;
}

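/*
 * Note that the "withdraw" file defined here in lock_module/ is backed by
 * wdack_show()/wdack_store(): it completes sd_wdack (only accepted when the
 * lock protocol is lock_dlm) and is distinct from the top-level "withdraw"
 * attribute defined above.
 */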
#define GDLM_ATTR(_name,_mode,_show,_store) \
static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)

GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
GDLM_ATTR(block, 0644, block_show, block_store);
GDLM_ATTR(withdraw, 0644, wdack_show, wdack_store);
GDLM_ATTR(jid, 0644, jid_show, jid_store);
GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store);
GDLM_ATTR(first_done, 0444, first_done_show, NULL);
GDLM_ATTR(recover, 0600, NULL, recover_store);
GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);

static struct attribute *lock_module_attrs[] = {
	&gdlm_attr_proto_name.attr,
	&gdlm_attr_block.attr,
	&gdlm_attr_withdraw.attr,
	&gdlm_attr_jid.attr,
	&gdlm_attr_first.attr,
	&gdlm_attr_first_done.attr,
	&gdlm_attr_recover.attr,
	&gdlm_attr_recover_done.attr,
	&gdlm_attr_recover_status.attr,
	NULL,
};

/*
 * get and set struct gfs2_tune fields
 */

static ssize_t quota_scale_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u %u\n",
			sdp->sd_tune.gt_quota_scale_num,
			sdp->sd_tune.gt_quota_scale_den);
}

static ssize_t quota_scale_store(struct gfs2_sbd *sdp, const char *buf,
				 size_t len)
{
	struct gfs2_tune *gt = &sdp->sd_tune;
	unsigned int x, y;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (sscanf(buf, "%u %u", &x, &y) != 2 || !y)
		return -EINVAL;

	spin_lock(&gt->gt_spin);
	gt->gt_quota_scale_num = x;
	gt->gt_quota_scale_den = y;
	spin_unlock(&gt->gt_spin);
	return len;
}

static ssize_t tune_set(struct gfs2_sbd *sdp, unsigned int *field,
			int check_zero, const char *buf, size_t len)
{
	struct gfs2_tune *gt = &sdp->sd_tune;
	unsigned int x;
	int error;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	error = kstrtouint(buf, 0, &x);
	if (error)
		return error;

	if (check_zero && !x)
		return -EINVAL;

	spin_lock(&gt->gt_spin);
	*field = x;
	spin_unlock(&gt->gt_spin);
	return len;
}

#define TUNE_ATTR_3(name, show, store) \
static struct gfs2_attr tune_attr_##name = __ATTR(name, 0644, show, store)

#define TUNE_ATTR_2(name, store) \
static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
{ \
	return snprintf(buf, PAGE_SIZE, "%u\n", sdp->sd_tune.gt_##name); \
} \
TUNE_ATTR_3(name, name##_show, store)

#define TUNE_ATTR(name, check_zero) \
static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
{ \
	return tune_set(sdp, &sdp->sd_tune.gt_##name, check_zero, buf, len); \
} \
TUNE_ATTR_2(name, name##_store)

TUNE_ATTR(quota_warn_period, 0);
TUNE_ATTR(quota_quantum, 0);
TUNE_ATTR(max_readahead, 0);
TUNE_ATTR(complain_secs, 0);
TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);

static struct attribute *tune_attrs[] = {
	&tune_attr_quota_warn_period.attr,
	&tune_attr_quota_quantum.attr,
	&tune_attr_max_readahead.attr,
	&tune_attr_complain_secs.attr,
	&tune_attr_statfs_slow.attr,
	&tune_attr_statfs_quantum.attr,
	&tune_attr_quota_scale.attr,
	&tune_attr_new_files_jdata.attr,
	NULL,
};

static const struct attribute_group tune_group = {
	.name = "tune",
	.attrs = tune_attrs,
};

static const struct attribute_group lock_module_group = {
	.name = "lock_module",
	.attrs = lock_module_attrs,
};

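/*
 * gfs2_sys_fs_add - populate /sys/fs/gfs2/<locktable>/ for this mount
 *
 * Creates the per-sb kobject (named after sd_table_name) with the top-level
 * attributes, the tune/ and lock_module/ groups and a "device" symlink to
 * the underlying block device, then emits a KOBJ_ADD uevent carrying
 * RDONLY= and SPECTATOR= so user space can react to the new mount.
 */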
int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
{
	struct super_block *sb = sdp->sd_vfs;
	int error;
	char ro[20];
	char spectator[20];
	char *envp[] = { ro, spectator, NULL };

	sprintf(ro, "RDONLY=%d", sb_rdonly(sb));
	sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);

	init_completion(&sdp->sd_kobj_unregister);
	sdp->sd_kobj.kset = gfs2_kset;
	error = kobject_init_and_add(&sdp->sd_kobj, &gfs2_ktype, NULL,
				     "%s", sdp->sd_table_name);
	if (error)
		goto fail_reg;

	error = sysfs_create_group(&sdp->sd_kobj, &tune_group);
	if (error)
		goto fail_reg;

	error = sysfs_create_group(&sdp->sd_kobj, &lock_module_group);
	if (error)
		goto fail_tune;

	error = sysfs_create_link(&sdp->sd_kobj,
				  &disk_to_dev(sb->s_bdev->bd_disk)->kobj,
				  "device");
	if (error)
		goto fail_lock_module;

	kobject_uevent_env(&sdp->sd_kobj, KOBJ_ADD, envp);
	return 0;

fail_lock_module:
	sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
fail_tune:
	sysfs_remove_group(&sdp->sd_kobj, &tune_group);
fail_reg:
	fs_err(sdp, "error %d adding sysfs files\n", error);
	kobject_put(&sdp->sd_kobj);
	wait_for_completion(&sdp->sd_kobj_unregister);
	sb->s_fs_info = NULL;
	return error;
}

void gfs2_sys_fs_del(struct gfs2_sbd *sdp)
{
	sysfs_remove_link(&sdp->sd_kobj, "device");
	sysfs_remove_group(&sdp->sd_kobj, &tune_group);
	sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
	kobject_put(&sdp->sd_kobj);
	wait_for_completion(&sdp->sd_kobj_unregister);
}

static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
		       struct kobj_uevent_env *env)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	struct super_block *s = sdp->sd_vfs;

	add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
	add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
		add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid);
	if (!uuid_is_null(&s->s_uuid))
		add_uevent_var(env, "UUID=%pUB", &s->s_uuid);
	return 0;
}

static const struct kset_uevent_ops gfs2_uevent_ops = {
	.uevent = gfs2_uevent,
};

int gfs2_sys_init(void)
{
	gfs2_kset = kset_create_and_add("gfs2", &gfs2_uevent_ops, fs_kobj);
	if (!gfs2_kset)
		return -ENOMEM;
	return 0;
}

void gfs2_sys_uninit(void)
{
	kset_unregister(gfs2_kset);
}