/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/kobject.h>
#include <asm/uaccess.h>
#include <linux/gfs2_ondisk.h>
#include <linux/genhd.h>

#include "gfs2.h"
#include "incore.h"
#include "sys.h"
#include "super.h"
#include "glock.h"
#include "quota.h"
#include "util.h"
#include "glops.h"
#include "recovery.h"

struct gfs2_attr {
	struct attribute attr;
	ssize_t (*show)(struct gfs2_sbd *, char *);
	ssize_t (*store)(struct gfs2_sbd *, const char *, size_t);
};

static ssize_t gfs2_attr_show(struct kobject *kobj, struct attribute *attr,
			      char *buf)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
	return a->show ? a->show(sdp, buf) : 0;
}

static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t len)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	struct gfs2_attr *a = container_of(attr, struct gfs2_attr, attr);
	return a->store ? a->store(sdp, buf, len) : len;
}

static const struct sysfs_ops gfs2_attr_ops = {
	.show = gfs2_attr_show,
	.store = gfs2_attr_store,
};


static struct kset *gfs2_kset;

static ssize_t id_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u:%u\n",
			MAJOR(sdp->sd_vfs->s_dev), MINOR(sdp->sd_vfs->s_dev));
}

static ssize_t fsname_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", sdp->sd_fsname);
}

static int gfs2_uuid_valid(const u8 *uuid)
{
	int i;

	for (i = 0; i < 16; i++) {
		if (uuid[i])
			return 1;
	}
	return 0;
}

static ssize_t uuid_show(struct gfs2_sbd *sdp, char *buf)
{
	struct super_block *s = sdp->sd_vfs;
	const u8 *uuid = s->s_uuid;
	buf[0] = '\0';
	if (!gfs2_uuid_valid(uuid))
		return 0;
	return snprintf(buf, PAGE_SIZE, "%pUB\n", uuid);
}
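
/*
 * The "freeze" file reports whether the filesystem is currently frozen
 * (1) or not (0).  Writing 1 freezes the filesystem via freeze_super(),
 * writing 0 thaws it via thaw_super(), and any other value is rejected
 * with -EINVAL.
 */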

static ssize_t freeze_show(struct gfs2_sbd *sdp, char *buf)
{
	struct super_block *sb = sdp->sd_vfs;
	int frozen = (sb->s_writers.frozen == SB_UNFROZEN) ? 0 : 1;

	return snprintf(buf, PAGE_SIZE, "%d\n", frozen);
}

static ssize_t freeze_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	int error;
	int n = simple_strtol(buf, NULL, 0);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (n) {
	case 0:
		error = thaw_super(sdp->sd_vfs);
		break;
	case 1:
		error = freeze_super(sdp->sd_vfs);
		break;
	default:
		return -EINVAL;
	}

	if (error) {
		fs_warn(sdp, "freeze %d error %d", n, error);
		return error;
	}

	return len;
}

static ssize_t withdraw_show(struct gfs2_sbd *sdp, char *buf)
{
	unsigned int b = test_bit(SDF_SHUTDOWN, &sdp->sd_flags);
	return snprintf(buf, PAGE_SIZE, "%u\n", b);
}

static ssize_t withdraw_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (simple_strtol(buf, NULL, 0) != 1)
		return -EINVAL;

	gfs2_lm_withdraw(sdp, "withdrawing from cluster at user's request\n");

	return len;
}

static ssize_t statfs_sync_store(struct gfs2_sbd *sdp, const char *buf,
				 size_t len)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (simple_strtol(buf, NULL, 0) != 1)
		return -EINVAL;

	gfs2_statfs_sync(sdp->sd_vfs, 0);
	return len;
}

static ssize_t quota_sync_store(struct gfs2_sbd *sdp, const char *buf,
				size_t len)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (simple_strtol(buf, NULL, 0) != 1)
		return -EINVAL;

	gfs2_quota_sync(sdp->sd_vfs, 0);
	return len;
}

static ssize_t quota_refresh_user_store(struct gfs2_sbd *sdp, const char *buf,
					size_t len)
{
	struct kqid qid;
	int error;
	u32 id;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	id = simple_strtoul(buf, NULL, 0);

	qid = make_kqid(current_user_ns(), USRQUOTA, id);
	if (!qid_valid(qid))
		return -EINVAL;

	error = gfs2_quota_refresh(sdp, qid);
	return error ? error : len;
}

static ssize_t quota_refresh_group_store(struct gfs2_sbd *sdp, const char *buf,
					 size_t len)
{
	struct kqid qid;
	int error;
	u32 id;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	id = simple_strtoul(buf, NULL, 0);

	qid = make_kqid(current_user_ns(), GRPQUOTA, id);
	if (!qid_valid(qid))
		return -EINVAL;

	error = gfs2_quota_refresh(sdp, qid);
	return error ? error : len;
}
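
/*
 * The "demote_rq" file accepts demote requests of the form
 * "<glock type>:<glock number> <mode>", e.g. "2:13 EX".  "EX" demotes
 * the glock all the way to unlocked, "DF"/"CW" demote it to the
 * deferred state, and "PR"/"SH" demote it to the shared state.
 */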

static ssize_t demote_rq_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	struct gfs2_glock *gl;
	const struct gfs2_glock_operations *glops;
	unsigned int glmode;
	unsigned int gltype;
	unsigned long long glnum;
	char mode[16];
	int rv;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	rv = sscanf(buf, "%u:%llu %15s", &gltype, &glnum,
		    mode);
	if (rv != 3)
		return -EINVAL;

	if (strcmp(mode, "EX") == 0)
		glmode = LM_ST_UNLOCKED;
	else if ((strcmp(mode, "CW") == 0) || (strcmp(mode, "DF") == 0))
		glmode = LM_ST_DEFERRED;
	else if ((strcmp(mode, "PR") == 0) || (strcmp(mode, "SH") == 0))
		glmode = LM_ST_SHARED;
	else
		return -EINVAL;

	if (gltype > LM_TYPE_JOURNAL)
		return -EINVAL;
	if (gltype == LM_TYPE_NONDISK && glnum == GFS2_FREEZE_LOCK)
		glops = &gfs2_freeze_glops;
	else
		glops = gfs2_glops_list[gltype];
	if (glops == NULL)
		return -EINVAL;
	if (!test_and_set_bit(SDF_DEMOTE, &sdp->sd_flags))
		fs_info(sdp, "demote interface used\n");
	rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl);
	if (rv)
		return rv;
	gfs2_glock_cb(gl, glmode);
	gfs2_glock_put(gl);
	return len;
}


#define GFS2_ATTR(name, mode, show, store) \
static struct gfs2_attr gfs2_attr_##name = __ATTR(name, mode, show, store)

GFS2_ATTR(id, 0444, id_show, NULL);
GFS2_ATTR(fsname, 0444, fsname_show, NULL);
GFS2_ATTR(uuid, 0444, uuid_show, NULL);
GFS2_ATTR(freeze, 0644, freeze_show, freeze_store);
GFS2_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
GFS2_ATTR(statfs_sync, 0200, NULL, statfs_sync_store);
GFS2_ATTR(quota_sync, 0200, NULL, quota_sync_store);
GFS2_ATTR(quota_refresh_user, 0200, NULL, quota_refresh_user_store);
GFS2_ATTR(quota_refresh_group, 0200, NULL, quota_refresh_group_store);
GFS2_ATTR(demote_rq, 0200, NULL, demote_rq_store);

static struct attribute *gfs2_attrs[] = {
	&gfs2_attr_id.attr,
	&gfs2_attr_fsname.attr,
	&gfs2_attr_uuid.attr,
	&gfs2_attr_freeze.attr,
	&gfs2_attr_withdraw.attr,
	&gfs2_attr_statfs_sync.attr,
	&gfs2_attr_quota_sync.attr,
	&gfs2_attr_quota_refresh_user.attr,
	&gfs2_attr_quota_refresh_group.attr,
	&gfs2_attr_demote_rq.attr,
	NULL,
};

static void gfs2_sbd_release(struct kobject *kobj)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);

	kfree(sdp);
}

static struct kobj_type gfs2_ktype = {
	.release = gfs2_sbd_release,
	.default_attrs = gfs2_attrs,
	.sysfs_ops = &gfs2_attr_ops,
};
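
/*
 * The attributes above appear directly in the per-filesystem sysfs
 * directory (normally /sys/fs/gfs2/<locktable>/).  The attributes that
 * follow are exported through the "lock_module" and "tune" groups set up
 * in gfs2_sys_fs_add() below.
 */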

/*
 * lock_module. Originally from lock_dlm
 */

static ssize_t proto_name_show(struct gfs2_sbd *sdp, char *buf)
{
	const struct lm_lockops *ops = sdp->sd_lockstruct.ls_ops;
	return sprintf(buf, "%s\n", ops->lm_proto_name);
}

static ssize_t block_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	ssize_t ret;
	int val = 0;

	if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))
		val = 1;
	ret = sprintf(buf, "%d\n", val);
	return ret;
}

static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	ssize_t ret = len;
	int val;

	val = simple_strtol(buf, NULL, 0);

	if (val == 1)
		set_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
	else if (val == 0) {
		clear_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags);
		smp_mb__after_atomic();
		gfs2_glock_thaw(sdp);
	} else {
		ret = -EINVAL;
	}
	return ret;
}

static ssize_t wdack_show(struct gfs2_sbd *sdp, char *buf)
{
	int val = completion_done(&sdp->sd_wdack) ? 1 : 0;

	return sprintf(buf, "%d\n", val);
}

static ssize_t wdack_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	ssize_t ret = len;
	int val;

	val = simple_strtol(buf, NULL, 0);

	if ((val == 1) &&
	    !strcmp(sdp->sd_lockstruct.ls_ops->lm_proto_name, "lock_dlm"))
		complete(&sdp->sd_wdack);
	else
		ret = -EINVAL;
	return ret;
}

static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", ls->ls_first);
}

static ssize_t lkfirst_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	unsigned first;
	int rv;

	rv = sscanf(buf, "%u", &first);
	if (rv != 1 || first > 1)
		return -EINVAL;
	rv = wait_for_completion_killable(&sdp->sd_locking_init);
	if (rv)
		return rv;
	spin_lock(&sdp->sd_jindex_spin);
	rv = -EBUSY;
	if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
		goto out;
	rv = -EINVAL;
	if (sdp->sd_args.ar_spectator)
		goto out;
	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
		goto out;
	sdp->sd_lockstruct.ls_first = first;
	rv = 0;
out:
	spin_unlock(&sdp->sd_jindex_spin);
	return rv ? rv : len;
}
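
/*
 * "first_done", "recover", "recover_done" and "recover_status" coordinate
 * journal recovery with user space: writing a journal id to "recover"
 * kicks off recovery of that journal (see gfs2_recover_set()), while the
 * read-only files report the first-mount state and the id/status of the
 * last journal recovered.
 */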

static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", !!test_bit(DFL_FIRST_MOUNT_DONE, &ls->ls_recover_flags));
}

int gfs2_recover_set(struct gfs2_sbd *sdp, unsigned jid)
{
	struct gfs2_jdesc *jd;
	int rv;

	/* Wait for our primary journal to be initialized */
	wait_for_completion(&sdp->sd_journal_ready);

	spin_lock(&sdp->sd_jindex_spin);
	rv = -EBUSY;
	if (sdp->sd_jdesc->jd_jid == jid)
		goto out;
	rv = -ENOENT;
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (jd->jd_jid != jid)
			continue;
		rv = gfs2_recover_journal(jd, false);
		break;
	}
out:
	spin_unlock(&sdp->sd_jindex_spin);
	return rv;
}

static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	unsigned jid;
	int rv;

	rv = sscanf(buf, "%u", &jid);
	if (rv != 1)
		return -EINVAL;

	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		rv = -ESHUTDOWN;
		goto out;
	}

	rv = gfs2_recover_set(sdp, jid);
out:
	return rv ? rv : len;
}

static ssize_t recover_done_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", ls->ls_recover_jid_done);
}

static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf)
{
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
	return sprintf(buf, "%d\n", ls->ls_recover_jid_status);
}

static ssize_t jid_show(struct gfs2_sbd *sdp, char *buf)
{
	return sprintf(buf, "%d\n", sdp->sd_lockstruct.ls_jid);
}

static ssize_t jid_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
{
	int jid;
	int rv;

	rv = sscanf(buf, "%d", &jid);
	if (rv != 1)
		return -EINVAL;
	rv = wait_for_completion_killable(&sdp->sd_locking_init);
	if (rv)
		return rv;
	spin_lock(&sdp->sd_jindex_spin);
	rv = -EINVAL;
	if (sdp->sd_lockstruct.ls_ops->lm_mount == NULL)
		goto out;
	rv = -EBUSY;
	if (test_bit(SDF_NOJOURNALID, &sdp->sd_flags) == 0)
		goto out;
	rv = 0;
	if (sdp->sd_args.ar_spectator && jid > 0)
		rv = jid = -EINVAL;
	sdp->sd_lockstruct.ls_jid = jid;
	clear_bit(SDF_NOJOURNALID, &sdp->sd_flags);
	smp_mb__after_atomic();
	wake_up_bit(&sdp->sd_flags, SDF_NOJOURNALID);
out:
	spin_unlock(&sdp->sd_jindex_spin);
	return rv ? rv : len;
}
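
/*
 * Note that "lock_module/withdraw" below is bound to wdack_show/wdack_store:
 * writing 1 acknowledges a withdraw when lock_dlm is in use.  It is distinct
 * from the top-level "withdraw" file, which actually triggers a withdraw.
 */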

#define GDLM_ATTR(_name,_mode,_show,_store) \
static struct gfs2_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)

GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
GDLM_ATTR(block, 0644, block_show, block_store);
GDLM_ATTR(withdraw, 0644, wdack_show, wdack_store);
GDLM_ATTR(jid, 0644, jid_show, jid_store);
GDLM_ATTR(first, 0644, lkfirst_show, lkfirst_store);
GDLM_ATTR(first_done, 0444, first_done_show, NULL);
GDLM_ATTR(recover, 0600, NULL, recover_store);
GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);

static struct attribute *lock_module_attrs[] = {
	&gdlm_attr_proto_name.attr,
	&gdlm_attr_block.attr,
	&gdlm_attr_withdraw.attr,
	&gdlm_attr_jid.attr,
	&gdlm_attr_first.attr,
	&gdlm_attr_first_done.attr,
	&gdlm_attr_recover.attr,
	&gdlm_attr_recover_done.attr,
	&gdlm_attr_recover_status.attr,
	NULL,
};

/*
 * get and set struct gfs2_tune fields
 */

static ssize_t quota_scale_show(struct gfs2_sbd *sdp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u %u\n",
			sdp->sd_tune.gt_quota_scale_num,
			sdp->sd_tune.gt_quota_scale_den);
}

static ssize_t quota_scale_store(struct gfs2_sbd *sdp, const char *buf,
				 size_t len)
{
	struct gfs2_tune *gt = &sdp->sd_tune;
	unsigned int x, y;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (sscanf(buf, "%u %u", &x, &y) != 2 || !y)
		return -EINVAL;

	spin_lock(&gt->gt_spin);
	gt->gt_quota_scale_num = x;
	gt->gt_quota_scale_den = y;
	spin_unlock(&gt->gt_spin);
	return len;
}

static ssize_t tune_set(struct gfs2_sbd *sdp, unsigned int *field,
			int check_zero, const char *buf, size_t len)
{
	struct gfs2_tune *gt = &sdp->sd_tune;
	unsigned int x;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	x = simple_strtoul(buf, NULL, 0);

	if (check_zero && !x)
		return -EINVAL;

	spin_lock(&gt->gt_spin);
	*field = x;
	spin_unlock(&gt->gt_spin);
	return len;
}

#define TUNE_ATTR_3(name, show, store) \
static struct gfs2_attr tune_attr_##name = __ATTR(name, 0644, show, store)

#define TUNE_ATTR_2(name, store) \
static ssize_t name##_show(struct gfs2_sbd *sdp, char *buf) \
{ \
	return snprintf(buf, PAGE_SIZE, "%u\n", sdp->sd_tune.gt_##name); \
} \
TUNE_ATTR_3(name, name##_show, store)

#define TUNE_ATTR(name, check_zero) \
static ssize_t name##_store(struct gfs2_sbd *sdp, const char *buf, size_t len)\
{ \
	return tune_set(sdp, &sdp->sd_tune.gt_##name, check_zero, buf, len); \
} \
TUNE_ATTR_2(name, name##_store)

TUNE_ATTR(quota_warn_period, 0);
TUNE_ATTR(quota_quantum, 0);
TUNE_ATTR(max_readahead, 0);
TUNE_ATTR(complain_secs, 0);
TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_3(quota_scale, quota_scale_show, quota_scale_store);

static struct attribute *tune_attrs[] = {
	&tune_attr_quota_warn_period.attr,
	&tune_attr_quota_quantum.attr,
	&tune_attr_max_readahead.attr,
	&tune_attr_complain_secs.attr,
	&tune_attr_statfs_slow.attr,
	&tune_attr_statfs_quantum.attr,
	&tune_attr_quota_scale.attr,
	&tune_attr_new_files_jdata.attr,
	NULL,
};

static struct attribute_group tune_group = {
	.name = "tune",
	.attrs = tune_attrs,
};

static struct attribute_group lock_module_group = {
	.name = "lock_module",
	.attrs = lock_module_attrs,
};
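
/*
 * gfs2_sys_fs_add creates the per-filesystem kobject (the directory named
 * after the lock table under /sys/fs/gfs2/), adds the "tune" and
 * "lock_module" groups and a "device" symlink to the block device, and
 * announces the new mount with a KOBJ_ADD uevent carrying the RDONLY and
 * SPECTATOR variables.
 */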

int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
{
	struct super_block *sb = sdp->sd_vfs;
	int error;
	char ro[20];
	char spectator[20];
	char *envp[] = { ro, spectator, NULL };
	int sysfs_frees_sdp = 0;

	sprintf(ro, "RDONLY=%d", (sb->s_flags & MS_RDONLY) ? 1 : 0);
	sprintf(spectator, "SPECTATOR=%d", sdp->sd_args.ar_spectator ? 1 : 0);

	sdp->sd_kobj.kset = gfs2_kset;
	error = kobject_init_and_add(&sdp->sd_kobj, &gfs2_ktype, NULL,
				     "%s", sdp->sd_table_name);
	if (error)
		goto fail_reg;

	sysfs_frees_sdp = 1; /* Freeing sdp is now done by sysfs calling
				function gfs2_sbd_release. */
	error = sysfs_create_group(&sdp->sd_kobj, &tune_group);
	if (error)
		goto fail_reg;

	error = sysfs_create_group(&sdp->sd_kobj, &lock_module_group);
	if (error)
		goto fail_tune;

	error = sysfs_create_link(&sdp->sd_kobj,
				  &disk_to_dev(sb->s_bdev->bd_disk)->kobj,
				  "device");
	if (error)
		goto fail_lock_module;

	kobject_uevent_env(&sdp->sd_kobj, KOBJ_ADD, envp);
	return 0;

fail_lock_module:
	sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
fail_tune:
	sysfs_remove_group(&sdp->sd_kobj, &tune_group);
fail_reg:
	free_percpu(sdp->sd_lkstats);
	fs_err(sdp, "error %d adding sysfs files", error);
	if (sysfs_frees_sdp)
		kobject_put(&sdp->sd_kobj);
	else
		kfree(sdp);
	sb->s_fs_info = NULL;
	return error;
}

void gfs2_sys_fs_del(struct gfs2_sbd *sdp)
{
	sysfs_remove_link(&sdp->sd_kobj, "device");
	sysfs_remove_group(&sdp->sd_kobj, &tune_group);
	sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
	kobject_put(&sdp->sd_kobj);
}

static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
		       struct kobj_uevent_env *env)
{
	struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd, sd_kobj);
	struct super_block *s = sdp->sd_vfs;
	const u8 *uuid = s->s_uuid;

	add_uevent_var(env, "LOCKTABLE=%s", sdp->sd_table_name);
	add_uevent_var(env, "LOCKPROTO=%s", sdp->sd_proto_name);
	if (!test_bit(SDF_NOJOURNALID, &sdp->sd_flags))
		add_uevent_var(env, "JOURNALID=%d", sdp->sd_lockstruct.ls_jid);
	if (gfs2_uuid_valid(uuid))
		add_uevent_var(env, "UUID=%pUB", uuid);
	return 0;
}

static const struct kset_uevent_ops gfs2_uevent_ops = {
	.uevent = gfs2_uevent,
};

int gfs2_sys_init(void)
{
	gfs2_kset = kset_create_and_add("gfs2", &gfs2_uevent_ops, fs_kobj);
	if (!gfs2_kset)
		return -ENOMEM;
	return 0;
}

void gfs2_sys_uninit(void)
{
	kset_unregister(gfs2_kset);
}