1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 4 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. 5 */ 6 7 #include <linux/slab.h> 8 #include <linux/spinlock.h> 9 #include <linux/completion.h> 10 #include <linux/buffer_head.h> 11 #include <linux/xattr.h> 12 #include <linux/gfs2_ondisk.h> 13 #include <linux/posix_acl_xattr.h> 14 #include <linux/uaccess.h> 15 16 #include "gfs2.h" 17 #include "incore.h" 18 #include "acl.h" 19 #include "xattr.h" 20 #include "glock.h" 21 #include "inode.h" 22 #include "meta_io.h" 23 #include "quota.h" 24 #include "rgrp.h" 25 #include "super.h" 26 #include "trans.h" 27 #include "util.h" 28 29 /* 30 * ea_calc_size - returns the actual number of bytes the request will take up 31 * (not counting any unstuffed data blocks) 32 * 33 * Returns: 1 if the EA should be stuffed 34 */ 35 36 static int ea_calc_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize, 37 unsigned int *size) 38 { 39 unsigned int jbsize = sdp->sd_jbsize; 40 41 /* Stuffed */ 42 *size = ALIGN(sizeof(struct gfs2_ea_header) + nsize + dsize, 8); 43 44 if (*size <= jbsize) 45 return 1; 46 47 /* Unstuffed */ 48 *size = ALIGN(sizeof(struct gfs2_ea_header) + nsize + 49 (sizeof(__be64) * DIV_ROUND_UP(dsize, jbsize)), 8); 50 51 return 0; 52 } 53 54 static int ea_check_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize) 55 { 56 unsigned int size; 57 58 if (dsize > GFS2_EA_MAX_DATA_LEN) 59 return -ERANGE; 60 61 ea_calc_size(sdp, nsize, dsize, &size); 62 63 /* This can only happen with 512 byte blocks */ 64 if (size > sdp->sd_jbsize) 65 return -ERANGE; 66 67 return 0; 68 } 69 70 static bool gfs2_eatype_valid(struct gfs2_sbd *sdp, u8 type) 71 { 72 switch(sdp->sd_sb.sb_fs_format) { 73 case GFS2_FS_FORMAT_MAX: 74 return true; 75 76 case GFS2_FS_FORMAT_MIN: 77 return type <= GFS2_EATYPE_SECURITY; 78 79 default: 80 return false; 81 } 82 } 83 84 typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh, 85 struct gfs2_ea_header *ea, 86 struct gfs2_ea_header *prev, void *private); 87 88 static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh, 89 ea_call_t ea_call, void *data) 90 { 91 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 92 struct gfs2_ea_header *ea, *prev = NULL; 93 int error = 0; 94 95 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA)) 96 return -EIO; 97 98 for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) { 99 if (!GFS2_EA_REC_LEN(ea)) 100 goto fail; 101 if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <= 102 bh->b_data + bh->b_size)) 103 goto fail; 104 if (!gfs2_eatype_valid(sdp, ea->ea_type)) 105 goto fail; 106 error = ea_call(ip, bh, ea, prev, data); 107 if (error) 108 return error; 109 110 if (GFS2_EA_IS_LAST(ea)) { 111 if ((char *)GFS2_EA2NEXT(ea) != 112 bh->b_data + bh->b_size) 113 goto fail; 114 break; 115 } 116 } 117 118 return error; 119 120 fail: 121 gfs2_consist_inode(ip); 122 return -EIO; 123 } 124 125 static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data) 126 { 127 struct buffer_head *bh, *eabh; 128 __be64 *eablk, *end; 129 int error; 130 131 error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &bh); 132 if (error) 133 return error; 134 135 if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) { 136 error = ea_foreach_i(ip, bh, ea_call, data); 137 goto out; 138 } 139 140 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) { 141 error = -EIO; 142 goto out; 143 } 144 145 eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header)); 146 end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs; 147 148 for (; eablk < end; eablk++) { 149 u64 bn; 150 151 if (!*eablk) 152 break; 153 bn = be64_to_cpu(*eablk); 154 155 error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, 0, &eabh); 156 if (error) 157 break; 158 error = ea_foreach_i(ip, eabh, ea_call, data); 159 brelse(eabh); 160 if (error) 161 break; 162 } 163 out: 164 brelse(bh); 165 return error; 166 } 167 168 struct ea_find { 169 int type; 170 const char *name; 171 size_t namel; 172 struct gfs2_ea_location *ef_el; 173 }; 174 175 static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh, 176 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev, 177 void *private) 178 { 179 struct ea_find *ef = private; 180 181 if (ea->ea_type == GFS2_EATYPE_UNUSED) 182 return 0; 183 184 if (ea->ea_type == ef->type) { 185 if (ea->ea_name_len == ef->namel && 186 !memcmp(GFS2_EA2NAME(ea), ef->name, ea->ea_name_len)) { 187 struct gfs2_ea_location *el = ef->ef_el; 188 get_bh(bh); 189 el->el_bh = bh; 190 el->el_ea = ea; 191 el->el_prev = prev; 192 return 1; 193 } 194 } 195 196 return 0; 197 } 198 199 static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name, 200 struct gfs2_ea_location *el) 201 { 202 struct ea_find ef; 203 int error; 204 205 ef.type = type; 206 ef.name = name; 207 ef.namel = strlen(name); 208 ef.ef_el = el; 209 210 memset(el, 0, sizeof(struct gfs2_ea_location)); 211 212 error = ea_foreach(ip, ea_find_i, &ef); 213 if (error > 0) 214 return 0; 215 216 return error; 217 } 218 219 /* 220 * ea_dealloc_unstuffed 221 * 222 * Take advantage of the fact that all unstuffed blocks are 223 * allocated from the same RG. But watch, this may not always 224 * be true. 225 * 226 * Returns: errno 227 */ 228 229 static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh, 230 struct gfs2_ea_header *ea, 231 struct gfs2_ea_header *prev, void *private) 232 { 233 int *leave = private; 234 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 235 struct gfs2_rgrpd *rgd; 236 struct gfs2_holder rg_gh; 237 __be64 *dataptrs; 238 u64 bn = 0; 239 u64 bstart = 0; 240 unsigned int blen = 0; 241 unsigned int blks = 0; 242 unsigned int x; 243 int error; 244 245 error = gfs2_rindex_update(sdp); 246 if (error) 247 return error; 248 249 if (GFS2_EA_IS_STUFFED(ea)) 250 return 0; 251 252 dataptrs = GFS2_EA2DATAPTRS(ea); 253 for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) { 254 if (*dataptrs) { 255 blks++; 256 bn = be64_to_cpu(*dataptrs); 257 } 258 } 259 if (!blks) 260 return 0; 261 262 rgd = gfs2_blk2rgrpd(sdp, bn, 1); 263 if (!rgd) { 264 gfs2_consist_inode(ip); 265 return -EIO; 266 } 267 268 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 269 LM_FLAG_NODE_SCOPE, &rg_gh); 270 if (error) 271 return error; 272 273 error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE + 274 RES_EATTR + RES_STATFS + RES_QUOTA, blks); 275 if (error) 276 goto out_gunlock; 277 278 gfs2_trans_add_meta(ip->i_gl, bh); 279 280 dataptrs = GFS2_EA2DATAPTRS(ea); 281 for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) { 282 if (!*dataptrs) 283 break; 284 bn = be64_to_cpu(*dataptrs); 285 286 if (bstart + blen == bn) 287 blen++; 288 else { 289 if (bstart) 290 gfs2_free_meta(ip, rgd, bstart, blen); 291 bstart = bn; 292 blen = 1; 293 } 294 295 *dataptrs = 0; 296 gfs2_add_inode_blocks(&ip->i_inode, -1); 297 } 298 if (bstart) 299 gfs2_free_meta(ip, rgd, bstart, blen); 300 301 if (prev && !leave) { 302 u32 len; 303 304 len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea); 305 prev->ea_rec_len = cpu_to_be32(len); 306 307 if (GFS2_EA_IS_LAST(ea)) 308 prev->ea_flags |= GFS2_EAFLAG_LAST; 309 } else { 310 ea->ea_type = GFS2_EATYPE_UNUSED; 311 ea->ea_num_ptrs = 0; 312 } 313 314 ip->i_inode.i_ctime = current_time(&ip->i_inode); 315 __mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC); 316 317 gfs2_trans_end(sdp); 318 319 out_gunlock: 320 gfs2_glock_dq_uninit(&rg_gh); 321 return error; 322 } 323 324 static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh, 325 struct gfs2_ea_header *ea, 326 struct gfs2_ea_header *prev, int leave) 327 { 328 int error; 329 330 error = gfs2_rindex_update(GFS2_SB(&ip->i_inode)); 331 if (error) 332 return error; 333 334 error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); 335 if (error) 336 goto out_alloc; 337 338 error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL); 339 340 gfs2_quota_unhold(ip); 341 out_alloc: 342 return error; 343 } 344 345 struct ea_list { 346 struct gfs2_ea_request *ei_er; 347 unsigned int ei_size; 348 }; 349 350 static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh, 351 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev, 352 void *private) 353 { 354 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 355 struct ea_list *ei = private; 356 struct gfs2_ea_request *er = ei->ei_er; 357 unsigned int ea_size; 358 char *prefix; 359 unsigned int l; 360 361 if (ea->ea_type == GFS2_EATYPE_UNUSED) 362 return 0; 363 364 BUG_ON(ea->ea_type > GFS2_EATYPE_SECURITY && 365 sdp->sd_sb.sb_fs_format == GFS2_FS_FORMAT_MIN); 366 switch (ea->ea_type) { 367 case GFS2_EATYPE_USR: 368 prefix = "user."; 369 l = 5; 370 break; 371 case GFS2_EATYPE_SYS: 372 prefix = "system."; 373 l = 7; 374 break; 375 case GFS2_EATYPE_SECURITY: 376 prefix = "security."; 377 l = 9; 378 break; 379 case GFS2_EATYPE_TRUSTED: 380 prefix = "trusted."; 381 l = 8; 382 break; 383 default: 384 return 0; 385 } 386 387 ea_size = l + ea->ea_name_len + 1; 388 if (er->er_data_len) { 389 if (ei->ei_size + ea_size > er->er_data_len) 390 return -ERANGE; 391 392 memcpy(er->er_data + ei->ei_size, prefix, l); 393 memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea), 394 ea->ea_name_len); 395 er->er_data[ei->ei_size + ea_size - 1] = 0; 396 } 397 398 ei->ei_size += ea_size; 399 400 return 0; 401 } 402 403 /** 404 * gfs2_listxattr - List gfs2 extended attributes 405 * @dentry: The dentry whose inode we are interested in 406 * @buffer: The buffer to write the results 407 * @size: The size of the buffer 408 * 409 * Returns: actual size of data on success, -errno on error 410 */ 411 412 ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size) 413 { 414 struct gfs2_inode *ip = GFS2_I(d_inode(dentry)); 415 struct gfs2_ea_request er; 416 struct gfs2_holder i_gh; 417 int error; 418 419 memset(&er, 0, sizeof(struct gfs2_ea_request)); 420 if (size) { 421 er.er_data = buffer; 422 er.er_data_len = size; 423 } 424 425 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh); 426 if (error) 427 return error; 428 429 if (ip->i_eattr) { 430 struct ea_list ei = { .ei_er = &er, .ei_size = 0 }; 431 432 error = ea_foreach(ip, ea_list_i, &ei); 433 if (!error) 434 error = ei.ei_size; 435 } 436 437 gfs2_glock_dq_uninit(&i_gh); 438 439 return error; 440 } 441 442 /** 443 * gfs2_iter_unstuffed - copies the unstuffed xattr data to/from the 444 * request buffer 445 * @ip: The GFS2 inode 446 * @ea: The extended attribute header structure 447 * @din: The data to be copied in 448 * @dout: The data to be copied out (one of din,dout will be NULL) 449 * 450 * Returns: errno 451 */ 452 453 static int gfs2_iter_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea, 454 const char *din, char *dout) 455 { 456 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 457 struct buffer_head **bh; 458 unsigned int amount = GFS2_EA_DATA_LEN(ea); 459 unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize); 460 __be64 *dataptrs = GFS2_EA2DATAPTRS(ea); 461 unsigned int x; 462 int error = 0; 463 unsigned char *pos; 464 unsigned cp_size; 465 466 bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS); 467 if (!bh) 468 return -ENOMEM; 469 470 for (x = 0; x < nptrs; x++) { 471 error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0, 0, 472 bh + x); 473 if (error) { 474 while (x--) 475 brelse(bh[x]); 476 goto out; 477 } 478 dataptrs++; 479 } 480 481 for (x = 0; x < nptrs; x++) { 482 error = gfs2_meta_wait(sdp, bh[x]); 483 if (error) { 484 for (; x < nptrs; x++) 485 brelse(bh[x]); 486 goto out; 487 } 488 if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) { 489 for (; x < nptrs; x++) 490 brelse(bh[x]); 491 error = -EIO; 492 goto out; 493 } 494 495 pos = bh[x]->b_data + sizeof(struct gfs2_meta_header); 496 cp_size = (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize; 497 498 if (dout) { 499 memcpy(dout, pos, cp_size); 500 dout += sdp->sd_jbsize; 501 } 502 503 if (din) { 504 gfs2_trans_add_meta(ip->i_gl, bh[x]); 505 memcpy(pos, din, cp_size); 506 din += sdp->sd_jbsize; 507 } 508 509 amount -= sdp->sd_jbsize; 510 brelse(bh[x]); 511 } 512 513 out: 514 kfree(bh); 515 return error; 516 } 517 518 static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el, 519 char *data, size_t size) 520 { 521 int ret; 522 size_t len = GFS2_EA_DATA_LEN(el->el_ea); 523 if (len > size) 524 return -ERANGE; 525 526 if (GFS2_EA_IS_STUFFED(el->el_ea)) { 527 memcpy(data, GFS2_EA2DATA(el->el_ea), len); 528 return len; 529 } 530 ret = gfs2_iter_unstuffed(ip, el->el_ea, NULL, data); 531 if (ret < 0) 532 return ret; 533 return len; 534 } 535 536 int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata) 537 { 538 struct gfs2_ea_location el; 539 int error; 540 int len; 541 char *data; 542 543 error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el); 544 if (error) 545 return error; 546 if (!el.el_ea) 547 goto out; 548 if (!GFS2_EA_DATA_LEN(el.el_ea)) 549 goto out; 550 551 len = GFS2_EA_DATA_LEN(el.el_ea); 552 data = kmalloc(len, GFP_NOFS); 553 error = -ENOMEM; 554 if (data == NULL) 555 goto out; 556 557 error = gfs2_ea_get_copy(ip, &el, data, len); 558 if (error < 0) 559 kfree(data); 560 else 561 *ppdata = data; 562 out: 563 brelse(el.el_bh); 564 return error; 565 } 566 567 /** 568 * __gfs2_xattr_get - Get a GFS2 extended attribute 569 * @inode: The inode 570 * @name: The name of the extended attribute 571 * @buffer: The buffer to write the result into 572 * @size: The size of the buffer 573 * @type: The type of extended attribute 574 * 575 * Returns: actual size of data on success, -errno on error 576 */ 577 static int __gfs2_xattr_get(struct inode *inode, const char *name, 578 void *buffer, size_t size, int type) 579 { 580 struct gfs2_inode *ip = GFS2_I(inode); 581 struct gfs2_ea_location el; 582 int error; 583 584 if (!ip->i_eattr) 585 return -ENODATA; 586 if (strlen(name) > GFS2_EA_MAX_NAME_LEN) 587 return -EINVAL; 588 589 error = gfs2_ea_find(ip, type, name, &el); 590 if (error) 591 return error; 592 if (!el.el_ea) 593 return -ENODATA; 594 if (size) 595 error = gfs2_ea_get_copy(ip, &el, buffer, size); 596 else 597 error = GFS2_EA_DATA_LEN(el.el_ea); 598 brelse(el.el_bh); 599 600 return error; 601 } 602 603 static int gfs2_xattr_get(const struct xattr_handler *handler, 604 struct dentry *unused, struct inode *inode, 605 const char *name, void *buffer, size_t size) 606 { 607 struct gfs2_inode *ip = GFS2_I(inode); 608 struct gfs2_holder gh; 609 int ret; 610 611 /* During lookup, SELinux calls this function with the glock locked. */ 612 613 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { 614 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh); 615 if (ret) 616 return ret; 617 } else { 618 gfs2_holder_mark_uninitialized(&gh); 619 } 620 ret = __gfs2_xattr_get(inode, name, buffer, size, handler->flags); 621 if (gfs2_holder_initialized(&gh)) 622 gfs2_glock_dq_uninit(&gh); 623 return ret; 624 } 625 626 /** 627 * ea_alloc_blk - allocates a new block for extended attributes. 628 * @ip: A pointer to the inode that's getting extended attributes 629 * @bhp: Pointer to pointer to a struct buffer_head 630 * 631 * Returns: errno 632 */ 633 634 static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp) 635 { 636 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 637 struct gfs2_ea_header *ea; 638 unsigned int n = 1; 639 u64 block; 640 int error; 641 642 error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL); 643 if (error) 644 return error; 645 gfs2_trans_remove_revoke(sdp, block, 1); 646 *bhp = gfs2_meta_new(ip->i_gl, block); 647 gfs2_trans_add_meta(ip->i_gl, *bhp); 648 gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA); 649 gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header)); 650 651 ea = GFS2_EA_BH2FIRST(*bhp); 652 ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize); 653 ea->ea_type = GFS2_EATYPE_UNUSED; 654 ea->ea_flags = GFS2_EAFLAG_LAST; 655 ea->ea_num_ptrs = 0; 656 657 gfs2_add_inode_blocks(&ip->i_inode, 1); 658 659 return 0; 660 } 661 662 /** 663 * ea_write - writes the request info to an ea, creating new blocks if 664 * necessary 665 * @ip: inode that is being modified 666 * @ea: the location of the new ea in a block 667 * @er: the write request 668 * 669 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bin of ea_flags 670 * 671 * returns : errno 672 */ 673 674 static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea, 675 struct gfs2_ea_request *er) 676 { 677 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 678 int error; 679 680 ea->ea_data_len = cpu_to_be32(er->er_data_len); 681 ea->ea_name_len = er->er_name_len; 682 ea->ea_type = er->er_type; 683 ea->__pad = 0; 684 685 memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len); 686 687 if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) { 688 ea->ea_num_ptrs = 0; 689 memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len); 690 } else { 691 __be64 *dataptr = GFS2_EA2DATAPTRS(ea); 692 const char *data = er->er_data; 693 unsigned int data_len = er->er_data_len; 694 unsigned int copy; 695 unsigned int x; 696 697 ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize); 698 for (x = 0; x < ea->ea_num_ptrs; x++) { 699 struct buffer_head *bh; 700 u64 block; 701 int mh_size = sizeof(struct gfs2_meta_header); 702 unsigned int n = 1; 703 704 error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL); 705 if (error) 706 return error; 707 gfs2_trans_remove_revoke(sdp, block, 1); 708 bh = gfs2_meta_new(ip->i_gl, block); 709 gfs2_trans_add_meta(ip->i_gl, bh); 710 gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED); 711 712 gfs2_add_inode_blocks(&ip->i_inode, 1); 713 714 copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize : 715 data_len; 716 memcpy(bh->b_data + mh_size, data, copy); 717 if (copy < sdp->sd_jbsize) 718 memset(bh->b_data + mh_size + copy, 0, 719 sdp->sd_jbsize - copy); 720 721 *dataptr++ = cpu_to_be64(bh->b_blocknr); 722 data += copy; 723 data_len -= copy; 724 725 brelse(bh); 726 } 727 728 gfs2_assert_withdraw(sdp, !data_len); 729 } 730 731 return 0; 732 } 733 734 typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip, 735 struct gfs2_ea_request *er, void *private); 736 737 static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er, 738 unsigned int blks, 739 ea_skeleton_call_t skeleton_call, void *private) 740 { 741 struct gfs2_alloc_parms ap = { .target = blks }; 742 int error; 743 744 error = gfs2_rindex_update(GFS2_SB(&ip->i_inode)); 745 if (error) 746 return error; 747 748 error = gfs2_quota_lock_check(ip, &ap); 749 if (error) 750 return error; 751 752 error = gfs2_inplace_reserve(ip, &ap); 753 if (error) 754 goto out_gunlock_q; 755 756 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), 757 blks + gfs2_rg_blocks(ip, blks) + 758 RES_DINODE + RES_STATFS + RES_QUOTA, 0); 759 if (error) 760 goto out_ipres; 761 762 error = skeleton_call(ip, er, private); 763 if (error) 764 goto out_end_trans; 765 766 ip->i_inode.i_ctime = current_time(&ip->i_inode); 767 __mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC); 768 769 out_end_trans: 770 gfs2_trans_end(GFS2_SB(&ip->i_inode)); 771 out_ipres: 772 gfs2_inplace_release(ip); 773 out_gunlock_q: 774 gfs2_quota_unlock(ip); 775 return error; 776 } 777 778 static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er, 779 void *private) 780 { 781 struct buffer_head *bh; 782 int error; 783 784 error = ea_alloc_blk(ip, &bh); 785 if (error) 786 return error; 787 788 ip->i_eattr = bh->b_blocknr; 789 error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er); 790 791 brelse(bh); 792 793 return error; 794 } 795 796 /* 797 * ea_init - initializes a new eattr block 798 * 799 * Returns: errno 800 */ 801 static int ea_init(struct gfs2_inode *ip, int type, const char *name, 802 const void *data, size_t size) 803 { 804 struct gfs2_ea_request er; 805 unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize; 806 unsigned int blks = 1; 807 808 er.er_type = type; 809 er.er_name = name; 810 er.er_name_len = strlen(name); 811 er.er_data = (void *)data; 812 er.er_data_len = size; 813 814 if (GFS2_EAREQ_SIZE_STUFFED(&er) > jbsize) 815 blks += DIV_ROUND_UP(er.er_data_len, jbsize); 816 817 return ea_alloc_skeleton(ip, &er, blks, ea_init_i, NULL); 818 } 819 820 static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea) 821 { 822 u32 ea_size = GFS2_EA_SIZE(ea); 823 struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea + 824 ea_size); 825 u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size; 826 int last = ea->ea_flags & GFS2_EAFLAG_LAST; 827 828 ea->ea_rec_len = cpu_to_be32(ea_size); 829 ea->ea_flags ^= last; 830 831 new->ea_rec_len = cpu_to_be32(new_size); 832 new->ea_flags = last; 833 834 return new; 835 } 836 837 static void ea_set_remove_stuffed(struct gfs2_inode *ip, 838 struct gfs2_ea_location *el) 839 { 840 struct gfs2_ea_header *ea = el->el_ea; 841 struct gfs2_ea_header *prev = el->el_prev; 842 u32 len; 843 844 gfs2_trans_add_meta(ip->i_gl, el->el_bh); 845 846 if (!prev || !GFS2_EA_IS_STUFFED(ea)) { 847 ea->ea_type = GFS2_EATYPE_UNUSED; 848 return; 849 } else if (GFS2_EA2NEXT(prev) != ea) { 850 prev = GFS2_EA2NEXT(prev); 851 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea); 852 } 853 854 len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea); 855 prev->ea_rec_len = cpu_to_be32(len); 856 857 if (GFS2_EA_IS_LAST(ea)) 858 prev->ea_flags |= GFS2_EAFLAG_LAST; 859 } 860 861 struct ea_set { 862 int ea_split; 863 864 struct gfs2_ea_request *es_er; 865 struct gfs2_ea_location *es_el; 866 867 struct buffer_head *es_bh; 868 struct gfs2_ea_header *es_ea; 869 }; 870 871 static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh, 872 struct gfs2_ea_header *ea, struct ea_set *es) 873 { 874 struct gfs2_ea_request *er = es->es_er; 875 int error; 876 877 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0); 878 if (error) 879 return error; 880 881 gfs2_trans_add_meta(ip->i_gl, bh); 882 883 if (es->ea_split) 884 ea = ea_split_ea(ea); 885 886 ea_write(ip, ea, er); 887 888 if (es->es_el) 889 ea_set_remove_stuffed(ip, es->es_el); 890 891 ip->i_inode.i_ctime = current_time(&ip->i_inode); 892 __mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC); 893 894 gfs2_trans_end(GFS2_SB(&ip->i_inode)); 895 return error; 896 } 897 898 static int ea_set_simple_alloc(struct gfs2_inode *ip, 899 struct gfs2_ea_request *er, void *private) 900 { 901 struct ea_set *es = private; 902 struct gfs2_ea_header *ea = es->es_ea; 903 int error; 904 905 gfs2_trans_add_meta(ip->i_gl, es->es_bh); 906 907 if (es->ea_split) 908 ea = ea_split_ea(ea); 909 910 error = ea_write(ip, ea, er); 911 if (error) 912 return error; 913 914 if (es->es_el) 915 ea_set_remove_stuffed(ip, es->es_el); 916 917 return 0; 918 } 919 920 static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh, 921 struct gfs2_ea_header *ea, struct gfs2_ea_header *prev, 922 void *private) 923 { 924 struct ea_set *es = private; 925 unsigned int size; 926 int stuffed; 927 int error; 928 929 stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er->er_name_len, 930 es->es_er->er_data_len, &size); 931 932 if (ea->ea_type == GFS2_EATYPE_UNUSED) { 933 if (GFS2_EA_REC_LEN(ea) < size) 934 return 0; 935 if (!GFS2_EA_IS_STUFFED(ea)) { 936 error = ea_remove_unstuffed(ip, bh, ea, prev, 1); 937 if (error) 938 return error; 939 } 940 es->ea_split = 0; 941 } else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size) 942 es->ea_split = 1; 943 else 944 return 0; 945 946 if (stuffed) { 947 error = ea_set_simple_noalloc(ip, bh, ea, es); 948 if (error) 949 return error; 950 } else { 951 unsigned int blks; 952 953 es->es_bh = bh; 954 es->es_ea = ea; 955 blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len, 956 GFS2_SB(&ip->i_inode)->sd_jbsize); 957 958 error = ea_alloc_skeleton(ip, es->es_er, blks, 959 ea_set_simple_alloc, es); 960 if (error) 961 return error; 962 } 963 964 return 1; 965 } 966 967 static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er, 968 void *private) 969 { 970 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 971 struct buffer_head *indbh, *newbh; 972 __be64 *eablk; 973 int error; 974 int mh_size = sizeof(struct gfs2_meta_header); 975 976 if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) { 977 __be64 *end; 978 979 error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, 980 &indbh); 981 if (error) 982 return error; 983 984 if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) { 985 error = -EIO; 986 goto out; 987 } 988 989 eablk = (__be64 *)(indbh->b_data + mh_size); 990 end = eablk + sdp->sd_inptrs; 991 992 for (; eablk < end; eablk++) 993 if (!*eablk) 994 break; 995 996 if (eablk == end) { 997 error = -ENOSPC; 998 goto out; 999 } 1000 1001 gfs2_trans_add_meta(ip->i_gl, indbh); 1002 } else { 1003 u64 blk; 1004 unsigned int n = 1; 1005 error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL); 1006 if (error) 1007 return error; 1008 gfs2_trans_remove_revoke(sdp, blk, 1); 1009 indbh = gfs2_meta_new(ip->i_gl, blk); 1010 gfs2_trans_add_meta(ip->i_gl, indbh); 1011 gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN); 1012 gfs2_buffer_clear_tail(indbh, mh_size); 1013 1014 eablk = (__be64 *)(indbh->b_data + mh_size); 1015 *eablk = cpu_to_be64(ip->i_eattr); 1016 ip->i_eattr = blk; 1017 ip->i_diskflags |= GFS2_DIF_EA_INDIRECT; 1018 gfs2_add_inode_blocks(&ip->i_inode, 1); 1019 1020 eablk++; 1021 } 1022 1023 error = ea_alloc_blk(ip, &newbh); 1024 if (error) 1025 goto out; 1026 1027 *eablk = cpu_to_be64((u64)newbh->b_blocknr); 1028 error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er); 1029 brelse(newbh); 1030 if (error) 1031 goto out; 1032 1033 if (private) 1034 ea_set_remove_stuffed(ip, private); 1035 1036 out: 1037 brelse(indbh); 1038 return error; 1039 } 1040 1041 static int ea_set_i(struct gfs2_inode *ip, int type, const char *name, 1042 const void *value, size_t size, struct gfs2_ea_location *el) 1043 { 1044 struct gfs2_ea_request er; 1045 struct ea_set es; 1046 unsigned int blks = 2; 1047 int error; 1048 1049 er.er_type = type; 1050 er.er_name = name; 1051 er.er_data = (void *)value; 1052 er.er_name_len = strlen(name); 1053 er.er_data_len = size; 1054 1055 memset(&es, 0, sizeof(struct ea_set)); 1056 es.es_er = &er; 1057 es.es_el = el; 1058 1059 error = ea_foreach(ip, ea_set_simple, &es); 1060 if (error > 0) 1061 return 0; 1062 if (error) 1063 return error; 1064 1065 if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) 1066 blks++; 1067 if (GFS2_EAREQ_SIZE_STUFFED(&er) > GFS2_SB(&ip->i_inode)->sd_jbsize) 1068 blks += DIV_ROUND_UP(er.er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize); 1069 1070 return ea_alloc_skeleton(ip, &er, blks, ea_set_block, el); 1071 } 1072 1073 static int ea_set_remove_unstuffed(struct gfs2_inode *ip, 1074 struct gfs2_ea_location *el) 1075 { 1076 if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) { 1077 el->el_prev = GFS2_EA2NEXT(el->el_prev); 1078 gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), 1079 GFS2_EA2NEXT(el->el_prev) == el->el_ea); 1080 } 1081 1082 return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0); 1083 } 1084 1085 static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el) 1086 { 1087 struct gfs2_ea_header *ea = el->el_ea; 1088 struct gfs2_ea_header *prev = el->el_prev; 1089 int error; 1090 1091 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0); 1092 if (error) 1093 return error; 1094 1095 gfs2_trans_add_meta(ip->i_gl, el->el_bh); 1096 1097 if (prev) { 1098 u32 len; 1099 1100 len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea); 1101 prev->ea_rec_len = cpu_to_be32(len); 1102 1103 if (GFS2_EA_IS_LAST(ea)) 1104 prev->ea_flags |= GFS2_EAFLAG_LAST; 1105 } else { 1106 ea->ea_type = GFS2_EATYPE_UNUSED; 1107 } 1108 1109 ip->i_inode.i_ctime = current_time(&ip->i_inode); 1110 __mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC); 1111 1112 gfs2_trans_end(GFS2_SB(&ip->i_inode)); 1113 1114 return error; 1115 } 1116 1117 /** 1118 * gfs2_xattr_remove - Remove a GFS2 extended attribute 1119 * @ip: The inode 1120 * @type: The type of the extended attribute 1121 * @name: The name of the extended attribute 1122 * 1123 * This is not called directly by the VFS since we use the (common) 1124 * scheme of making a "set with NULL data" mean a remove request. Note 1125 * that this is different from a set with zero length data. 1126 * 1127 * Returns: 0, or errno on failure 1128 */ 1129 1130 static int gfs2_xattr_remove(struct gfs2_inode *ip, int type, const char *name) 1131 { 1132 struct gfs2_ea_location el; 1133 int error; 1134 1135 if (!ip->i_eattr) 1136 return -ENODATA; 1137 1138 error = gfs2_ea_find(ip, type, name, &el); 1139 if (error) 1140 return error; 1141 if (!el.el_ea) 1142 return -ENODATA; 1143 1144 if (GFS2_EA_IS_STUFFED(el.el_ea)) 1145 error = ea_remove_stuffed(ip, &el); 1146 else 1147 error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev, 0); 1148 1149 brelse(el.el_bh); 1150 1151 return error; 1152 } 1153 1154 /** 1155 * __gfs2_xattr_set - Set (or remove) a GFS2 extended attribute 1156 * @inode: The inode 1157 * @name: The name of the extended attribute 1158 * @value: The value of the extended attribute (NULL for remove) 1159 * @size: The size of the @value argument 1160 * @flags: Create or Replace 1161 * @type: The type of the extended attribute 1162 * 1163 * See gfs2_xattr_remove() for details of the removal of xattrs. 1164 * 1165 * Returns: 0 or errno on failure 1166 */ 1167 1168 int __gfs2_xattr_set(struct inode *inode, const char *name, 1169 const void *value, size_t size, int flags, int type) 1170 { 1171 struct gfs2_inode *ip = GFS2_I(inode); 1172 struct gfs2_sbd *sdp = GFS2_SB(inode); 1173 struct gfs2_ea_location el; 1174 unsigned int namel = strlen(name); 1175 int error; 1176 1177 if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) 1178 return -EPERM; 1179 if (namel > GFS2_EA_MAX_NAME_LEN) 1180 return -ERANGE; 1181 1182 if (value == NULL) { 1183 error = gfs2_xattr_remove(ip, type, name); 1184 if (error == -ENODATA && !(flags & XATTR_REPLACE)) 1185 error = 0; 1186 return error; 1187 } 1188 1189 if (ea_check_size(sdp, namel, size)) 1190 return -ERANGE; 1191 1192 if (!ip->i_eattr) { 1193 if (flags & XATTR_REPLACE) 1194 return -ENODATA; 1195 return ea_init(ip, type, name, value, size); 1196 } 1197 1198 error = gfs2_ea_find(ip, type, name, &el); 1199 if (error) 1200 return error; 1201 1202 if (el.el_ea) { 1203 if (ip->i_diskflags & GFS2_DIF_APPENDONLY) { 1204 brelse(el.el_bh); 1205 return -EPERM; 1206 } 1207 1208 error = -EEXIST; 1209 if (!(flags & XATTR_CREATE)) { 1210 int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea); 1211 error = ea_set_i(ip, type, name, value, size, &el); 1212 if (!error && unstuffed) 1213 ea_set_remove_unstuffed(ip, &el); 1214 } 1215 1216 brelse(el.el_bh); 1217 return error; 1218 } 1219 1220 error = -ENODATA; 1221 if (!(flags & XATTR_REPLACE)) 1222 error = ea_set_i(ip, type, name, value, size, NULL); 1223 1224 return error; 1225 } 1226 1227 static int gfs2_xattr_set(const struct xattr_handler *handler, 1228 struct mnt_idmap *idmap, 1229 struct dentry *unused, struct inode *inode, 1230 const char *name, const void *value, 1231 size_t size, int flags) 1232 { 1233 struct gfs2_inode *ip = GFS2_I(inode); 1234 struct gfs2_holder gh; 1235 int ret; 1236 1237 ret = gfs2_qa_get(ip); 1238 if (ret) 1239 return ret; 1240 1241 /* May be called from gfs_setattr with the glock locked. */ 1242 1243 if (!gfs2_glock_is_locked_by_me(ip->i_gl)) { 1244 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 1245 if (ret) 1246 goto out; 1247 } else { 1248 if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE)) { 1249 ret = -EIO; 1250 goto out; 1251 } 1252 gfs2_holder_mark_uninitialized(&gh); 1253 } 1254 ret = __gfs2_xattr_set(inode, name, value, size, flags, handler->flags); 1255 if (gfs2_holder_initialized(&gh)) 1256 gfs2_glock_dq_uninit(&gh); 1257 out: 1258 gfs2_qa_put(ip); 1259 return ret; 1260 } 1261 1262 static int ea_dealloc_indirect(struct gfs2_inode *ip) 1263 { 1264 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1265 struct gfs2_rgrp_list rlist; 1266 struct gfs2_rgrpd *rgd; 1267 struct buffer_head *indbh, *dibh; 1268 __be64 *eablk, *end; 1269 unsigned int rg_blocks = 0; 1270 u64 bstart = 0; 1271 unsigned int blen = 0; 1272 unsigned int blks = 0; 1273 unsigned int x; 1274 int error; 1275 1276 error = gfs2_rindex_update(sdp); 1277 if (error) 1278 return error; 1279 1280 memset(&rlist, 0, sizeof(struct gfs2_rgrp_list)); 1281 1282 error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &indbh); 1283 if (error) 1284 return error; 1285 1286 if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) { 1287 error = -EIO; 1288 goto out; 1289 } 1290 1291 eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header)); 1292 end = eablk + sdp->sd_inptrs; 1293 1294 for (; eablk < end; eablk++) { 1295 u64 bn; 1296 1297 if (!*eablk) 1298 break; 1299 bn = be64_to_cpu(*eablk); 1300 1301 if (bstart + blen == bn) 1302 blen++; 1303 else { 1304 if (bstart) 1305 gfs2_rlist_add(ip, &rlist, bstart); 1306 bstart = bn; 1307 blen = 1; 1308 } 1309 blks++; 1310 } 1311 if (bstart) 1312 gfs2_rlist_add(ip, &rlist, bstart); 1313 else 1314 goto out; 1315 1316 gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE); 1317 1318 for (x = 0; x < rlist.rl_rgrps; x++) { 1319 rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl); 1320 rg_blocks += rgd->rd_length; 1321 } 1322 1323 error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs); 1324 if (error) 1325 goto out_rlist_free; 1326 1327 error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT + 1328 RES_STATFS + RES_QUOTA, blks); 1329 if (error) 1330 goto out_gunlock; 1331 1332 gfs2_trans_add_meta(ip->i_gl, indbh); 1333 1334 eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header)); 1335 bstart = 0; 1336 rgd = NULL; 1337 blen = 0; 1338 1339 for (; eablk < end; eablk++) { 1340 u64 bn; 1341 1342 if (!*eablk) 1343 break; 1344 bn = be64_to_cpu(*eablk); 1345 1346 if (bstart + blen == bn) 1347 blen++; 1348 else { 1349 if (bstart) 1350 gfs2_free_meta(ip, rgd, bstart, blen); 1351 bstart = bn; 1352 rgd = gfs2_blk2rgrpd(sdp, bstart, true); 1353 blen = 1; 1354 } 1355 1356 *eablk = 0; 1357 gfs2_add_inode_blocks(&ip->i_inode, -1); 1358 } 1359 if (bstart) 1360 gfs2_free_meta(ip, rgd, bstart, blen); 1361 1362 ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT; 1363 1364 error = gfs2_meta_inode_buffer(ip, &dibh); 1365 if (!error) { 1366 gfs2_trans_add_meta(ip->i_gl, dibh); 1367 gfs2_dinode_out(ip, dibh->b_data); 1368 brelse(dibh); 1369 } 1370 1371 gfs2_trans_end(sdp); 1372 1373 out_gunlock: 1374 gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs); 1375 out_rlist_free: 1376 gfs2_rlist_free(&rlist); 1377 out: 1378 brelse(indbh); 1379 return error; 1380 } 1381 1382 static int ea_dealloc_block(struct gfs2_inode *ip) 1383 { 1384 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 1385 struct gfs2_rgrpd *rgd; 1386 struct buffer_head *dibh; 1387 struct gfs2_holder gh; 1388 int error; 1389 1390 error = gfs2_rindex_update(sdp); 1391 if (error) 1392 return error; 1393 1394 rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1); 1395 if (!rgd) { 1396 gfs2_consist_inode(ip); 1397 return -EIO; 1398 } 1399 1400 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 1401 LM_FLAG_NODE_SCOPE, &gh); 1402 if (error) 1403 return error; 1404 1405 error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS + 1406 RES_QUOTA, 1); 1407 if (error) 1408 goto out_gunlock; 1409 1410 gfs2_free_meta(ip, rgd, ip->i_eattr, 1); 1411 1412 ip->i_eattr = 0; 1413 gfs2_add_inode_blocks(&ip->i_inode, -1); 1414 1415 if (likely(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) { 1416 error = gfs2_meta_inode_buffer(ip, &dibh); 1417 if (!error) { 1418 gfs2_trans_add_meta(ip->i_gl, dibh); 1419 gfs2_dinode_out(ip, dibh->b_data); 1420 brelse(dibh); 1421 } 1422 } 1423 1424 gfs2_trans_end(sdp); 1425 1426 out_gunlock: 1427 gfs2_glock_dq_uninit(&gh); 1428 return error; 1429 } 1430 1431 /** 1432 * gfs2_ea_dealloc - deallocate the extended attribute fork 1433 * @ip: the inode 1434 * 1435 * Returns: errno 1436 */ 1437 1438 int gfs2_ea_dealloc(struct gfs2_inode *ip) 1439 { 1440 int error; 1441 1442 error = gfs2_rindex_update(GFS2_SB(&ip->i_inode)); 1443 if (error) 1444 return error; 1445 1446 error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE); 1447 if (error) 1448 return error; 1449 1450 if (likely(!test_bit(GIF_ALLOC_FAILED, &ip->i_flags))) { 1451 error = ea_foreach(ip, ea_dealloc_unstuffed, NULL); 1452 if (error) 1453 goto out_quota; 1454 1455 if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) { 1456 error = ea_dealloc_indirect(ip); 1457 if (error) 1458 goto out_quota; 1459 } 1460 } 1461 1462 error = ea_dealloc_block(ip); 1463 1464 out_quota: 1465 gfs2_quota_unhold(ip); 1466 return error; 1467 } 1468 1469 static const struct xattr_handler gfs2_xattr_user_handler = { 1470 .prefix = XATTR_USER_PREFIX, 1471 .flags = GFS2_EATYPE_USR, 1472 .get = gfs2_xattr_get, 1473 .set = gfs2_xattr_set, 1474 }; 1475 1476 static const struct xattr_handler gfs2_xattr_security_handler = { 1477 .prefix = XATTR_SECURITY_PREFIX, 1478 .flags = GFS2_EATYPE_SECURITY, 1479 .get = gfs2_xattr_get, 1480 .set = gfs2_xattr_set, 1481 }; 1482 1483 static bool 1484 gfs2_xattr_trusted_list(struct dentry *dentry) 1485 { 1486 return capable(CAP_SYS_ADMIN); 1487 } 1488 1489 static const struct xattr_handler gfs2_xattr_trusted_handler = { 1490 .prefix = XATTR_TRUSTED_PREFIX, 1491 .flags = GFS2_EATYPE_TRUSTED, 1492 .list = gfs2_xattr_trusted_list, 1493 .get = gfs2_xattr_get, 1494 .set = gfs2_xattr_set, 1495 }; 1496 1497 const struct xattr_handler *gfs2_xattr_handlers_max[] = { 1498 /* GFS2_FS_FORMAT_MAX */ 1499 &gfs2_xattr_trusted_handler, 1500 1501 /* GFS2_FS_FORMAT_MIN */ 1502 &gfs2_xattr_user_handler, 1503 &gfs2_xattr_security_handler, 1504 NULL, 1505 }; 1506 1507 const struct xattr_handler **gfs2_xattr_handlers_min = gfs2_xattr_handlers_max + 1; 1508