// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext2/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Extended attributes for symlinks and special files added per
 *  suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 *  Red Hat Inc.
 *
 */

/*
 * Extended attributes are stored on disk blocks allocated outside of
 * any inode. The i_file_acl field is then made to point to this allocated
 * block. If all extended attributes of an inode are identical, these
 * inodes may share the same extended attribute block. Such situations
 * are automatically detected by keeping a cache of recent attribute block
 * numbers and hashes over the block's contents in memory.
 *
 *
 * Extended attribute block layout:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The block header is followed by multiple entry descriptors. These entry
 * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
 * byte boundaries. The entry descriptors are sorted by attribute name,
 * so that two extended attribute blocks can be compared efficiently.
 *
 * Attribute values are aligned to the end of the block, stored in
 * no specific order. They are also padded to EXT2_XATTR_PAD byte
 * boundaries. No additional gaps are left between them.
 *
 * Locking strategy
 * ----------------
 * EXT2_I(inode)->i_file_acl is protected by EXT2_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count will change. Multiple writers to an EA block are synchronized
 * by the bh lock. No more than a single bh lock is held at any time
 * to avoid deadlocks.
 */
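
/*
 * For reference, the diagram above corresponds to the on-disk structures
 * declared in xattr.h.  The sketch below is illustrative only; xattr.h
 * remains the authoritative definition:
 *
 *	struct ext2_xattr_header {
 *		__le32	h_magic;	// magic number for identification
 *		__le32	h_refcount;	// reference count
 *		__le32	h_blocks;	// number of disk blocks used
 *		__le32	h_hash;		// hash value of all attributes
 *		__u32	h_reserved[4];
 *	};
 *
 *	struct ext2_xattr_entry {
 *		__u8	e_name_len;	// length of name
 *		__u8	e_name_index;	// attribute name index
 *		__le16	e_value_offs;	// offset of value within the block
 *		__le32	e_value_block;	// unused, must be zero
 *		__le32	e_value_size;	// size of attribute value
 *		__le32	e_hash;		// hash of name and value
 *		char	e_name[];	// attribute name, not null-terminated
 *	};
 *
 * The "four null bytes" in the diagram are the all-zero __u32 that
 * terminates the entry list (see IS_LAST_ENTRY() below).
 */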

#include <linux/buffer_head.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include <linux/security.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"

#define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
#define ENTRY(ptr) ((struct ext2_xattr_entry *)(ptr))
#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)

#ifdef EXT2_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
		printk(KERN_DEBUG "inode %s:%ld: ", \
			inode->i_sb->s_id, inode->i_ino); \
		printk(f); \
		printk("\n"); \
	} while (0)
# define ea_bdebug(bh, f...) do { \
		printk(KERN_DEBUG "block %pg:%lu: ", \
			bh->b_bdev, (unsigned long) bh->b_blocknr); \
		printk(f); \
		printk("\n"); \
	} while (0)
#else
# define ea_idebug(inode, f...)	no_printk(f)
# define ea_bdebug(bh, f...)	no_printk(f)
#endif

static int ext2_xattr_set2(struct inode *, struct buffer_head *,
			   struct ext2_xattr_header *);

static int ext2_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
static struct buffer_head *ext2_xattr_cache_find(struct inode *,
						 struct ext2_xattr_header *);
static void ext2_xattr_rehash(struct ext2_xattr_header *,
			      struct ext2_xattr_entry *);

static const struct xattr_handler *ext2_xattr_handler_map[] = {
	[EXT2_XATTR_INDEX_USER]		     = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	[EXT2_XATTR_INDEX_POSIX_ACL_ACCESS]  = &nop_posix_acl_access,
	[EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &nop_posix_acl_default,
#endif
	[EXT2_XATTR_INDEX_TRUSTED]	     = &ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
	[EXT2_XATTR_INDEX_SECURITY]	     = &ext2_xattr_security_handler,
#endif
};

const struct xattr_handler *ext2_xattr_handlers[] = {
	&ext2_xattr_user_handler,
	&ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
	&ext2_xattr_security_handler,
#endif
	NULL
};

#define EA_BLOCK_CACHE(inode)	(EXT2_SB(inode->i_sb)->s_ea_block_cache)

static inline const char *ext2_xattr_prefix(int name_index,
					    struct dentry *dentry)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext2_xattr_handler_map))
		handler = ext2_xattr_handler_map[name_index];

	if (!xattr_handler_can_list(handler, dentry))
		return NULL;

	return xattr_prefix(handler);
}

static bool
ext2_xattr_header_valid(struct ext2_xattr_header *header)
{
	if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    header->h_blocks != cpu_to_le32(1))
		return false;

	return true;
}

static bool
ext2_xattr_entry_valid(struct ext2_xattr_entry *entry,
		       char *end, size_t end_offs)
{
	struct ext2_xattr_entry *next;
	size_t size;

	next = EXT2_XATTR_NEXT(entry);
	if ((char *)next >= end)
		return false;

	if (entry->e_value_block != 0)
		return false;

	size = le32_to_cpu(entry->e_value_size);
	if (size > end_offs ||
	    le16_to_cpu(entry->e_value_offs) + size > end_offs)
		return false;

	return true;
}

static int
ext2_xattr_cmp_entry(int name_index, size_t name_len, const char *name,
		     struct ext2_xattr_entry *entry)
{
	int cmp;

	cmp = name_index - entry->e_name_index;
	if (!cmp)
		cmp = name_len - entry->e_name_len;
	if (!cmp)
		cmp = memcmp(name, entry->e_name, name_len);

	return cmp;
}

/*
 * ext2_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext2_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	size_t name_len, size;
	char *end;
	int error, not_found;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255)
		return -ERANGE;

	down_read(&EXT2_I(inode)->xattr_sem);
	error = -ENODATA;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (!ext2_xattr_header_valid(HDR(bh))) {
bad_block:
		ext2_error(inode->i_sb, "ext2_xattr_get",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* find named attribute */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		if (!ext2_xattr_entry_valid(entry, end,
		    inode->i_sb->s_blocksize))
			goto bad_block;

		not_found = ext2_xattr_cmp_entry(name_index, name_len, name,
						 entry);
		if (!not_found)
			goto found;
		if (not_found < 0)
			break;

		entry = EXT2_XATTR_NEXT(entry);
	}
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");
	error = -ENODATA;
	goto cleanup;
found:
	size = le32_to_cpu(entry->e_value_size);
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		/* return value of attribute */
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
			size);
	}
	error = size;

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}
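
/*
 * In-kernel callers of ext2_xattr_get() typically follow the usual two-call
 * pattern.  The snippet below is an illustrative sketch only (the attribute
 * name and the error handling of a real caller are made up):
 *
 *	int size = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER,
 *				  "mime_type", NULL, 0);
 *	if (size >= 0) {
 *		void *buf = kmalloc(size, GFP_KERNEL);
 *		if (buf)
 *			size = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER,
 *					      "mime_type", buf, size);
 *	}
 *
 * ext2_xattr_list() below follows the same convention: a NULL buffer
 * returns the required size, a too-small buffer returns -ERANGE.
 */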

/*
 * ext2_xattr_list()
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
static int
ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	char *end;
	size_t rest = buffer_size;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	down_read(&EXT2_I(inode)->xattr_sem);
	error = 0;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (!ext2_xattr_header_valid(HDR(bh))) {
bad_block:
		ext2_error(inode->i_sb, "ext2_xattr_list",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* check the on-disk data structure */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		if (!ext2_xattr_entry_valid(entry, end,
		    inode->i_sb->s_blocksize))
			goto bad_block;
		entry = EXT2_XATTR_NEXT(entry);
	}
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");

	/* list the attribute names */
	for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
	     entry = EXT2_XATTR_NEXT(entry)) {
		const char *prefix;

		prefix = ext2_xattr_prefix(entry->e_name_index, dentry);
		if (prefix) {
			size_t prefix_len = strlen(prefix);
			size_t size = prefix_len + entry->e_name_len + 1;

			if (buffer) {
				if (size > rest) {
					error = -ERANGE;
					goto cleanup;
				}
				memcpy(buffer, prefix, prefix_len);
				buffer += prefix_len;
				memcpy(buffer, entry->e_name, entry->e_name_len);
				buffer += entry->e_name_len;
				*buffer++ = 0;
			}
			rest -= size;
		}
	}
	error = buffer_size - rest;  /* total size */

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * Inode operation listxattr()
 *
 * d_inode(dentry)->i_mutex: don't care
 */
ssize_t
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	return ext2_xattr_list(dentry, buffer, size);
}

/*
 * If the EXT2_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext2_xattr_update_super_block(struct super_block *sb)
{
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
		return;

	spin_lock(&EXT2_SB(sb)->s_lock);
	ext2_update_dynamic_rev(sb);
	EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
	spin_unlock(&EXT2_SB(sb)->s_lock);
	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}

/*
 * ext2_xattr_set()
 *
 * Create, replace or remove an extended attribute for this inode. Value
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
 * specify that an extended attribute must exist and must not exist
 * previous to the call, respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext2_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh = NULL;
	struct ext2_xattr_header *header = NULL;
	struct ext2_xattr_entry *here = NULL, *last = NULL;
	size_t name_len, free, min_offs = sb->s_blocksize;
	int not_found = 1, error;
	char *end;

	/*
	 * header -- Points either into bh, or to a temporarily
	 *           allocated buffer.
	 * here -- The named entry found, or the place for inserting, within
	 *         the block pointed to by header.
	 * last -- Points right after the last named entry within the block
	 *         pointed to by header.
	 * min_offs -- The offset of the first value (values are aligned
	 *             towards the end of the block).
	 * end -- Points right after the block pointed to by header.
	 */

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  name_index, name, value, (long)value_len);

	if (value == NULL)
		value_len = 0;
	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255 || value_len > sb->s_blocksize)
		return -ERANGE;
	error = dquot_initialize(inode);
	if (error)
		return error;
	down_write(&EXT2_I(inode)->xattr_sem);
	if (EXT2_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bh = sb_bread(sb, EXT2_I(inode)->i_file_acl);
		error = -EIO;
		if (!bh)
			goto cleanup;
		ea_bdebug(bh, "b_count=%d, refcount=%d",
			atomic_read(&(bh->b_count)),
			le32_to_cpu(HDR(bh)->h_refcount));
		header = HDR(bh);
		end = bh->b_data + bh->b_size;
		if (!ext2_xattr_header_valid(header)) {
bad_block:
			ext2_error(sb, "ext2_xattr_set",
				"inode %ld: bad block %d", inode->i_ino,
				EXT2_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		/*
		 * Find the named attribute. If not found, 'here' will point
		 * to entry where the new attribute should be inserted to
		 * maintain sorting.
		 */
		last = FIRST_ENTRY(bh);
		while (!IS_LAST_ENTRY(last)) {
			if (!ext2_xattr_entry_valid(last, end, sb->s_blocksize))
				goto bad_block;
			if (last->e_value_size) {
				size_t offs = le16_to_cpu(last->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			if (not_found > 0) {
				not_found = ext2_xattr_cmp_entry(name_index,
								 name_len,
								 name, last);
				if (not_found <= 0)
					here = last;
			}
			last = EXT2_XATTR_NEXT(last);
		}
		if (not_found > 0)
			here = last;

		/* Check whether we have enough space left. */
		free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
	} else {
		/* We will use a new extended attribute block. */
		free = sb->s_blocksize -
			sizeof(struct ext2_xattr_header) - sizeof(__u32);
	}

	if (not_found) {
		/* Request to remove a nonexistent attribute? */
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (value == NULL)
			goto cleanup;
	} else {
		/* Request to create an existing attribute? */
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
		free += EXT2_XATTR_SIZE(le32_to_cpu(here->e_value_size));
		free += EXT2_XATTR_LEN(name_len);
	}
	error = -ENOSPC;
	if (free < EXT2_XATTR_LEN(name_len) + EXT2_XATTR_SIZE(value_len))
		goto cleanup;

	/* Here we know that we can set the new attribute. */

	if (header) {
		int offset;

		lock_buffer(bh);
		if (header->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(header->h_hash);
			struct mb_cache_entry *oe;

			oe = mb_cache_entry_delete_or_get(EA_BLOCK_CACHE(inode),
					hash, bh->b_blocknr);
			if (!oe) {
				ea_bdebug(bh, "modifying in-place");
				goto update_block;
			}
			/*
			 * Someone is trying to reuse the block, leave it alone
			 */
			mb_cache_entry_put(EA_BLOCK_CACHE(inode), oe);
		}
		unlock_buffer(bh);
		ea_bdebug(bh, "cloning");
		header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
		error = -ENOMEM;
		if (header == NULL)
			goto cleanup;
		header->h_refcount = cpu_to_le32(1);

		offset = (char *)here - bh->b_data;
		here = ENTRY((char *)header + offset);
		offset = (char *)last - bh->b_data;
		last = ENTRY((char *)header + offset);
	} else {
		/* Allocate a buffer where we construct the new block. */
		header = kzalloc(sb->s_blocksize, GFP_KERNEL);
		error = -ENOMEM;
		if (header == NULL)
			goto cleanup;
		header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
		header->h_blocks = header->h_refcount = cpu_to_le32(1);
		last = here = ENTRY(header+1);
	}

update_block:
	/* Iff we are modifying the block in-place, bh is locked here. */

	if (not_found) {
		/* Insert the new name. */
		size_t size = EXT2_XATTR_LEN(name_len);
		size_t rest = (char *)last - (char *)here;
		memmove((char *)here + size, here, rest);
		memset(here, 0, size);
		here->e_name_index = name_index;
		here->e_name_len = name_len;
		memcpy(here->e_name, name, name_len);
	} else {
		if (here->e_value_size) {
			char *first_val = (char *)header + min_offs;
			size_t offs = le16_to_cpu(here->e_value_offs);
			char *val = (char *)header + offs;
			size_t size = EXT2_XATTR_SIZE(
				le32_to_cpu(here->e_value_size));

			if (size == EXT2_XATTR_SIZE(value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				here->e_value_size = cpu_to_le32(value_len);
				memset(val + size - EXT2_XATTR_PAD, 0,
				       EXT2_XATTR_PAD); /* Clear pad bytes. */
				memcpy(val, value, value_len);
				goto skip_replace;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			min_offs += size;

			/* Adjust all value offsets. */
			last = ENTRY(header+1);
			while (!IS_LAST_ENTRY(last)) {
				size_t o = le16_to_cpu(last->e_value_offs);
				if (o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT2_XATTR_NEXT(last);
			}

			here->e_value_offs = 0;
		}
		if (value == NULL) {
			/* Remove the old name. */
			size_t size = EXT2_XATTR_LEN(name_len);
			last = ENTRY((char *)last - size);
			memmove(here, (char*)here + size,
				(char*)last - (char*)here);
			memset(last, 0, size);
		}
	}

	if (value != NULL) {
		/* Insert the new value. */
		here->e_value_size = cpu_to_le32(value_len);
		if (value_len) {
			size_t size = EXT2_XATTR_SIZE(value_len);
			char *val = (char *)header + min_offs - size;
			here->e_value_offs =
				cpu_to_le16((char *)val - (char *)header);
			memset(val + size - EXT2_XATTR_PAD, 0,
			       EXT2_XATTR_PAD); /* Clear the pad bytes. */
			memcpy(val, value, value_len);
		}
	}

skip_replace:
	if (IS_LAST_ENTRY(ENTRY(header+1))) {
		/* This block is now empty. */
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, NULL);
	} else {
		ext2_xattr_rehash(header, here);
		if (bh && header == HDR(bh))
			unlock_buffer(bh);  /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, header);
	}

cleanup:
	if (!(bh && header == HDR(bh)))
		kfree(header);
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);

	return error;
}
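
/*
 * Caller-side semantics of ext2_xattr_set(), summarized as a sketch.  The
 * attribute name and values below are made up and purely illustrative:
 *
 *	create only, -EEXIST if "user.foo" already exists:
 *		err = ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo",
 *				     "bar", 3, XATTR_CREATE);
 *
 *	replace only, -ENODATA if "user.foo" does not exist:
 *		err = ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo",
 *				     "baz", 3, XATTR_REPLACE);
 *
 *	create or replace:
 *		err = ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo",
 *				     "baz", 3, 0);
 *
 *	remove, a NULL value deletes the attribute:
 *		err = ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo",
 *				     NULL, 0, 0);
 */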

static void ext2_xattr_release_block(struct inode *inode,
				     struct buffer_head *bh)
{
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

retry_ref:
	lock_buffer(bh);
	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
		__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
		struct mb_cache_entry *oe;

		/*
		 * This must happen under buffer lock to properly
		 * serialize with ext2_xattr_set() reusing the block.
		 */
		oe = mb_cache_entry_delete_or_get(ea_block_cache, hash,
						  bh->b_blocknr);
		if (oe) {
			/*
			 * Someone is trying to reuse the block. Wait
			 * and retry.
			 */
			unlock_buffer(bh);
			mb_cache_entry_wait_unused(oe);
			mb_cache_entry_put(ea_block_cache, oe);
			goto retry_ref;
		}

		/* Free the old block. */
		ea_bdebug(bh, "freeing");
		ext2_free_blocks(inode, bh->b_blocknr, 1);
		/* We let our caller release bh, so we
		 * need to duplicate the buffer before. */
		get_bh(bh);
		bforget(bh);
		unlock_buffer(bh);
	} else {
		/* Decrement the refcount only. */
		le32_add_cpu(&HDR(bh)->h_refcount, -1);
		dquot_free_block(inode, 1);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		ea_bdebug(bh, "refcount now=%d",
			  le32_to_cpu(HDR(bh)->h_refcount));
		if (IS_SYNC(inode))
			sync_dirty_buffer(bh);
	}
}

/*
 * Second half of ext2_xattr_set(): Update the file system.
 */
static int
ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
		struct ext2_xattr_header *header)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	if (header) {
		new_bh = ext2_xattr_cache_find(inode, header);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == old_bh) {
				ea_bdebug(new_bh, "keeping this block");
			} else {
				/* The old block is released after updating
				   the inode. */
				ea_bdebug(new_bh, "reusing block");

				error = dquot_alloc_block(inode, 1);
				if (error) {
					unlock_buffer(new_bh);
					goto cleanup;
				}
				le32_add_cpu(&HDR(new_bh)->h_refcount, 1);
				ea_bdebug(new_bh, "refcount now=%d",
					le32_to_cpu(HDR(new_bh)->h_refcount));
			}
			unlock_buffer(new_bh);
		} else if (old_bh && header == HDR(old_bh)) {
			/* Keep this block. No need to lock the block as we
			   don't need to change the reference count. */
			new_bh = old_bh;
			get_bh(new_bh);
			ext2_xattr_cache_insert(ea_block_cache, new_bh);
		} else {
			/* We need to allocate a new block */
			ext2_fsblk_t goal = ext2_group_first_block_no(sb,
						EXT2_I(inode)->i_block_group);
			unsigned long count = 1;
			ext2_fsblk_t block = ext2_new_blocks(inode, goal,
						&count, &error,
						EXT2_ALLOC_NORESERVE);
			if (error)
				goto cleanup;
			ea_idebug(inode, "creating block %lu", block);

			new_bh = sb_getblk(sb, block);
			if (unlikely(!new_bh)) {
				ext2_free_blocks(inode, block, 1);
				mark_inode_dirty(inode);
				error = -ENOMEM;
				goto cleanup;
			}
			lock_buffer(new_bh);
			memcpy(new_bh->b_data, header, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext2_xattr_cache_insert(ea_block_cache, new_bh);

			ext2_xattr_update_super_block(sb);
		}
		mark_buffer_dirty(new_bh);
		if (IS_SYNC(inode)) {
			sync_dirty_buffer(new_bh);
			error = -EIO;
			if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
				goto cleanup;
		}
	}

	/* Update the inode. */
	EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
	inode_set_ctime_current(inode);
	if (IS_SYNC(inode)) {
		error = sync_inode_metadata(inode, 1);
		/* In case sync failed due to ENOSPC the inode was actually
		 * written (only some dirty data were not) so we just proceed
		 * as if nothing happened and cleanup the unused block */
		if (error && error != -ENOSPC) {
			if (new_bh && new_bh != old_bh) {
				dquot_free_block_nodirty(inode, 1);
				mark_inode_dirty(inode);
			}
			goto cleanup;
		}
	} else
		mark_inode_dirty(inode);

	error = 0;
	if (old_bh && old_bh != new_bh) {
		/*
		 * If there was an old block and we are no longer using it,
		 * release the old block.
		 */
		ext2_xattr_release_block(inode, old_bh);
	}

cleanup:
	brelse(new_bh);

	return error;
}

/*
 * ext2_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed.
 */
void
ext2_xattr_delete_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb);

	/*
	 * We are the only ones holding inode reference. The xattr_sem should
	 * better be unlocked! We could as well just not acquire xattr_sem at
	 * all but this makes the code more futureproof. OTOH we need trylock
	 * here to avoid false-positive warning from lockdep about reclaim
	 * circular dependency.
	 */
	if (WARN_ON_ONCE(!down_write_trylock(&EXT2_I(inode)->xattr_sem)))
		return;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;

	if (!ext2_data_block_valid(sbi, EXT2_I(inode)->i_file_acl, 1)) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: xattr block %d is out of data blocks range",
			inode->i_ino, EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}

	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	if (!bh) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: block %d read error", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
	if (!ext2_xattr_header_valid(HDR(bh))) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ext2_xattr_release_block(inode, bh);
	EXT2_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);
}

/*
 * ext2_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * Returns 0, or a negative error number on failure.
 */
static int
ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
{
	__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
	int error;

	error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr,
				      true);
	if (error) {
		if (error == -EBUSY) {
			ea_bdebug(bh, "already in cache");
			error = 0;
		}
	} else
		ea_bdebug(bh, "inserting [%x]", (int)hash);
	return error;
}

/*
 * ext2_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext2_xattr_cmp(struct ext2_xattr_header *header1,
	       struct ext2_xattr_header *header2)
{
	struct ext2_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
			return -EIO;
		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT2_XATTR_NEXT(entry1);
		entry2 = EXT2_XATTR_NEXT(entry2);
	}
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}

/*
 * ext2_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a locked buffer head to the block found, or NULL if such
 * a block was not found or an error occurred.
 */
static struct buffer_head *
ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);

	ce = mb_cache_entry_find_first(ea_block_cache, hash);
	while (ce) {
		struct buffer_head *bh;

		bh = sb_bread(inode->i_sb, ce->e_value);
		if (!bh) {
			ext2_error(inode->i_sb, "ext2_xattr_cache_find",
				"inode %ld: block %ld read error",
				inode->i_ino, (unsigned long) ce->e_value);
		} else {
			lock_buffer(bh);
			if (le32_to_cpu(HDR(bh)->h_refcount) >
			    EXT2_XATTR_REFCOUNT_MAX) {
				ea_idebug(inode, "block %ld refcount %d>%d",
					  (unsigned long) ce->e_value,
					  le32_to_cpu(HDR(bh)->h_refcount),
					  EXT2_XATTR_REFCOUNT_MAX);
			} else if (!ext2_xattr_cmp(header, HDR(bh))) {
				ea_bdebug(bh, "b_count=%d",
					  atomic_read(&(bh->b_count)));
				mb_cache_entry_touch(ea_block_cache, ce);
				mb_cache_entry_put(ea_block_cache, ce);
				return bh;
			}
			unlock_buffer(bh);
			brelse(bh);
		}
		ce = mb_cache_entry_find_next(ea_block_cache, ce);
	}
	return NULL;
}
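
/*
 * Note on block sharing (a summary of the flow above): ext2_xattr_set2()
 * asks ext2_xattr_cache_find() for a cached block whose h_hash matches the
 * header we just built.  Every candidate returned by the mbcache is read
 * from disk and verified byte-for-byte with ext2_xattr_cmp(), and is only
 * reused if its reference count has not yet reached
 * EXT2_XATTR_REFCOUNT_MAX.  The hash is therefore purely an optimization;
 * correctness relies on the full comparison.
 */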

#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext2_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header,
					 struct ext2_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	for (n = 0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
		     EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) {
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}
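
/*
 * The folds above are 32-bit rotate-and-xor steps: since the two shifted
 * halves occupy disjoint bit positions, each iteration is equivalent to
 *
 *	hash = rol32(hash, SHIFT) ^ next;
 *
 * with SHIFT = NAME_HASH_SHIFT over the name bytes and VALUE_HASH_SHIFT
 * over the value words (an illustrative restatement only; the code spells
 * the rotate out by hand).
 */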

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext2_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
static void ext2_xattr_rehash(struct ext2_xattr_header *header,
			      struct ext2_xattr_entry *entry)
{
	struct ext2_xattr_entry *here;
	__u32 hash = 0;

	ext2_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT2_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT

#define HASH_BUCKET_BITS 10

struct mb_cache *ext2_xattr_create_cache(void)
{
	return mb_cache_create(HASH_BUCKET_BITS);
}

void ext2_xattr_destroy_cache(struct mb_cache *cache)
{
	if (cache)
		mb_cache_destroy(cache);
}