// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext2/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Extended attributes for symlinks and special files added per
 *  suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 *  Red Hat Inc.
 *
 */

/*
 * Extended attributes are stored on disk blocks allocated outside of
 * any inode. The i_file_acl field is then made to point to this allocated
 * block. If all extended attributes of an inode are identical, these
 * inodes may share the same extended attribute block. Such situations
 * are automatically detected by keeping a cache of recent attribute block
 * numbers and hashes over the block's contents in memory.
 *
 *
 * Extended attribute block layout:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The block header is followed by multiple entry descriptors. These entry
 * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
 * byte boundaries. The entry descriptors are sorted by attribute name,
 * so that two extended attribute blocks can be compared efficiently.
 *
 * Attribute values are aligned to the end of the block, stored in
 * no specific order. They are also padded to EXT2_XATTR_PAD byte
 * boundaries. No additional gaps are left between them.
 *
 * Locking strategy
 * ----------------
 * EXT2_I(inode)->i_file_acl is protected by EXT2_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count will change. Multiple writers to an EA block are synchronized
 * by the bh lock. No more than a single bh lock is held at any time
 * to avoid deadlocks.
 */
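
/*
 * For illustration (a sketch of the layout described above, not an exact
 * on-disk dump): a block holding a single attribute such as user.foo =
 * "bar" contains the 32-byte header, one entry descriptor carrying the
 * user name index and the name "foo" (padded to an EXT2_XATTR_PAD
 * boundary), four null bytes terminating the entry table, and the 3-byte
 * value rounded up to EXT2_XATTR_PAD bytes at the very end of the block,
 * with e_value_offs pointing at it. The hole between the null terminator
 * and the value area is where further entries and values meet as the
 * block fills up.
 */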

#include <linux/buffer_head.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include <linux/security.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"

#define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
#define ENTRY(ptr) ((struct ext2_xattr_entry *)(ptr))
#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)

#ifdef EXT2_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
		printk(KERN_DEBUG "inode %s:%ld: ", \
			inode->i_sb->s_id, inode->i_ino); \
		printk(f); \
		printk("\n"); \
	} while (0)
# define ea_bdebug(bh, f...) do { \
		printk(KERN_DEBUG "block %pg:%lu: ", \
			bh->b_bdev, (unsigned long) bh->b_blocknr); \
		printk(f); \
		printk("\n"); \
	} while (0)
#else
# define ea_idebug(inode, f...)	no_printk(f)
# define ea_bdebug(bh, f...)	no_printk(f)
#endif

static int ext2_xattr_set2(struct inode *, struct buffer_head *,
			   struct ext2_xattr_header *);

static int ext2_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
static struct buffer_head *ext2_xattr_cache_find(struct inode *,
						 struct ext2_xattr_header *);
static void ext2_xattr_rehash(struct ext2_xattr_header *,
			      struct ext2_xattr_entry *);

static const struct xattr_handler *ext2_xattr_handler_map[] = {
	[EXT2_XATTR_INDEX_USER]		     = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	[EXT2_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
	[EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
#endif
	[EXT2_XATTR_INDEX_TRUSTED]	     = &ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
	[EXT2_XATTR_INDEX_SECURITY]	     = &ext2_xattr_security_handler,
#endif
};

const struct xattr_handler *ext2_xattr_handlers[] = {
	&ext2_xattr_user_handler,
	&ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
	&ext2_xattr_security_handler,
#endif
	NULL
};

#define EA_BLOCK_CACHE(inode)	(EXT2_SB(inode->i_sb)->s_ea_block_cache)

static inline const struct xattr_handler *
ext2_xattr_handler(int name_index)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext2_xattr_handler_map))
		handler = ext2_xattr_handler_map[name_index];
	return handler;
}

static bool
ext2_xattr_header_valid(struct ext2_xattr_header *header)
{
	if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    header->h_blocks != cpu_to_le32(1))
		return false;

	return true;
}

static bool
ext2_xattr_entry_valid(struct ext2_xattr_entry *entry,
		       char *end, size_t end_offs)
{
	struct ext2_xattr_entry *next;
	size_t size;

	next = EXT2_XATTR_NEXT(entry);
	if ((char *)next >= end)
		return false;

	if (entry->e_value_block != 0)
		return false;

	size = le32_to_cpu(entry->e_value_size);
	if (size > end_offs ||
	    le16_to_cpu(entry->e_value_offs) + size > end_offs)
		return false;

	return true;
}

static int
ext2_xattr_cmp_entry(int name_index, size_t name_len, const char *name,
		     struct ext2_xattr_entry *entry)
{
	int cmp;

	cmp = name_index - entry->e_name_index;
	if (!cmp)
		cmp = name_len - entry->e_name_len;
	if (!cmp)
		cmp = memcmp(name, entry->e_name, name_len);

	return cmp;
}
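
/*
 * Illustrative note on the ordering implemented above: entries compare
 * first by name index (e.g. EXT2_XATTR_INDEX_USER vs.
 * EXT2_XATTR_INDEX_TRUSTED), then by name length, and only then byte-wise
 * by name. So within the "user." namespace, "ab" sorts before "abc"
 * regardless of the bytes, and "abc" sorts before "abd".
 */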

/*
 * ext2_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext2_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	size_t name_len, size;
	char *end;
	int error, not_found;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255)
		return -ERANGE;

	down_read(&EXT2_I(inode)->xattr_sem);
	error = -ENODATA;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (!ext2_xattr_header_valid(HDR(bh))) {
bad_block:
		ext2_error(inode->i_sb, "ext2_xattr_get",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* find named attribute */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		if (!ext2_xattr_entry_valid(entry, end,
		    inode->i_sb->s_blocksize))
			goto bad_block;

		not_found = ext2_xattr_cmp_entry(name_index, name_len, name,
						 entry);
		if (!not_found)
			goto found;
		if (not_found < 0)
			break;

		entry = EXT2_XATTR_NEXT(entry);
	}
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");
	error = -ENODATA;
	goto cleanup;

found:
	size = le32_to_cpu(entry->e_value_size);
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		/* return value of attribute */
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
			size);
	}
	error = size;

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}
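
/*
 * Illustrative two-call pattern for the NULL-buffer convention above
 * (a sketch, not code used anywhere in this file; "foo" is a placeholder
 * attribute name): first query the value size, then fetch the value.
 *
 *	int size = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, "foo",
 *				  NULL, 0);
 *	if (size > 0) {
 *		char *buf = kmalloc(size, GFP_KERNEL);
 *		if (buf) {
 *			size = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER,
 *					      "foo", buf, size);
 *			...
 *			kfree(buf);
 *		}
 *	}
 */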

/*
 * ext2_xattr_list()
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
static int
ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	char *end;
	size_t rest = buffer_size;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	down_read(&EXT2_I(inode)->xattr_sem);
	error = 0;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (!ext2_xattr_header_valid(HDR(bh))) {
bad_block:
		ext2_error(inode->i_sb, "ext2_xattr_list",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* check the on-disk data structure */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		if (!ext2_xattr_entry_valid(entry, end,
		    inode->i_sb->s_blocksize))
			goto bad_block;
		entry = EXT2_XATTR_NEXT(entry);
	}
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");

	/* list the attribute names */
	for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
	     entry = EXT2_XATTR_NEXT(entry)) {
		const struct xattr_handler *handler =
			ext2_xattr_handler(entry->e_name_index);

		if (handler && (!handler->list || handler->list(dentry))) {
			const char *prefix = handler->prefix ?: handler->name;
			size_t prefix_len = strlen(prefix);
			size_t size = prefix_len + entry->e_name_len + 1;

			if (buffer) {
				if (size > rest) {
					error = -ERANGE;
					goto cleanup;
				}
				memcpy(buffer, prefix, prefix_len);
				buffer += prefix_len;
				memcpy(buffer, entry->e_name, entry->e_name_len);
				buffer += entry->e_name_len;
				*buffer++ = 0;
			}
			rest -= size;
		}
	}
	error = buffer_size - rest;  /* total size */

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * Inode operation listxattr()
 *
 * d_inode(dentry)->i_mutex: don't care
 */
ssize_t
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	return ext2_xattr_list(dentry, buffer, size);
}
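
/*
 * Illustrative sketch of the same NULL-buffer convention for listing
 * (not code used anywhere in this file): the result is a sequence of
 * nul-terminated, prefixed names, e.g. "user.foo\0user.bar\0".
 *
 *	ssize_t len = ext2_listxattr(dentry, NULL, 0);
 *	if (len > 0) {
 *		char *names = kmalloc(len, GFP_KERNEL);
 *		if (names)
 *			len = ext2_listxattr(dentry, names, len);
 *	}
 */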

/*
 * If the EXT2_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext2_xattr_update_super_block(struct super_block *sb)
{
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
		return;

	spin_lock(&EXT2_SB(sb)->s_lock);
	ext2_update_dynamic_rev(sb);
	EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
	spin_unlock(&EXT2_SB(sb)->s_lock);
	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}

/*
 * ext2_xattr_set()
 *
 * Create, replace or remove an extended attribute for this inode. Value
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
 * specify that an extended attribute must exist and must not exist
 * previous to the call, respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext2_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh = NULL;
	struct ext2_xattr_header *header = NULL;
	struct ext2_xattr_entry *here = NULL, *last = NULL;
	size_t name_len, free, min_offs = sb->s_blocksize;
	int not_found = 1, error;
	char *end;

	/*
	 * header -- Points either into bh, or to a temporarily
	 *           allocated buffer.
	 * here -- The named entry found, or the place for inserting, within
	 *         the block pointed to by header.
	 * last -- Points right after the last named entry within the block
	 *         pointed to by header.
	 * min_offs -- The offset of the first value (values are aligned
	 *             towards the end of the block).
	 * end -- Points right after the block pointed to by header.
	 */

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  name_index, name, value, (long)value_len);

	if (value == NULL)
		value_len = 0;
	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255 || value_len > sb->s_blocksize)
		return -ERANGE;
	error = dquot_initialize(inode);
	if (error)
		return error;
	down_write(&EXT2_I(inode)->xattr_sem);
	if (EXT2_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bh = sb_bread(sb, EXT2_I(inode)->i_file_acl);
		error = -EIO;
		if (!bh)
			goto cleanup;
		ea_bdebug(bh, "b_count=%d, refcount=%d",
			atomic_read(&(bh->b_count)),
			le32_to_cpu(HDR(bh)->h_refcount));
		header = HDR(bh);
		end = bh->b_data + bh->b_size;
		if (!ext2_xattr_header_valid(header)) {
bad_block:
			ext2_error(sb, "ext2_xattr_set",
				"inode %ld: bad block %d", inode->i_ino,
				EXT2_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		/*
		 * Find the named attribute. If not found, 'here' will point
		 * to entry where the new attribute should be inserted to
		 * maintain sorting.
		 */
		last = FIRST_ENTRY(bh);
		while (!IS_LAST_ENTRY(last)) {
			if (!ext2_xattr_entry_valid(last, end,
			    sb->s_blocksize))
				goto bad_block;
			if (last->e_value_size) {
				size_t offs = le16_to_cpu(last->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			if (not_found > 0) {
				not_found = ext2_xattr_cmp_entry(name_index,
								 name_len,
								 name, last);
				if (not_found <= 0)
					here = last;
			}
			last = EXT2_XATTR_NEXT(last);
		}
		if (not_found > 0)
			here = last;

		/* Check whether we have enough space left. */
		free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
	} else {
		/* We will use a new extended attribute block. */
		free = sb->s_blocksize -
			sizeof(struct ext2_xattr_header) - sizeof(__u32);
	}

	if (not_found) {
		/* Request to remove a nonexistent attribute? */
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (value == NULL)
			goto cleanup;
	} else {
		/* Request to create an existing attribute? */
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
		free += EXT2_XATTR_SIZE(le32_to_cpu(here->e_value_size));
		free += EXT2_XATTR_LEN(name_len);
	}
	error = -ENOSPC;
	if (free < EXT2_XATTR_LEN(name_len) + EXT2_XATTR_SIZE(value_len))
		goto cleanup;

	/* Here we know that we can set the new attribute. */
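	/*
	 * Three cases follow: if the existing block is used by this inode
	 * alone (refcount == 1) and nobody holds it in the mbcache, it is
	 * modified in place under the buffer lock; if it is shared, it is
	 * first copied into a private buffer (copy-on-write); if the inode
	 * had no xattr block yet, an empty header is built in a fresh
	 * buffer instead.
	 */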

	if (header) {
		int offset;

		lock_buffer(bh);
		if (header->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(header->h_hash);
			struct mb_cache_entry *oe;

			oe = mb_cache_entry_delete_or_get(EA_BLOCK_CACHE(inode),
					hash, bh->b_blocknr);
			if (!oe) {
				ea_bdebug(bh, "modifying in-place");
				goto update_block;
			}
			/*
			 * Someone is trying to reuse the block, leave it alone
			 */
			mb_cache_entry_put(EA_BLOCK_CACHE(inode), oe);
		}
		unlock_buffer(bh);
		ea_bdebug(bh, "cloning");
		header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
		error = -ENOMEM;
		if (header == NULL)
			goto cleanup;
		header->h_refcount = cpu_to_le32(1);

		offset = (char *)here - bh->b_data;
		here = ENTRY((char *)header + offset);
		offset = (char *)last - bh->b_data;
		last = ENTRY((char *)header + offset);
	} else {
		/* Allocate a buffer where we construct the new block. */
		header = kzalloc(sb->s_blocksize, GFP_KERNEL);
		error = -ENOMEM;
		if (header == NULL)
			goto cleanup;
		end = (char *)header + sb->s_blocksize;
		header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
		header->h_blocks = header->h_refcount = cpu_to_le32(1);
		last = here = ENTRY(header+1);
	}

update_block:
	/* Iff we are modifying the block in-place, bh is locked here. */

	if (not_found) {
		/* Insert the new name. */
		size_t size = EXT2_XATTR_LEN(name_len);
		size_t rest = (char *)last - (char *)here;
		memmove((char *)here + size, here, rest);
		memset(here, 0, size);
		here->e_name_index = name_index;
		here->e_name_len = name_len;
		memcpy(here->e_name, name, name_len);
	} else {
		if (here->e_value_size) {
			char *first_val = (char *)header + min_offs;
			size_t offs = le16_to_cpu(here->e_value_offs);
			char *val = (char *)header + offs;
			size_t size = EXT2_XATTR_SIZE(
				le32_to_cpu(here->e_value_size));

			if (size == EXT2_XATTR_SIZE(value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				here->e_value_size = cpu_to_le32(value_len);
				memset(val + size - EXT2_XATTR_PAD, 0,
				       EXT2_XATTR_PAD); /* Clear pad bytes. */
				memcpy(val, value, value_len);
				goto skip_replace;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			min_offs += size;

			/* Adjust all value offsets. */
			last = ENTRY(header+1);
			while (!IS_LAST_ENTRY(last)) {
				size_t o = le16_to_cpu(last->e_value_offs);
				if (o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT2_XATTR_NEXT(last);
			}

			here->e_value_offs = 0;
		}
		if (value == NULL) {
			/* Remove the old name. */
			size_t size = EXT2_XATTR_LEN(name_len);
			last = ENTRY((char *)last - size);
			memmove(here, (char*)here + size,
				(char*)last - (char*)here);
			memset(last, 0, size);
		}
	}

	if (value != NULL) {
		/* Insert the new value. */
		here->e_value_size = cpu_to_le32(value_len);
		if (value_len) {
			size_t size = EXT2_XATTR_SIZE(value_len);
			char *val = (char *)header + min_offs - size;
			here->e_value_offs =
				cpu_to_le16((char *)val - (char *)header);
			memset(val + size - EXT2_XATTR_PAD, 0,
			       EXT2_XATTR_PAD); /* Clear the pad bytes. */
			memcpy(val, value, value_len);
		}
	}

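	/*
	 * Both the general path above and the same-size in-place value
	 * replacement land here: the entry table and the value area are
	 * consistent again, and only rehashing and writing out (or
	 * dropping an emptied block) remain.
	 */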
skip_replace:
	if (IS_LAST_ENTRY(ENTRY(header+1))) {
		/* This block is now empty. */
		if (bh && header == HDR(bh))
			unlock_buffer(bh);	/* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, NULL);
	} else {
		ext2_xattr_rehash(header, here);
		if (bh && header == HDR(bh))
			unlock_buffer(bh);	/* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, header);
	}

cleanup:
	if (!(bh && header == HDR(bh)))
		kfree(header);
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);

	return error;
}
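
/*
 * Illustrative calls matching the flag semantics documented above
 * (a sketch, not code used in this file; "foo" is a placeholder name):
 *
 *	// create only; fails with -EEXIST if "foo" already exists
 *	ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo",
 *		       "bar", 3, XATTR_CREATE);
 *	// replace only; fails with -ENODATA if "foo" does not exist
 *	ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo",
 *		       "baz", 3, XATTR_REPLACE);
 *	// remove; value == NULL
 *	ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo",
 *		       NULL, 0, 0);
 */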

static void ext2_xattr_release_block(struct inode *inode,
				     struct buffer_head *bh)
{
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

retry_ref:
	lock_buffer(bh);
	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
		__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
		struct mb_cache_entry *oe;

		/*
		 * This must happen under buffer lock to properly
		 * serialize with ext2_xattr_set() reusing the block.
		 */
		oe = mb_cache_entry_delete_or_get(ea_block_cache, hash,
						  bh->b_blocknr);
		if (oe) {
			/*
			 * Someone is trying to reuse the block. Wait
			 * and retry.
			 */
			unlock_buffer(bh);
			mb_cache_entry_wait_unused(oe);
			mb_cache_entry_put(ea_block_cache, oe);
			goto retry_ref;
		}

		/* Free the old block. */
		ea_bdebug(bh, "freeing");
		ext2_free_blocks(inode, bh->b_blocknr, 1);
		/* We let our caller release bh, so we
		 * need to duplicate the buffer before. */
		get_bh(bh);
		bforget(bh);
		unlock_buffer(bh);
	} else {
		/* Decrement the refcount only. */
		le32_add_cpu(&HDR(bh)->h_refcount, -1);
		dquot_free_block(inode, 1);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		ea_bdebug(bh, "refcount now=%d",
			  le32_to_cpu(HDR(bh)->h_refcount));
		if (IS_SYNC(inode))
			sync_dirty_buffer(bh);
	}
}

/*
 * Second half of ext2_xattr_set(): Update the file system.
 */
static int
ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
		struct ext2_xattr_header *header)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	if (header) {
		new_bh = ext2_xattr_cache_find(inode, header);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == old_bh) {
				ea_bdebug(new_bh, "keeping this block");
			} else {
				/* The old block is released after updating
				   the inode. */
				ea_bdebug(new_bh, "reusing block");

				error = dquot_alloc_block(inode, 1);
				if (error) {
					unlock_buffer(new_bh);
					goto cleanup;
				}
				le32_add_cpu(&HDR(new_bh)->h_refcount, 1);
				ea_bdebug(new_bh, "refcount now=%d",
					  le32_to_cpu(HDR(new_bh)->h_refcount));
			}
			unlock_buffer(new_bh);
		} else if (old_bh && header == HDR(old_bh)) {
			/* Keep this block. No need to lock the block as we
			   don't need to change the reference count. */
			new_bh = old_bh;
			get_bh(new_bh);
			ext2_xattr_cache_insert(ea_block_cache, new_bh);
		} else {
			/* We need to allocate a new block */
			ext2_fsblk_t goal = ext2_group_first_block_no(sb,
						EXT2_I(inode)->i_block_group);
			int block = ext2_new_block(inode, goal, &error);
			if (error)
				goto cleanup;
			ea_idebug(inode, "creating block %d", block);

			new_bh = sb_getblk(sb, block);
			if (unlikely(!new_bh)) {
				ext2_free_blocks(inode, block, 1);
				mark_inode_dirty(inode);
				error = -ENOMEM;
				goto cleanup;
			}
			lock_buffer(new_bh);
			memcpy(new_bh->b_data, header, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext2_xattr_cache_insert(ea_block_cache, new_bh);

			ext2_xattr_update_super_block(sb);
		}
		mark_buffer_dirty(new_bh);
		if (IS_SYNC(inode)) {
			sync_dirty_buffer(new_bh);
			error = -EIO;
			if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
				goto cleanup;
		}
	}

	/* Update the inode. */
	EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
	inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode)) {
		error = sync_inode_metadata(inode, 1);
		/* In case sync failed due to ENOSPC the inode was actually
		 * written (only some dirty data were not) so we just proceed
		 * as if nothing happened and cleanup the unused block */
		if (error && error != -ENOSPC) {
			if (new_bh && new_bh != old_bh) {
				dquot_free_block_nodirty(inode, 1);
				mark_inode_dirty(inode);
			}
			goto cleanup;
		}
	} else
		mark_inode_dirty(inode);

	error = 0;
	if (old_bh && old_bh != new_bh) {
		/*
		 * If there was an old block and we are no longer using it,
		 * release the old block.
		 */
		ext2_xattr_release_block(inode, old_bh);
	}

cleanup:
	brelse(new_bh);

	return error;
}
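
/*
 * Note on ordering in ext2_xattr_set2() above: the new or reused block is
 * made consistent and marked dirty first, then i_file_acl is switched
 * over, and only afterwards is the old block's refcount dropped via
 * ext2_xattr_release_block(). That way the inode never points at a block
 * that has already been freed.
 */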

/*
 * ext2_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed.
 */
void
ext2_xattr_delete_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb);

	/*
	 * We are the only ones holding inode reference. The xattr_sem should
	 * better be unlocked! We could as well just not acquire xattr_sem at
	 * all but this makes the code more futureproof. OTOH we need trylock
	 * here to avoid false-positive warning from lockdep about reclaim
	 * circular dependency.
	 */
	if (WARN_ON_ONCE(!down_write_trylock(&EXT2_I(inode)->xattr_sem)))
		return;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;

	if (!ext2_data_block_valid(sbi, EXT2_I(inode)->i_file_acl, 1)) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: xattr block %d is out of data blocks range",
			inode->i_ino, EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}

	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	if (!bh) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: block %d read error", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
	if (!ext2_xattr_header_valid(HDR(bh))) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ext2_xattr_release_block(inode, bh);
	EXT2_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);
}

/*
 * ext2_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * Returns 0, or a negative error number on failure.
 */
static int
ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
{
	__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
	int error;

	error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr,
				      true);
	if (error) {
		if (error == -EBUSY) {
			ea_bdebug(bh, "already in cache");
			error = 0;
		}
	} else
		ea_bdebug(bh, "inserting [%x]", (int)hash);
	return error;
}

/*
 * ext2_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext2_xattr_cmp(struct ext2_xattr_header *header1,
	       struct ext2_xattr_header *header2)
{
	struct ext2_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
			return -EIO;
		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT2_XATTR_NEXT(entry1);
		entry2 = EXT2_XATTR_NEXT(entry2);
	}
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}

/*
 * ext2_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a locked buffer head to the block found, or NULL if such
 * a block was not found or an error occurred.
 */
static struct buffer_head *
ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);

	ce = mb_cache_entry_find_first(ea_block_cache, hash);
	while (ce) {
		struct buffer_head *bh;

		bh = sb_bread(inode->i_sb, ce->e_value);
		if (!bh) {
			ext2_error(inode->i_sb, "ext2_xattr_cache_find",
				"inode %ld: block %ld read error",
				inode->i_ino, (unsigned long) ce->e_value);
		} else {
			lock_buffer(bh);
			if (le32_to_cpu(HDR(bh)->h_refcount) >
			    EXT2_XATTR_REFCOUNT_MAX) {
				ea_idebug(inode, "block %ld refcount %d>%d",
					  (unsigned long) ce->e_value,
					  le32_to_cpu(HDR(bh)->h_refcount),
					  EXT2_XATTR_REFCOUNT_MAX);
			} else if (!ext2_xattr_cmp(header, HDR(bh))) {
				ea_bdebug(bh, "b_count=%d",
					  atomic_read(&(bh->b_count)));
				mb_cache_entry_touch(ea_block_cache, ce);
				mb_cache_entry_put(ea_block_cache, ce);
				return bh;
			}
			unlock_buffer(bh);
			brelse(bh);
		}
		ce = mb_cache_entry_find_next(ea_block_cache, ce);
	}
	return NULL;
}

#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext2_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header,
					 struct ext2_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	for (n = 0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
		     EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) {
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}
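
/*
 * Worked example of the rotate-and-XOR above: with NAME_HASH_SHIFT == 5,
 * each step is effectively hash = rol32(hash, 5) ^ byte. For the name "a"
 * the name hash is simply 0x61; for "ab" it is (0x61 << 5) ^ 0x62 = 0xc42.
 * The padded value words are then mixed in the same way, rotating by
 * VALUE_HASH_SHIFT (16) bits per word.
 */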

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext2_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
static void ext2_xattr_rehash(struct ext2_xattr_header *header,
			      struct ext2_xattr_entry *entry)
{
	struct ext2_xattr_entry *here;
	__u32 hash = 0;

	ext2_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT2_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT

#define HASH_BUCKET_BITS 10

struct mb_cache *ext2_xattr_create_cache(void)
{
	return mb_cache_create(HASH_BUCKET_BITS);
}

void ext2_xattr_destroy_cache(struct mb_cache *cache)
{
	if (cache)
		mb_cache_destroy(cache);
}