// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext2/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Extended attributes for symlinks and special files added per
 * suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 * Red Hat Inc.
 *
 */

/*
 * Extended attributes are stored on disk blocks allocated outside of
 * any inode. The i_file_acl field is then made to point to this allocated
 * block. If all extended attributes of an inode are identical, these
 * inodes may share the same extended attribute block. Such situations
 * are automatically detected by keeping a cache of recent attribute block
 * numbers and hashes over the block's contents in memory.
 *
 *
 * Extended attribute block layout:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The block header is followed by multiple entry descriptors. These entry
 * descriptors are variable in size, and aligned to EXT2_XATTR_PAD
 * byte boundaries. The entry descriptors are sorted by attribute name,
 * so that two extended attribute blocks can be compared efficiently.
 *
 * Attribute values are aligned to the end of the block, stored in
 * no specific order. They are also padded to EXT2_XATTR_PAD byte
 * boundaries. No additional gaps are left between them.
 *
 * Locking strategy
 * ----------------
 * EXT2_I(inode)->i_file_acl is protected by EXT2_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count will change. Multiple writers to an EA block are synchronized
 * by the bh lock. No more than a single bh lock is held at any time
 * to avoid deadlocks.
 */
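
/*
 * Worked example of the on-disk arithmetic described above (illustrative
 * only; it assumes the 16-byte struct ext2_xattr_entry header and the
 * EXT2_XATTR_PAD of 4 defined in xattr.h): storing "user.mime_type" with
 * a 10-byte value records the 9-byte suffix "mime_type" in an entry
 * descriptor of EXT2_XATTR_LEN(9) = (9 + 3 + 16) & ~3 = 28 bytes at the
 * front of the block, while the value occupies EXT2_XATTR_SIZE(10) =
 * (10 + 3) & ~3 = 12 bytes carved off the end of the block.
 */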

#include <linux/buffer_head.h>
#include <linux/init.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include <linux/security.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"

#define HDR(bh) ((struct ext2_xattr_header *)((bh)->b_data))
#define ENTRY(ptr) ((struct ext2_xattr_entry *)(ptr))
#define FIRST_ENTRY(bh) ENTRY(HDR(bh)+1)
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)

#ifdef EXT2_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
		printk(KERN_DEBUG "inode %s:%ld: ", \
			inode->i_sb->s_id, inode->i_ino); \
		printk(f); \
		printk("\n"); \
	} while (0)
# define ea_bdebug(bh, f...) do { \
		printk(KERN_DEBUG "block %pg:%lu: ", \
			bh->b_bdev, (unsigned long) bh->b_blocknr); \
		printk(f); \
		printk("\n"); \
	} while (0)
#else
# define ea_idebug(inode, f...)	no_printk(f)
# define ea_bdebug(bh, f...)	no_printk(f)
#endif

static int ext2_xattr_set2(struct inode *, struct buffer_head *,
			   struct ext2_xattr_header *);

static int ext2_xattr_cache_insert(struct mb_cache *, struct buffer_head *);
static struct buffer_head *ext2_xattr_cache_find(struct inode *,
						 struct ext2_xattr_header *);
static void ext2_xattr_rehash(struct ext2_xattr_header *,
			      struct ext2_xattr_entry *);

static const struct xattr_handler *ext2_xattr_handler_map[] = {
	[EXT2_XATTR_INDEX_USER]		     = &ext2_xattr_user_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	[EXT2_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
	[EXT2_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
#endif
	[EXT2_XATTR_INDEX_TRUSTED]	     = &ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_SECURITY
	[EXT2_XATTR_INDEX_SECURITY]	     = &ext2_xattr_security_handler,
#endif
};

const struct xattr_handler *ext2_xattr_handlers[] = {
	&ext2_xattr_user_handler,
	&ext2_xattr_trusted_handler,
#ifdef CONFIG_EXT2_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
#ifdef CONFIG_EXT2_FS_SECURITY
	&ext2_xattr_security_handler,
#endif
	NULL
};

#define EA_BLOCK_CACHE(inode)	(EXT2_SB(inode->i_sb)->s_ea_block_cache)

static inline const struct xattr_handler *
ext2_xattr_handler(int name_index)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext2_xattr_handler_map))
		handler = ext2_xattr_handler_map[name_index];
	return handler;
}

static bool
ext2_xattr_header_valid(struct ext2_xattr_header *header)
{
	if (header->h_magic != cpu_to_le32(EXT2_XATTR_MAGIC) ||
	    header->h_blocks != cpu_to_le32(1))
		return false;

	return true;
}

static bool
ext2_xattr_entry_valid(struct ext2_xattr_entry *entry,
		       char *end, size_t end_offs)
{
	struct ext2_xattr_entry *next;
	size_t size;

	next = EXT2_XATTR_NEXT(entry);
	if ((char *)next >= end)
		return false;

	if (entry->e_value_block != 0)
		return false;

	size = le32_to_cpu(entry->e_value_size);
	if (size > end_offs ||
	    le16_to_cpu(entry->e_value_offs) + size > end_offs)
		return false;

	return true;
}

static int
ext2_xattr_cmp_entry(int name_index, size_t name_len, const char *name,
		     struct ext2_xattr_entry *entry)
{
	int cmp;

	cmp = name_index - entry->e_name_index;
	if (!cmp)
		cmp = name_len - entry->e_name_len;
	if (!cmp)
		cmp = memcmp(name, entry->e_name, name_len);

	return cmp;
}

/*
 * ext2_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext2_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	size_t name_len, size;
	char *end;
	int error, not_found;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255)
		return -ERANGE;

	down_read(&EXT2_I(inode)->xattr_sem);
	error = -ENODATA;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (!ext2_xattr_header_valid(HDR(bh))) {
bad_block:
		ext2_error(inode->i_sb, "ext2_xattr_get",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* find named attribute */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		if (!ext2_xattr_entry_valid(entry, end,
		    inode->i_sb->s_blocksize))
			goto bad_block;

		not_found = ext2_xattr_cmp_entry(name_index, name_len, name,
						 entry);
		if (!not_found)
			goto found;
		if (not_found < 0)
			break;

		entry = EXT2_XATTR_NEXT(entry);
	}
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");
	error = -ENODATA;
	goto cleanup;
found:
	size = le32_to_cpu(entry->e_value_size);
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		/* return value of attribute */
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
			size);
	}
	error = size;

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}
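
/*
 * Illustrative calling pattern (a sketch, not code from this file): the
 * per-prefix handlers, e.g. in xattr_user.c, strip the "user." prefix and
 * pass the remaining name plus their name index, typically probing for the
 * required size first:
 *
 *	size = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, "foo", NULL, 0);
 *	if (size > 0)
 *		size = ext2_xattr_get(inode, EXT2_XATTR_INDEX_USER, "foo",
 *				      buf, size);
 *
 * A NULL buffer only computes the required size, as documented above.
 */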

/*
 * ext2_xattr_list()
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
static int
ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct buffer_head *bh = NULL;
	struct ext2_xattr_entry *entry;
	char *end;
	size_t rest = buffer_size;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	down_read(&EXT2_I(inode)->xattr_sem);
	error = 0;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %d", EXT2_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(HDR(bh)->h_refcount));
	end = bh->b_data + bh->b_size;
	if (!ext2_xattr_header_valid(HDR(bh))) {
bad_block:
		ext2_error(inode->i_sb, "ext2_xattr_list",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}

	/* check the on-disk data structure */
	entry = FIRST_ENTRY(bh);
	while (!IS_LAST_ENTRY(entry)) {
		if (!ext2_xattr_entry_valid(entry, end,
		    inode->i_sb->s_blocksize))
			goto bad_block;
		entry = EXT2_XATTR_NEXT(entry);
	}
	if (ext2_xattr_cache_insert(ea_block_cache, bh))
		ea_idebug(inode, "cache insert failed");

	/* list the attribute names */
	for (entry = FIRST_ENTRY(bh); !IS_LAST_ENTRY(entry);
	     entry = EXT2_XATTR_NEXT(entry)) {
		const struct xattr_handler *handler =
			ext2_xattr_handler(entry->e_name_index);

		if (handler && (!handler->list || handler->list(dentry))) {
			const char *prefix = handler->prefix ?: handler->name;
			size_t prefix_len = strlen(prefix);
			size_t size = prefix_len + entry->e_name_len + 1;

			if (buffer) {
				if (size > rest) {
					error = -ERANGE;
					goto cleanup;
				}
				memcpy(buffer, prefix, prefix_len);
				buffer += prefix_len;
				memcpy(buffer, entry->e_name, entry->e_name_len);
				buffer += entry->e_name_len;
				*buffer++ = 0;
			}
			rest -= size;
		}
	}
	error = buffer_size - rest;  /* total size */

cleanup:
	brelse(bh);
	up_read(&EXT2_I(inode)->xattr_sem);

	return error;
}

/*
 * Inode operation listxattr()
 *
 * d_inode(dentry)->i_mutex: don't care
 */
ssize_t
ext2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	return ext2_xattr_list(dentry, buffer, size);
}
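
/*
 * The list produced above follows the usual listxattr(2) convention: a
 * sequence of NUL-terminated names with the handler prefix re-attached,
 * e.g. (illustrative) "user.foo\0security.selinux\0". Entries whose
 * handler declines via ->list() are simply skipped.
 */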

/*
 * If the EXT2_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext2_xattr_update_super_block(struct super_block *sb)
{
	if (EXT2_HAS_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR))
		return;

	spin_lock(&EXT2_SB(sb)->s_lock);
	ext2_update_dynamic_rev(sb);
	EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
	spin_unlock(&EXT2_SB(sb)->s_lock);
	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
}

/*
 * ext2_xattr_set()
 *
 * Create, replace or remove an extended attribute for this inode. Value
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
 * specify that an extended attribute must exist and must not exist
 * previous to the call, respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext2_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh = NULL;
	struct ext2_xattr_header *header = NULL;
	struct ext2_xattr_entry *here = NULL, *last = NULL;
	size_t name_len, free, min_offs = sb->s_blocksize;
	int not_found = 1, error;
	char *end;

	/*
	 * header -- Points either into bh, or to a temporarily
	 *           allocated buffer.
	 * here -- The named entry found, or the place for inserting, within
	 *         the block pointed to by header.
	 * last -- Points right after the last named entry within the block
	 *         pointed to by header.
	 * min_offs -- The offset of the first value (values are aligned
	 *             towards the end of the block).
	 * end -- Points right after the block pointed to by header.
	 */

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  name_index, name, value, (long)value_len);

	if (value == NULL)
		value_len = 0;
	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	if (name_len > 255 || value_len > sb->s_blocksize)
		return -ERANGE;
	error = dquot_initialize(inode);
	if (error)
		return error;
	down_write(&EXT2_I(inode)->xattr_sem);
	if (EXT2_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bh = sb_bread(sb, EXT2_I(inode)->i_file_acl);
		error = -EIO;
		if (!bh)
			goto cleanup;
		ea_bdebug(bh, "b_count=%d, refcount=%d",
			atomic_read(&(bh->b_count)),
			le32_to_cpu(HDR(bh)->h_refcount));
		header = HDR(bh);
		end = bh->b_data + bh->b_size;
		if (!ext2_xattr_header_valid(header)) {
bad_block:
			ext2_error(sb, "ext2_xattr_set",
				"inode %ld: bad block %d", inode->i_ino,
				EXT2_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		/*
		 * Find the named attribute. If not found, 'here' will point
		 * to entry where the new attribute should be inserted to
		 * maintain sorting.
		 */
		last = FIRST_ENTRY(bh);
		while (!IS_LAST_ENTRY(last)) {
			if (!ext2_xattr_entry_valid(last, end, sb->s_blocksize))
				goto bad_block;
			if (last->e_value_size) {
				size_t offs = le16_to_cpu(last->e_value_offs);
				if (offs < min_offs)
					min_offs = offs;
			}
			if (not_found > 0) {
				not_found = ext2_xattr_cmp_entry(name_index,
								 name_len,
								 name, last);
				if (not_found <= 0)
					here = last;
			}
			last = EXT2_XATTR_NEXT(last);
		}
		if (not_found > 0)
			here = last;

		/* Check whether we have enough space left. */
		free = min_offs - ((char*)last - (char*)header) - sizeof(__u32);
	} else {
		/* We will use a new extended attribute block. */
		free = sb->s_blocksize -
			sizeof(struct ext2_xattr_header) - sizeof(__u32);
	}

	if (not_found) {
		/* Request to remove a nonexistent attribute? */
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (value == NULL)
			goto cleanup;
	} else {
		/* Request to create an existing attribute? */
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
		free += EXT2_XATTR_SIZE(le32_to_cpu(here->e_value_size));
		free += EXT2_XATTR_LEN(name_len);
	}
	error = -ENOSPC;
	if (free < EXT2_XATTR_LEN(name_len) + EXT2_XATTR_SIZE(value_len))
		goto cleanup;

	/* Here we know that we can set the new attribute. */

	if (header) {
		/* assert(header == HDR(bh)); */
		lock_buffer(bh);
		if (header->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(header->h_hash);

			ea_bdebug(bh, "modifying in-place");
			/*
			 * This must happen under buffer lock for
			 * ext2_xattr_set2() to reliably detect modified block
			 */
			mb_cache_entry_delete(EA_BLOCK_CACHE(inode), hash,
					      bh->b_blocknr);

			/* keep the buffer locked while modifying it. */
		} else {
			int offset;

			unlock_buffer(bh);
			ea_bdebug(bh, "cloning");
			header = kmemdup(HDR(bh), bh->b_size, GFP_KERNEL);
			error = -ENOMEM;
			if (header == NULL)
				goto cleanup;
			header->h_refcount = cpu_to_le32(1);

			offset = (char *)here - bh->b_data;
			here = ENTRY((char *)header + offset);
			offset = (char *)last - bh->b_data;
			last = ENTRY((char *)header + offset);
		}
	} else {
		/* Allocate a buffer where we construct the new block. */
		header = kzalloc(sb->s_blocksize, GFP_KERNEL);
		error = -ENOMEM;
		if (header == NULL)
			goto cleanup;
		end = (char *)header + sb->s_blocksize;
		header->h_magic = cpu_to_le32(EXT2_XATTR_MAGIC);
		header->h_blocks = header->h_refcount = cpu_to_le32(1);
		last = here = ENTRY(header+1);
	}

	/* Iff we are modifying the block in-place, bh is locked here. */

	if (not_found) {
		/* Insert the new name. */
		size_t size = EXT2_XATTR_LEN(name_len);
		size_t rest = (char *)last - (char *)here;
		memmove((char *)here + size, here, rest);
		memset(here, 0, size);
		here->e_name_index = name_index;
		here->e_name_len = name_len;
		memcpy(here->e_name, name, name_len);
	} else {
		if (here->e_value_size) {
			char *first_val = (char *)header + min_offs;
			size_t offs = le16_to_cpu(here->e_value_offs);
			char *val = (char *)header + offs;
			size_t size = EXT2_XATTR_SIZE(
				le32_to_cpu(here->e_value_size));

			if (size == EXT2_XATTR_SIZE(value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				here->e_value_size = cpu_to_le32(value_len);
				memset(val + size - EXT2_XATTR_PAD, 0,
				       EXT2_XATTR_PAD); /* Clear pad bytes. */
				memcpy(val, value, value_len);
				goto skip_replace;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			min_offs += size;

			/* Adjust all value offsets. */
			last = ENTRY(header+1);
			while (!IS_LAST_ENTRY(last)) {
				size_t o = le16_to_cpu(last->e_value_offs);
				if (o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT2_XATTR_NEXT(last);
			}

			here->e_value_offs = 0;
		}
		if (value == NULL) {
			/* Remove the old name. */
			size_t size = EXT2_XATTR_LEN(name_len);
			last = ENTRY((char *)last - size);
			memmove(here, (char*)here + size,
				(char*)last - (char*)here);
			memset(last, 0, size);
		}
	}

	if (value != NULL) {
		/* Insert the new value. */
		here->e_value_size = cpu_to_le32(value_len);
		if (value_len) {
			size_t size = EXT2_XATTR_SIZE(value_len);
			char *val = (char *)header + min_offs - size;
			here->e_value_offs =
				cpu_to_le16((char *)val - (char *)header);
			memset(val + size - EXT2_XATTR_PAD, 0,
			       EXT2_XATTR_PAD); /* Clear the pad bytes. */
			memcpy(val, value, value_len);
		}
	}

skip_replace:
	if (IS_LAST_ENTRY(ENTRY(header+1))) {
		/* This block is now empty. */
		if (bh && header == HDR(bh))
			unlock_buffer(bh); /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, NULL);
	} else {
		ext2_xattr_rehash(header, here);
		if (bh && header == HDR(bh))
			unlock_buffer(bh); /* we were modifying in-place. */
		error = ext2_xattr_set2(inode, bh, header);
	}

cleanup:
	if (!(bh && header == HDR(bh)))
		kfree(header);
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);

	return error;
}
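
/*
 * Illustrative use of the flag semantics documented above ext2_xattr_set()
 * (a sketch, not code from this file):
 *
 *	ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo",
 *		       "bar", 3, XATTR_CREATE);	creates user.foo
 *	ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo",
 *		       "baz", 3, XATTR_REPLACE);	replaces its value
 *	ext2_xattr_set(inode, EXT2_XATTR_INDEX_USER, "foo",
 *		       NULL, 0, 0);			removes it
 */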

static void ext2_xattr_release_block(struct inode *inode,
				     struct buffer_head *bh)
{
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	lock_buffer(bh);
	if (HDR(bh)->h_refcount == cpu_to_le32(1)) {
		__u32 hash = le32_to_cpu(HDR(bh)->h_hash);

		/*
		 * This must happen under buffer lock for
		 * ext2_xattr_set2() to reliably detect freed block
		 */
		mb_cache_entry_delete(ea_block_cache, hash,
				      bh->b_blocknr);
		/* Free the old block. */
		ea_bdebug(bh, "freeing");
		ext2_free_blocks(inode, bh->b_blocknr, 1);
		/* We let our caller release bh, so we
		 * need to duplicate the buffer before. */
		get_bh(bh);
		bforget(bh);
		unlock_buffer(bh);
	} else {
		/* Decrement the refcount only. */
		le32_add_cpu(&HDR(bh)->h_refcount, -1);
		dquot_free_block(inode, 1);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		ea_bdebug(bh, "refcount now=%d",
			  le32_to_cpu(HDR(bh)->h_refcount));
		if (IS_SYNC(inode))
			sync_dirty_buffer(bh);
	}
}

/*
 * Second half of ext2_xattr_set(): Update the file system.
 */
static int
ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
		struct ext2_xattr_header *header)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	if (header) {
		new_bh = ext2_xattr_cache_find(inode, header);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == old_bh) {
				ea_bdebug(new_bh, "keeping this block");
			} else {
				/* The old block is released after updating
				   the inode. */
				ea_bdebug(new_bh, "reusing block");

				error = dquot_alloc_block(inode, 1);
				if (error) {
					unlock_buffer(new_bh);
					goto cleanup;
				}
				le32_add_cpu(&HDR(new_bh)->h_refcount, 1);
				ea_bdebug(new_bh, "refcount now=%d",
					  le32_to_cpu(HDR(new_bh)->h_refcount));
			}
			unlock_buffer(new_bh);
		} else if (old_bh && header == HDR(old_bh)) {
			/* Keep this block. No need to lock the block as we
			   don't need to change the reference count. */
			new_bh = old_bh;
			get_bh(new_bh);
			ext2_xattr_cache_insert(ea_block_cache, new_bh);
		} else {
			/* We need to allocate a new block */
			ext2_fsblk_t goal = ext2_group_first_block_no(sb,
						EXT2_I(inode)->i_block_group);
			int block = ext2_new_block(inode, goal, &error);
			if (error)
				goto cleanup;
			ea_idebug(inode, "creating block %d", block);

			new_bh = sb_getblk(sb, block);
			if (unlikely(!new_bh)) {
				ext2_free_blocks(inode, block, 1);
				mark_inode_dirty(inode);
				error = -ENOMEM;
				goto cleanup;
			}
			lock_buffer(new_bh);
			memcpy(new_bh->b_data, header, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext2_xattr_cache_insert(ea_block_cache, new_bh);

			ext2_xattr_update_super_block(sb);
		}
		mark_buffer_dirty(new_bh);
		if (IS_SYNC(inode)) {
			sync_dirty_buffer(new_bh);
			error = -EIO;
			if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
				goto cleanup;
		}
	}

	/* Update the inode. */
	EXT2_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
	inode->i_ctime = current_time(inode);
	if (IS_SYNC(inode)) {
		error = sync_inode_metadata(inode, 1);
		/* In case sync failed due to ENOSPC the inode was actually
		 * written (only some dirty data were not) so we just proceed
		 * as if nothing happened and cleanup the unused block */
		if (error && error != -ENOSPC) {
			if (new_bh && new_bh != old_bh) {
				dquot_free_block_nodirty(inode, 1);
				mark_inode_dirty(inode);
			}
			goto cleanup;
		}
	} else
		mark_inode_dirty(inode);

	error = 0;
	if (old_bh && old_bh != new_bh) {
		/*
		 * If there was an old block and we are no longer using it,
		 * release the old block.
		 */
		ext2_xattr_release_block(inode, old_bh);
	}

cleanup:
	brelse(new_bh);

	return error;
}

/*
 * ext2_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed.
 */
void
ext2_xattr_delete_inode(struct inode *inode)
{
	struct buffer_head *bh = NULL;
	struct ext2_sb_info *sbi = EXT2_SB(inode->i_sb);

	/*
	 * We are the only ones holding inode reference. The xattr_sem should
	 * better be unlocked! We could as well just not acquire xattr_sem at
	 * all but this makes the code more futureproof. OTOH we need trylock
	 * here to avoid false-positive warning from lockdep about reclaim
	 * circular dependency.
	 */
	if (WARN_ON_ONCE(!down_write_trylock(&EXT2_I(inode)->xattr_sem)))
		return;
	if (!EXT2_I(inode)->i_file_acl)
		goto cleanup;

	if (!ext2_data_block_valid(sbi, EXT2_I(inode)->i_file_acl, 1)) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: xattr block %d is out of data blocks range",
			inode->i_ino, EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}

	bh = sb_bread(inode->i_sb, EXT2_I(inode)->i_file_acl);
	if (!bh) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: block %d read error", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ea_bdebug(bh, "b_count=%d", atomic_read(&(bh->b_count)));
	if (!ext2_xattr_header_valid(HDR(bh))) {
		ext2_error(inode->i_sb, "ext2_xattr_delete_inode",
			"inode %ld: bad block %d", inode->i_ino,
			EXT2_I(inode)->i_file_acl);
		goto cleanup;
	}
	ext2_xattr_release_block(inode, bh);
	EXT2_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
	up_write(&EXT2_I(inode)->xattr_sem);
}

/*
 * ext2_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * Returns 0, or a negative error number on failure.
 */
static int
ext2_xattr_cache_insert(struct mb_cache *cache, struct buffer_head *bh)
{
	__u32 hash = le32_to_cpu(HDR(bh)->h_hash);
	int error;

	error = mb_cache_entry_create(cache, GFP_NOFS, hash, bh->b_blocknr,
				      true);
	if (error) {
		if (error == -EBUSY) {
			ea_bdebug(bh, "already in cache");
			error = 0;
		}
	} else
		ea_bdebug(bh, "inserting [%x]", (int)hash);
	return error;
}

/*
 * ext2_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext2_xattr_cmp(struct ext2_xattr_header *header1,
	       struct ext2_xattr_header *header2)
{
	struct ext2_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
			return -EIO;
		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT2_XATTR_NEXT(entry1);
		entry2 = EXT2_XATTR_NEXT(entry2);
	}
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}

/*
 * ext2_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a locked buffer head to the block found, or NULL if such
 * a block was not found or an error occurred.
 */
static struct buffer_head *
ext2_xattr_cache_find(struct inode *inode, struct ext2_xattr_header *header)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
	ce = mb_cache_entry_find_first(ea_block_cache, hash);
	while (ce) {
		struct buffer_head *bh;

		bh = sb_bread(inode->i_sb, ce->e_value);
		if (!bh) {
			ext2_error(inode->i_sb, "ext2_xattr_cache_find",
				"inode %ld: block %ld read error",
				inode->i_ino, (unsigned long) ce->e_value);
		} else {
			lock_buffer(bh);
			/*
			 * We have to be careful about races with freeing or
			 * rehashing of xattr block. Once we hold buffer lock
			 * xattr block's state is stable so we can check
			 * whether the block got freed / rehashed or not.
			 * Since we unhash mbcache entry under buffer lock when
			 * freeing / rehashing xattr block, checking whether
			 * entry is still hashed is reliable.
			 */
			if (hlist_bl_unhashed(&ce->e_hash_list)) {
				mb_cache_entry_put(ea_block_cache, ce);
				unlock_buffer(bh);
				brelse(bh);
				goto again;
			} else if (le32_to_cpu(HDR(bh)->h_refcount) >
				   EXT2_XATTR_REFCOUNT_MAX) {
				ea_idebug(inode, "block %ld refcount %d>%d",
					  (unsigned long) ce->e_value,
					  le32_to_cpu(HDR(bh)->h_refcount),
					  EXT2_XATTR_REFCOUNT_MAX);
			} else if (!ext2_xattr_cmp(header, HDR(bh))) {
				ea_bdebug(bh, "b_count=%d",
					  atomic_read(&(bh->b_count)));
				mb_cache_entry_touch(ea_block_cache, ce);
				mb_cache_entry_put(ea_block_cache, ce);
				return bh;
			}
			unlock_buffer(bh);
			brelse(bh);
		}
		ce = mb_cache_entry_find_next(ea_block_cache, ce);
	}
	return NULL;
}

#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext2_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static inline void ext2_xattr_hash_entry(struct ext2_xattr_header *header,
					 struct ext2_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	for (n = 0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
		     EXT2_XATTR_ROUND) >> EXT2_XATTR_PAD_BITS; n; n--) {
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}
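
/*
 * Restated as recurrences (illustrative only, equivalent to the loops
 * above with a 32-bit hash): each name byte c contributes
 *
 *	hash = (hash << 5) ^ (hash >> 27) ^ c
 *
 * and each 32-bit word w of the (padded) value contributes
 *
 *	hash = (hash << 16) ^ (hash >> 16) ^ le32_to_cpu(w)
 *
 * i.e. a shift/xor fold over the name followed by one over the value.
 */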

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext2_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
static void ext2_xattr_rehash(struct ext2_xattr_header *header,
			      struct ext2_xattr_entry *entry)
{
	struct ext2_xattr_entry *here;
	__u32 hash = 0;

	ext2_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT2_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT

#define HASH_BUCKET_BITS 10

struct mb_cache *ext2_xattr_create_cache(void)
{
	return mb_cache_create(HASH_BUCKET_BITS);
}

void ext2_xattr_destroy_cache(struct mb_cache *cache)
{
	if (cache)
		mb_cache_destroy(cache);
}
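
/*
 * Lifetime sketch (illustrative, not code from this file): the mbcache
 * referenced through EA_BLOCK_CACHE() above is expected to be created at
 * mount time and destroyed at unmount, roughly:
 *
 *	sbi->s_ea_block_cache = ext2_xattr_create_cache();
 *	...
 *	ext2_xattr_destroy_cache(sbi->s_ea_block_cache);
 *
 * (see the mount/unmount paths in super.c).
 */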