/*
 * linux/fs/ext4/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>.
 * Extended attributes for symlinks and special files added per
 *  suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 *  Red Hat Inc.
 * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
 *  and Andreas Gruenbacher <agruen@suse.de>.
 */

/*
 * Extended attributes are stored directly in inodes (on file systems with
 * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
 * field contains the block number if an inode uses an additional block. All
 * attributes must fit in the inode and one additional block. Blocks that
 * contain the identical set of attributes may be shared among several inodes.
 * Identical blocks are detected by keeping a cache of blocks that have
 * recently been accessed.
 *
 * The attributes in inodes and on blocks have a different header; the entries
 * are stored in the same format:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The header is followed by multiple entry descriptors. In disk blocks, the
 * entry descriptors are kept sorted. In inodes, they are unsorted. The
 * attribute values are aligned to the end of the block in no specific order.
 *
 * Locking strategy
 * ----------------
 * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count can change. Multiple writers to the same block are synchronized
 * by the buffer lock.
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/ext4_jbd2.h>
#include <linux/ext4_fs.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
#include "xattr.h"
#include "acl.h"

#define BHDR(bh) ((struct ext4_xattr_header *)((bh)->b_data))
#define ENTRY(ptr) ((struct ext4_xattr_entry *)(ptr))
#define BFIRST(bh) ENTRY(BHDR(bh)+1)
#define IS_LAST_ENTRY(entry) (*(__u32 *)(entry) == 0)

#define IHDR(inode, raw_inode) \
	((struct ext4_xattr_ibody_header *) \
		((void *)raw_inode + \
		EXT4_GOOD_OLD_INODE_SIZE + \
		EXT4_I(inode)->i_extra_isize))
#define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))

#ifdef EXT4_XATTR_DEBUG
# define ea_idebug(inode, f...) do { \
		printk(KERN_DEBUG "inode %s:%lu: ", \
			inode->i_sb->s_id, inode->i_ino); \
		printk(f); \
		printk("\n"); \
	} while (0)
# define ea_bdebug(bh, f...) do { \
		char b[BDEVNAME_SIZE]; \
		printk(KERN_DEBUG "block %s:%lu: ", \
			bdevname(bh->b_bdev, b), \
			(unsigned long) bh->b_blocknr); \
		printk(f); \
		printk("\n"); \
	} while (0)
#else
# define ea_idebug(f...)
# define ea_bdebug(f...)
#endif
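
/*
 * For orientation, the entry descriptor that the layout comment above and
 * the macros here operate on is defined in xattr.h; a rough sketch of the
 * ext4_xattr_entry fields (see the header for the authoritative layout):
 *
 *	__u8	e_name_len;	// length of the attribute name
 *	__u8	e_name_index;	// attribute name prefix index
 *	__le16	e_value_offs;	// offset of the value within block/inode area
 *	__le32	e_value_block;	// value block (always 0 here)
 *	__le32	e_value_size;	// size of the attribute value
 *	__le32	e_hash;		// hash of name and value
 *	char	e_name[0];	// attribute name, not NUL-terminated
 */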

static void ext4_xattr_cache_insert(struct buffer_head *);
static struct buffer_head *ext4_xattr_cache_find(struct inode *,
						 struct ext4_xattr_header *,
						 struct mb_cache_entry **);
static void ext4_xattr_rehash(struct ext4_xattr_header *,
			      struct ext4_xattr_entry *);

static struct mb_cache *ext4_xattr_cache;

static struct xattr_handler *ext4_xattr_handler_map[] = {
	[EXT4_XATTR_INDEX_USER]		     = &ext4_xattr_user_handler,
#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
	[EXT4_XATTR_INDEX_POSIX_ACL_ACCESS]  = &ext4_xattr_acl_access_handler,
	[EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &ext4_xattr_acl_default_handler,
#endif
	[EXT4_XATTR_INDEX_TRUSTED]	     = &ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4DEV_FS_SECURITY
	[EXT4_XATTR_INDEX_SECURITY]	     = &ext4_xattr_security_handler,
#endif
};

struct xattr_handler *ext4_xattr_handlers[] = {
	&ext4_xattr_user_handler,
	&ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4DEV_FS_POSIX_ACL
	&ext4_xattr_acl_access_handler,
	&ext4_xattr_acl_default_handler,
#endif
#ifdef CONFIG_EXT4DEV_FS_SECURITY
	&ext4_xattr_security_handler,
#endif
	NULL
};

static inline struct xattr_handler *
ext4_xattr_handler(int name_index)
{
	struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
		handler = ext4_xattr_handler_map[name_index];
	return handler;
}

/*
 * Inode operation listxattr()
 *
 * dentry->d_inode->i_mutex: don't care
 */
ssize_t
ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	return ext4_xattr_list(dentry->d_inode, buffer, size);
}

static int
ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end)
{
	while (!IS_LAST_ENTRY(entry)) {
		struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(entry);
		if ((void *)next >= end)
			return -EIO;
		entry = next;
	}
	return 0;
}

static inline int
ext4_xattr_check_block(struct buffer_head *bh)
{
	int error;

	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
	    BHDR(bh)->h_blocks != cpu_to_le32(1))
		return -EIO;
	error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
	return error;
}

static inline int
ext4_xattr_check_entry(struct ext4_xattr_entry *entry, size_t size)
{
	size_t value_size = le32_to_cpu(entry->e_value_size);

	if (entry->e_value_block != 0 || value_size > size ||
	    le16_to_cpu(entry->e_value_offs) + value_size > size)
		return -EIO;
	return 0;
}

static int
ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
		      const char *name, size_t size, int sorted)
{
	struct ext4_xattr_entry *entry;
	size_t name_len;
	int cmp = 1;

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	entry = *pentry;
	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
		cmp = name_index - entry->e_name_index;
		if (!cmp)
			cmp = name_len - entry->e_name_len;
		if (!cmp)
			cmp = memcmp(name, entry->e_name, name_len);
		if (cmp <= 0 && (sorted || cmp == 0))
			break;
	}
	*pentry = entry;
	if (!cmp && ext4_xattr_check_entry(entry, size))
		return -EIO;
	return cmp ? -ENODATA : 0;
}
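
/*
 * Lookup example (sketch, hypothetical entries): block entries are kept
 * sorted by (e_name_index, e_name_len, e_name), so with the descriptors
 *
 *	(index 1, "foo")  (index 4, "bar")  (index 6, "selinux")
 *
 * a search for (4, "bar") returns 0 with *pentry at the second descriptor,
 * while a search for (4, "zzz") stops at (6, "selinux") and returns
 * -ENODATA with *pentry left at the position where a new entry would be
 * inserted.  For the unsorted in-inode list (sorted == 0) the whole list
 * is scanned and a miss leaves *pentry at the terminating null entry.
 */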

static int
ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext4_xattr_entry *entry;
	size_t size;
	int error;

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	error = -ENODATA;
	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %u", EXT4_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	if (ext4_xattr_check_block(bh)) {
bad_block:
		ext4_error(inode->i_sb, __FUNCTION__,
			   "inode %lu: bad block %llu", inode->i_ino,
			   EXT4_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}
	ext4_xattr_cache_insert(bh);
	entry = BFIRST(bh);
	error = ext4_xattr_find_entry(&entry, name_index, name, bh->b_size, 1);
	if (error == -EIO)
		goto bad_block;
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		memcpy(buffer, bh->b_data + le16_to_cpu(entry->e_value_offs),
		       size);
	}
	error = size;

cleanup:
	brelse(bh);
	return error;
}

static int
ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	size_t size;
	void *end;
	int error;

	if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR))
		return -ENODATA;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	entry = IFIRST(header);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = ext4_xattr_check_names(entry, end);
	if (error)
		goto cleanup;
	error = ext4_xattr_find_entry(&entry, name_index, name,
				      end - (void *)entry, 0);
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		memcpy(buffer, (void *)IFIRST(header) +
		       le16_to_cpu(entry->e_value_offs), size);
	}
	error = size;

cleanup:
	brelse(iloc.bh);
	return error;
}
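
/*
 * Layout of the in-inode EA area that ext4_xattr_ibody_get() walks
 * (sketch; only present when s_inode_size > EXT4_GOOD_OLD_INODE_SIZE):
 *
 *	raw_inode
 *	+--------------------------------+
 *	| 128-byte on-disk inode         |  EXT4_GOOD_OLD_INODE_SIZE
 *	+--------------------------------+
 *	| i_extra_isize extra fields     |
 *	+--------------------------------+  <- IHDR(): h_magic
 *	| entries ...        ... values  |  <- IFIRST() .. end
 *	+--------------------------------+  <- raw_inode + s_inode_size == end
 */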

/*
 * ext4_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext4_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	int error;

	down_read(&EXT4_I(inode)->xattr_sem);
	error = ext4_xattr_ibody_get(inode, name_index, name, buffer,
				     buffer_size);
	if (error == -ENODATA)
		error = ext4_xattr_block_get(inode, name_index, name, buffer,
					     buffer_size);
	up_read(&EXT4_I(inode)->xattr_sem);
	return error;
}

static int
ext4_xattr_list_entries(struct inode *inode, struct ext4_xattr_entry *entry,
			char *buffer, size_t buffer_size)
{
	size_t rest = buffer_size;

	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
		struct xattr_handler *handler =
			ext4_xattr_handler(entry->e_name_index);

		if (handler) {
			size_t size = handler->list(inode, buffer, rest,
						    entry->e_name,
						    entry->e_name_len);
			if (buffer) {
				if (size > rest)
					return -ERANGE;
				buffer += size;
			}
			rest -= size;
		}
	}
	return buffer_size - rest;
}

static int
ext4_xattr_block_list(struct inode *inode, char *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	int error;

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	error = 0;
	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %u", EXT4_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	if (ext4_xattr_check_block(bh)) {
		ext4_error(inode->i_sb, __FUNCTION__,
			   "inode %lu: bad block %llu", inode->i_ino,
			   EXT4_I(inode)->i_file_acl);
		error = -EIO;
		goto cleanup;
	}
	ext4_xattr_cache_insert(bh);
	error = ext4_xattr_list_entries(inode, BFIRST(bh), buffer, buffer_size);

cleanup:
	brelse(bh);

	return error;
}

static int
ext4_xattr_ibody_list(struct inode *inode, char *buffer, size_t buffer_size)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	void *end;
	int error;

	if (!(EXT4_I(inode)->i_state & EXT4_STATE_XATTR))
		return 0;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = ext4_xattr_check_names(IFIRST(header), end);
	if (error)
		goto cleanup;
	error = ext4_xattr_list_entries(inode, IFIRST(header),
					buffer, buffer_size);

cleanup:
	brelse(iloc.bh);
	return error;
}
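
/*
 * The listing helpers above produce the flat format expected by
 * listxattr(2): full attribute names, each terminated by a NUL byte,
 * e.g. (hypothetical) "user.foo\0security.selinux\0".  With a NULL
 * buffer only the total length is computed; the handler->list callbacks
 * are still called to measure each name.
 */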

/*
 * ext4_xattr_list()
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext4_xattr_list(struct inode *inode, char *buffer, size_t buffer_size)
{
	int i_error, b_error;

	down_read(&EXT4_I(inode)->xattr_sem);
	i_error = ext4_xattr_ibody_list(inode, buffer, buffer_size);
	if (i_error < 0) {
		b_error = 0;
	} else {
		if (buffer) {
			buffer += i_error;
			buffer_size -= i_error;
		}
		b_error = ext4_xattr_block_list(inode, buffer, buffer_size);
		if (b_error < 0)
			i_error = 0;
	}
	up_read(&EXT4_I(inode)->xattr_sem);
	return i_error + b_error;
}

/*
 * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext4_xattr_update_super_block(handle_t *handle,
					  struct super_block *sb)
{
	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR))
		return;

	if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
		EXT4_SET_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_EXT_ATTR);
		sb->s_dirt = 1;
		ext4_journal_dirty_metadata(handle, EXT4_SB(sb)->s_sbh);
	}
}

/*
 * Release the xattr block BH: If the reference count is > 1, decrement
 * it; otherwise free the block.
 */
static void
ext4_xattr_release_block(handle_t *handle, struct inode *inode,
			 struct buffer_head *bh)
{
	struct mb_cache_entry *ce = NULL;
	int error = 0;

	ce = mb_cache_entry_get(ext4_xattr_cache, bh->b_bdev, bh->b_blocknr);
	error = ext4_journal_get_write_access(handle, bh);
	if (error)
		goto out;

	lock_buffer(bh);
	if (BHDR(bh)->h_refcount == cpu_to_le32(1)) {
		ea_bdebug(bh, "refcount now=0; freeing");
		if (ce)
			mb_cache_entry_free(ce);
		ext4_free_blocks(handle, inode, bh->b_blocknr, 1);
		get_bh(bh);
		ext4_forget(handle, 1, inode, bh, bh->b_blocknr);
	} else {
		BHDR(bh)->h_refcount = cpu_to_le32(
			le32_to_cpu(BHDR(bh)->h_refcount) - 1);
		error = ext4_journal_dirty_metadata(handle, bh);
		if (IS_SYNC(inode))
			handle->h_sync = 1;
		DQUOT_FREE_BLOCK(inode, 1);
		ea_bdebug(bh, "refcount now=%d; releasing",
			  le32_to_cpu(BHDR(bh)->h_refcount));
		if (ce)
			mb_cache_entry_release(ce);
	}
	unlock_buffer(bh);
out:
	ext4_std_error(inode->i_sb, error);
	return;
}

struct ext4_xattr_info {
	int name_index;
	const char *name;
	const void *value;
	size_t value_len;
};

struct ext4_xattr_search {
	struct ext4_xattr_entry *first;
	void *base;
	void *end;
	struct ext4_xattr_entry *here;
	int not_found;
};
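
/*
 * Space accounting example (sketch; macro values as defined in xattr.h
 * for this version, where descriptors and values are padded to
 * EXT4_XATTR_PAD == 4 bytes): storing a 3-byte name with a 5-byte value
 * consumes roughly
 *
 *	EXT4_XATTR_LEN(3)  == 20 bytes	(16-byte descriptor + padded name)
 *	EXT4_XATTR_SIZE(5) ==  8 bytes	(padded value)
 *
 * and ext4_xattr_set_entry() below refuses the change with -ENOSPC unless
 * both fit into the gap between the end of the descriptor table and the
 * lowest value offset.
 */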

static int
ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
{
	struct ext4_xattr_entry *last;
	size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);

	/* Compute min_offs and last. */
	last = s->first;
	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
		if (!last->e_value_block && last->e_value_size) {
			size_t offs = le16_to_cpu(last->e_value_offs);
			if (offs < min_offs)
				min_offs = offs;
		}
	}
	free = min_offs - ((void *)last - s->base) - sizeof(__u32);
	if (!s->not_found) {
		if (!s->here->e_value_block && s->here->e_value_size) {
			size_t size = le32_to_cpu(s->here->e_value_size);
			free += EXT4_XATTR_SIZE(size);
		}
		free += EXT4_XATTR_LEN(name_len);
	}
	if (i->value) {
		if (free < EXT4_XATTR_SIZE(i->value_len) ||
		    free < EXT4_XATTR_LEN(name_len) +
			   EXT4_XATTR_SIZE(i->value_len))
			return -ENOSPC;
	}

	if (i->value && s->not_found) {
		/* Insert the new name. */
		size_t size = EXT4_XATTR_LEN(name_len);
		size_t rest = (void *)last - (void *)s->here + sizeof(__u32);
		memmove((void *)s->here + size, s->here, rest);
		memset(s->here, 0, size);
		s->here->e_name_index = i->name_index;
		s->here->e_name_len = name_len;
		memcpy(s->here->e_name, i->name, name_len);
	} else {
		if (!s->here->e_value_block && s->here->e_value_size) {
			void *first_val = s->base + min_offs;
			size_t offs = le16_to_cpu(s->here->e_value_offs);
			void *val = s->base + offs;
			size_t size = EXT4_XATTR_SIZE(
				le32_to_cpu(s->here->e_value_size));

			if (i->value && size == EXT4_XATTR_SIZE(i->value_len)) {
				/* The old and the new value have the same
				   size. Just replace. */
				s->here->e_value_size =
					cpu_to_le32(i->value_len);
				memset(val + size - EXT4_XATTR_PAD, 0,
				       EXT4_XATTR_PAD); /* Clear pad bytes. */
				memcpy(val, i->value, i->value_len);
				return 0;
			}

			/* Remove the old value. */
			memmove(first_val + size, first_val, val - first_val);
			memset(first_val, 0, size);
			s->here->e_value_size = 0;
			s->here->e_value_offs = 0;
			min_offs += size;

			/* Adjust all value offsets. */
			last = s->first;
			while (!IS_LAST_ENTRY(last)) {
				size_t o = le16_to_cpu(last->e_value_offs);
				if (!last->e_value_block &&
				    last->e_value_size && o < offs)
					last->e_value_offs =
						cpu_to_le16(o + size);
				last = EXT4_XATTR_NEXT(last);
			}
		}
		if (!i->value) {
			/* Remove the old name. */
			size_t size = EXT4_XATTR_LEN(name_len);
			last = ENTRY((void *)last - size);
			memmove(s->here, (void *)s->here + size,
				(void *)last - (void *)s->here + sizeof(__u32));
			memset(last, 0, size);
		}
	}

	if (i->value) {
		/* Insert the new value. */
		s->here->e_value_size = cpu_to_le32(i->value_len);
		if (i->value_len) {
			size_t size = EXT4_XATTR_SIZE(i->value_len);
			void *val = s->base + min_offs - size;
			s->here->e_value_offs = cpu_to_le16(min_offs - size);
			memset(val + size - EXT4_XATTR_PAD, 0,
			       EXT4_XATTR_PAD); /* Clear the pad bytes. */
			memcpy(val, i->value, i->value_len);
		}
	}
	return 0;
}

struct ext4_xattr_block_find {
	struct ext4_xattr_search s;
	struct buffer_head *bh;
};

static int
ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
		      struct ext4_xattr_block_find *bs)
{
	struct super_block *sb = inode->i_sb;
	int error;

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  i->name_index, i->name, i->value, (long)i->value_len);

	if (EXT4_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
		error = -EIO;
		if (!bs->bh)
			goto cleanup;
		ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
			atomic_read(&(bs->bh->b_count)),
			le32_to_cpu(BHDR(bs->bh)->h_refcount));
		if (ext4_xattr_check_block(bs->bh)) {
			ext4_error(sb, __FUNCTION__,
				"inode %lu: bad block %llu", inode->i_ino,
				EXT4_I(inode)->i_file_acl);
			error = -EIO;
			goto cleanup;
		}
		/* Find the named attribute. */
		bs->s.base = BHDR(bs->bh);
		bs->s.first = BFIRST(bs->bh);
		bs->s.end = bs->bh->b_data + bs->bh->b_size;
		bs->s.here = bs->s.first;
		error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
					      i->name, bs->bh->b_size, 1);
		if (error && error != -ENODATA)
			goto cleanup;
		bs->s.not_found = error;
	}
	error = 0;

cleanup:
	return error;
}

static int
ext4_xattr_block_set(handle_t *handle, struct inode *inode,
		     struct ext4_xattr_info *i,
		     struct ext4_xattr_block_find *bs)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	struct ext4_xattr_search *s = &bs->s;
	struct mb_cache_entry *ce = NULL;
	int error = 0;

#define header(x) ((struct ext4_xattr_header *)(x))

	if (i->value && i->value_len > sb->s_blocksize)
		return -ENOSPC;
	if (s->base) {
		ce = mb_cache_entry_get(ext4_xattr_cache, bs->bh->b_bdev,
					bs->bh->b_blocknr);
		error = ext4_journal_get_write_access(handle, bs->bh);
		if (error)
			goto cleanup;
		lock_buffer(bs->bh);

		if (header(s->base)->h_refcount == cpu_to_le32(1)) {
			if (ce) {
				mb_cache_entry_free(ce);
				ce = NULL;
			}
			ea_bdebug(bs->bh, "modifying in-place");
			error = ext4_xattr_set_entry(i, s);
			if (!error) {
				if (!IS_LAST_ENTRY(s->first))
					ext4_xattr_rehash(header(s->base),
							  s->here);
				ext4_xattr_cache_insert(bs->bh);
			}
			unlock_buffer(bs->bh);
			if (error == -EIO)
				goto bad_block;
			if (!error)
				error = ext4_journal_dirty_metadata(handle,
								    bs->bh);
			if (error)
				goto cleanup;
			goto inserted;
		} else {
			int offset = (char *)s->here - bs->bh->b_data;

			unlock_buffer(bs->bh);
			jbd2_journal_release_buffer(handle, bs->bh);
			if (ce) {
				mb_cache_entry_release(ce);
				ce = NULL;
			}
			ea_bdebug(bs->bh, "cloning");
			s->base = kmalloc(bs->bh->b_size, GFP_KERNEL);
			error = -ENOMEM;
			if (s->base == NULL)
				goto cleanup;
			memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
			s->first = ENTRY(header(s->base)+1);
			header(s->base)->h_refcount = cpu_to_le32(1);
			s->here = ENTRY(s->base + offset);
			s->end = s->base + bs->bh->b_size;
		}
	} else {
		/* Allocate a buffer where we construct the new block. */
		s->base = kmalloc(sb->s_blocksize, GFP_KERNEL);
		/* assert(header == s->base) */
		error = -ENOMEM;
		if (s->base == NULL)
			goto cleanup;
		memset(s->base, 0, sb->s_blocksize);
		header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
		header(s->base)->h_blocks = cpu_to_le32(1);
		header(s->base)->h_refcount = cpu_to_le32(1);
		s->first = ENTRY(header(s->base)+1);
		s->here = ENTRY(header(s->base)+1);
		s->end = s->base + sb->s_blocksize;
	}

	error = ext4_xattr_set_entry(i, s);
	if (error == -EIO)
		goto bad_block;
	if (error)
		goto cleanup;
	if (!IS_LAST_ENTRY(s->first))
		ext4_xattr_rehash(header(s->base), s->here);

inserted:
	if (!IS_LAST_ENTRY(s->first)) {
		new_bh = ext4_xattr_cache_find(inode, header(s->base), &ce);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == bs->bh)
				ea_bdebug(new_bh, "keeping");
			else {
				/* The old block is released after updating
				   the inode. */
				error = -EDQUOT;
				if (DQUOT_ALLOC_BLOCK(inode, 1))
					goto cleanup;
				error = ext4_journal_get_write_access(handle,
								      new_bh);
				if (error)
					goto cleanup_dquot;
				lock_buffer(new_bh);
				BHDR(new_bh)->h_refcount = cpu_to_le32(1 +
					le32_to_cpu(BHDR(new_bh)->h_refcount));
				ea_bdebug(new_bh, "reusing; refcount now=%d",
					le32_to_cpu(BHDR(new_bh)->h_refcount));
				unlock_buffer(new_bh);
				error = ext4_journal_dirty_metadata(handle,
								    new_bh);
				if (error)
					goto cleanup_dquot;
			}
			mb_cache_entry_release(ce);
			ce = NULL;
		} else if (bs->bh && s->base == bs->bh->b_data) {
			/* We were modifying this block in-place. */
			ea_bdebug(bs->bh, "keeping this block");
			new_bh = bs->bh;
			get_bh(new_bh);
		} else {
			/* We need to allocate a new block */
			ext4_fsblk_t goal = le32_to_cpu(
					EXT4_SB(sb)->s_es->s_first_data_block) +
				(ext4_fsblk_t)EXT4_I(inode)->i_block_group *
				EXT4_BLOCKS_PER_GROUP(sb);
			ext4_fsblk_t block = ext4_new_block(handle, inode,
							    goal, &error);
			if (error)
				goto cleanup;
			ea_idebug(inode, "creating block %d", block);

			new_bh = sb_getblk(sb, block);
			if (!new_bh) {
getblk_failed:
				ext4_free_blocks(handle, inode, block, 1);
				error = -EIO;
				goto cleanup;
			}
			lock_buffer(new_bh);
			error = ext4_journal_get_create_access(handle, new_bh);
			if (error) {
				unlock_buffer(new_bh);
				goto getblk_failed;
			}
			memcpy(new_bh->b_data, s->base, new_bh->b_size);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
			ext4_xattr_cache_insert(new_bh);
			error = ext4_journal_dirty_metadata(handle, new_bh);
			if (error)
				goto cleanup;
		}
	}

	/* Update the inode. */
	EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;

	/* Drop the previous xattr block. */
	if (bs->bh && bs->bh != new_bh)
		ext4_xattr_release_block(handle, inode, bs->bh);
	error = 0;

cleanup:
	if (ce)
		mb_cache_entry_release(ce);
	brelse(new_bh);
	if (!(bs->bh && s->base == bs->bh->b_data))
		kfree(s->base);

	return error;

cleanup_dquot:
	DQUOT_FREE_BLOCK(inode, 1);
	goto cleanup;

bad_block:
	ext4_error(inode->i_sb, __FUNCTION__,
		   "inode %lu: bad block %llu", inode->i_ino,
		   EXT4_I(inode)->i_file_acl);
	goto cleanup;

#undef header
}

struct ext4_xattr_ibody_find {
	struct ext4_xattr_search s;
	struct ext4_iloc iloc;
};

static int
ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
		      struct ext4_xattr_ibody_find *is)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_inode *raw_inode;
	int error;

	if (EXT4_I(inode)->i_extra_isize == 0)
		return 0;
	raw_inode = ext4_raw_inode(&is->iloc);
	header = IHDR(inode, raw_inode);
	is->s.base = is->s.first = IFIRST(header);
	is->s.here = is->s.first;
	is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	if (EXT4_I(inode)->i_state & EXT4_STATE_XATTR) {
		error = ext4_xattr_check_names(IFIRST(header), is->s.end);
		if (error)
			return error;
		/* Find the named attribute. */
		error = ext4_xattr_find_entry(&is->s.here, i->name_index,
					      i->name, is->s.end -
					      (void *)is->s.base, 0);
		if (error && error != -ENODATA)
			return error;
		is->s.not_found = error;
	}
	return 0;
}

static int
ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
		     struct ext4_xattr_info *i,
		     struct ext4_xattr_ibody_find *is)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_search *s = &is->s;
	int error;

	if (EXT4_I(inode)->i_extra_isize == 0)
		return -ENOSPC;
	error = ext4_xattr_set_entry(i, s);
	if (error)
		return error;
	header = IHDR(inode, ext4_raw_inode(&is->iloc));
	if (!IS_LAST_ENTRY(s->first)) {
		header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
		EXT4_I(inode)->i_state |= EXT4_STATE_XATTR;
	} else {
		header->h_magic = cpu_to_le32(0);
		EXT4_I(inode)->i_state &= ~EXT4_STATE_XATTR;
	}
	return 0;
}

/*
 * ext4_xattr_set_handle()
 *
 * Create, replace or remove an extended attribute for this inode. Value
 * is NULL to remove an existing extended attribute, and non-NULL to
 * either replace an existing extended attribute, or create a new extended
 * attribute. The flags XATTR_REPLACE and XATTR_CREATE
 * specify that an extended attribute must exist and must not exist
 * previous to the call, respectively.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
		      const char *name, const void *value, size_t value_len,
		      int flags)
{
	struct ext4_xattr_info i = {
		.name_index = name_index,
		.name = name,
		.value = value,
		.value_len = value_len,
	};
	struct ext4_xattr_ibody_find is = {
		.s = { .not_found = -ENODATA, },
	};
	struct ext4_xattr_block_find bs = {
		.s = { .not_found = -ENODATA, },
	};
	int error;

	if (!name)
		return -EINVAL;
	if (strlen(name) > 255)
		return -ERANGE;
	down_write(&EXT4_I(inode)->xattr_sem);
	error = ext4_get_inode_loc(inode, &is.iloc);
	if (error)
		goto cleanup;

	if (EXT4_I(inode)->i_state & EXT4_STATE_NEW) {
		struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
		memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
		EXT4_I(inode)->i_state &= ~EXT4_STATE_NEW;
	}

	error = ext4_xattr_ibody_find(inode, &i, &is);
	if (error)
		goto cleanup;
	if (is.s.not_found)
		error = ext4_xattr_block_find(inode, &i, &bs);
	if (error)
		goto cleanup;
	if (is.s.not_found && bs.s.not_found) {
		error = -ENODATA;
		if (flags & XATTR_REPLACE)
			goto cleanup;
		error = 0;
		if (!value)
			goto cleanup;
	} else {
		error = -EEXIST;
		if (flags & XATTR_CREATE)
			goto cleanup;
	}
	error = ext4_journal_get_write_access(handle, is.iloc.bh);
	if (error)
		goto cleanup;
	if (!value) {
		if (!is.s.not_found)
			error = ext4_xattr_ibody_set(handle, inode, &i, &is);
		else if (!bs.s.not_found)
			error = ext4_xattr_block_set(handle, inode, &i, &bs);
	} else {
		error = ext4_xattr_ibody_set(handle, inode, &i, &is);
		if (!error && !bs.s.not_found) {
			i.value = NULL;
			error = ext4_xattr_block_set(handle, inode, &i, &bs);
		} else if (error == -ENOSPC) {
			error = ext4_xattr_block_set(handle, inode, &i, &bs);
			if (error)
				goto cleanup;
			if (!is.s.not_found) {
				i.value = NULL;
				error = ext4_xattr_ibody_set(handle, inode, &i,
							     &is);
			}
		}
	}
	if (!error) {
		ext4_xattr_update_super_block(handle, inode->i_sb);
		inode->i_ctime = CURRENT_TIME_SEC;
		error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
		/*
		 * The bh is consumed by ext4_mark_iloc_dirty, even with
		 * error != 0.
		 */
		is.iloc.bh = NULL;
		if (IS_SYNC(inode))
			handle->h_sync = 1;
	}

cleanup:
	brelse(is.iloc.bh);
	brelse(bs.bh);
	up_write(&EXT4_I(inode)->xattr_sem);
	return error;
}
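
/*
 * Minimal usage sketch (hypothetical in-kernel caller; error handling
 * trimmed).  The caller must already hold a transaction handle with
 * enough credits, which is what ext4_xattr_set() below arranges:
 *
 *	error = ext4_xattr_set_handle(handle, inode,
 *				      EXT4_XATTR_INDEX_USER, "foo",
 *				      "bar", 3, 0);
 *
 * Passing value == NULL removes the attribute; XATTR_CREATE and
 * XATTR_REPLACE make the call fail if the attribute does or does not
 * already exist, respectively.
 */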

/*
 * ext4_xattr_set()
 *
 * Like ext4_xattr_set_handle, but start from an inode. This extended
 * attribute modification is a filesystem transaction by itself.
 *
 * Returns 0, or a negative error number on failure.
 */
int
ext4_xattr_set(struct inode *inode, int name_index, const char *name,
	       const void *value, size_t value_len, int flags)
{
	handle_t *handle;
	int error, retries = 0;

retry:
	handle = ext4_journal_start(inode, EXT4_DATA_TRANS_BLOCKS(inode->i_sb));
	if (IS_ERR(handle)) {
		error = PTR_ERR(handle);
	} else {
		int error2;

		error = ext4_xattr_set_handle(handle, inode, name_index, name,
					      value, value_len, flags);
		error2 = ext4_journal_stop(handle);
		if (error == -ENOSPC &&
		    ext4_should_retry_alloc(inode->i_sb, &retries))
			goto retry;
		if (error == 0)
			error = error2;
	}

	return error;
}

/*
 * ext4_xattr_delete_inode()
 *
 * Free extended attribute resources associated with this inode. This
 * is called immediately before an inode is freed. We have exclusive
 * access to the inode.
 */
void
ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
{
	struct buffer_head *bh = NULL;

	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	if (!bh) {
		ext4_error(inode->i_sb, __FUNCTION__,
			"inode %lu: block %llu read error", inode->i_ino,
			EXT4_I(inode)->i_file_acl);
		goto cleanup;
	}
	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
	    BHDR(bh)->h_blocks != cpu_to_le32(1)) {
		ext4_error(inode->i_sb, __FUNCTION__,
			"inode %lu: bad block %llu", inode->i_ino,
			EXT4_I(inode)->i_file_acl);
		goto cleanup;
	}
	ext4_xattr_release_block(handle, inode, bh);
	EXT4_I(inode)->i_file_acl = 0;

cleanup:
	brelse(bh);
}

/*
 * ext4_xattr_put_super()
 *
 * This is called when a file system is unmounted.
 */
void
ext4_xattr_put_super(struct super_block *sb)
{
	mb_cache_shrink(sb->s_bdev);
}

/*
 * ext4_xattr_cache_insert()
 *
 * Create a new entry in the extended attribute cache, and insert
 * it unless such an entry is already in the cache.
 *
 * Returns nothing. The cache is only an optimization, so insertion
 * failures are not reported to the caller; they only show up in the
 * debugging output.
 */
static void
ext4_xattr_cache_insert(struct buffer_head *bh)
{
	__u32 hash = le32_to_cpu(BHDR(bh)->h_hash);
	struct mb_cache_entry *ce;
	int error;

	ce = mb_cache_entry_alloc(ext4_xattr_cache);
	if (!ce) {
		ea_bdebug(bh, "out of memory");
		return;
	}
	error = mb_cache_entry_insert(ce, bh->b_bdev, bh->b_blocknr, &hash);
	if (error) {
		mb_cache_entry_free(ce);
		if (error == -EBUSY) {
			ea_bdebug(bh, "already in cache");
			error = 0;
		}
	} else {
		ea_bdebug(bh, "inserting [%x]", (int)hash);
		mb_cache_entry_release(ce);
	}
}

/*
 * ext4_xattr_cmp()
 *
 * Compare two extended attribute blocks for equality.
 *
 * Returns 0 if the blocks are equal, 1 if they differ, and
 * a negative error number on errors.
 */
static int
ext4_xattr_cmp(struct ext4_xattr_header *header1,
	       struct ext4_xattr_header *header2)
{
	struct ext4_xattr_entry *entry1, *entry2;

	entry1 = ENTRY(header1+1);
	entry2 = ENTRY(header2+1);
	while (!IS_LAST_ENTRY(entry1)) {
		if (IS_LAST_ENTRY(entry2))
			return 1;
		if (entry1->e_hash != entry2->e_hash ||
		    entry1->e_name_index != entry2->e_name_index ||
		    entry1->e_name_len != entry2->e_name_len ||
		    entry1->e_value_size != entry2->e_value_size ||
		    memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
			return 1;
		if (entry1->e_value_block != 0 || entry2->e_value_block != 0)
			return -EIO;
		if (memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
			   (char *)header2 + le16_to_cpu(entry2->e_value_offs),
			   le32_to_cpu(entry1->e_value_size)))
			return 1;

		entry1 = EXT4_XATTR_NEXT(entry1);
		entry2 = EXT4_XATTR_NEXT(entry2);
	}
	if (!IS_LAST_ENTRY(entry2))
		return 1;
	return 0;
}

/*
 * ext4_xattr_cache_find()
 *
 * Find an identical extended attribute block.
 *
 * Returns a pointer to the block found, or NULL if such a block was
 * not found or an error occurred.
 */
static struct buffer_head *
ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
		      struct mb_cache_entry **pce)
{
	__u32 hash = le32_to_cpu(header->h_hash);
	struct mb_cache_entry *ce;

	if (!header->h_hash)
		return NULL;  /* never share */
	ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
again:
	ce = mb_cache_entry_find_first(ext4_xattr_cache, 0,
				       inode->i_sb->s_bdev, hash);
	while (ce) {
		struct buffer_head *bh;

		if (IS_ERR(ce)) {
			if (PTR_ERR(ce) == -EAGAIN)
				goto again;
			break;
		}
		bh = sb_bread(inode->i_sb, ce->e_block);
		if (!bh) {
			ext4_error(inode->i_sb, __FUNCTION__,
				"inode %lu: block %lu read error",
				inode->i_ino, (unsigned long) ce->e_block);
		} else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
				EXT4_XATTR_REFCOUNT_MAX) {
			ea_idebug(inode, "block %lu refcount %d>=%d",
				  (unsigned long) ce->e_block,
				  le32_to_cpu(BHDR(bh)->h_refcount),
				  EXT4_XATTR_REFCOUNT_MAX);
		} else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
			*pce = ce;
			return bh;
		}
		brelse(bh);
		ce = mb_cache_entry_find_next(ce, 0, inode->i_sb->s_bdev, hash);
	}
	return NULL;
}
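
/*
 * Sharing example (sketch): when two inodes are given byte-for-byte
 * identical attribute sets, the second ext4_xattr_block_set() computes
 * the same h_hash, finds the first inode's block via
 * ext4_xattr_cache_find(), bumps its h_refcount and points i_file_acl at
 * the shared block instead of allocating a new one.
 */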

#define NAME_HASH_SHIFT 5
#define VALUE_HASH_SHIFT 16

/*
 * ext4_xattr_hash_entry()
 *
 * Compute the hash of an extended attribute.
 */
static inline void ext4_xattr_hash_entry(struct ext4_xattr_header *header,
					 struct ext4_xattr_entry *entry)
{
	__u32 hash = 0;
	char *name = entry->e_name;
	int n;

	for (n = 0; n < entry->e_name_len; n++) {
		hash = (hash << NAME_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
		       *name++;
	}

	if (entry->e_value_block == 0 && entry->e_value_size != 0) {
		__le32 *value = (__le32 *)((char *)header +
			le16_to_cpu(entry->e_value_offs));
		for (n = (le32_to_cpu(entry->e_value_size) +
			 EXT4_XATTR_ROUND) >> EXT4_XATTR_PAD_BITS; n; n--) {
			hash = (hash << VALUE_HASH_SHIFT) ^
			       (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
			       le32_to_cpu(*value++);
		}
	}
	entry->e_hash = cpu_to_le32(hash);
}

#undef NAME_HASH_SHIFT
#undef VALUE_HASH_SHIFT

#define BLOCK_HASH_SHIFT 16

/*
 * ext4_xattr_rehash()
 *
 * Re-compute the extended attribute hash value after an entry has changed.
 */
static void ext4_xattr_rehash(struct ext4_xattr_header *header,
			      struct ext4_xattr_entry *entry)
{
	struct ext4_xattr_entry *here;
	__u32 hash = 0;

	ext4_xattr_hash_entry(header, entry);
	here = ENTRY(header+1);
	while (!IS_LAST_ENTRY(here)) {
		if (!here->e_hash) {
			/* Block is not shared if an entry's hash value == 0 */
			hash = 0;
			break;
		}
		hash = (hash << BLOCK_HASH_SHIFT) ^
		       (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
		       le32_to_cpu(here->e_hash);
		here = EXT4_XATTR_NEXT(here);
	}
	header->h_hash = cpu_to_le32(hash);
}

#undef BLOCK_HASH_SHIFT

int __init
init_ext4_xattr(void)
{
	ext4_xattr_cache = mb_cache_create("ext4_xattr", NULL,
		sizeof(struct mb_cache_entry) +
		sizeof(((struct mb_cache_entry *) 0)->e_indexes[0]), 1, 6);
	if (!ext4_xattr_cache)
		return -ENOMEM;
	return 0;
}

void
exit_ext4_xattr(void)
{
	if (ext4_xattr_cache)
		mb_cache_destroy(ext4_xattr_cache);
	ext4_xattr_cache = NULL;
}