/*
 *	Copyright (C) International Business Machines Corp., 2000-2004
 *	Copyright (C) Christoph Hellwig, 2002
 *
 *	This program is free software; you can redistribute it and/or modify
 *	it under the terms of the GNU General Public License as published by
 *	the Free Software Foundation; either version 2 of the License, or
 *	(at your option) any later version.
 *
 *	This program is distributed in the hope that it will be useful,
 *	but WITHOUT ANY WARRANTY; without even the implied warranty of
 *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *	the GNU General Public License for more details.
 *
 *	You should have received a copy of the GNU General Public License
 *	along with this program; if not, write to the Free Software
 *	Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/xattr.h>
#include <linux/quotaops.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_debug.h"
#include "jfs_dinode.h"
#include "jfs_extent.h"
#include "jfs_metapage.h"
#include "jfs_xattr.h"
#include "jfs_acl.h"

/*
 *	jfs_xattr.c: extended attribute service
 *
 * Overall design --
 *
 * Format:
 *
 *   Extended attribute lists (jfs_ea_list) consist of an overall size (32 bit
 *   value) and a variable (0 or more) number of extended attribute
 *   entries.  Each extended attribute entry (jfs_ea) is a <name,value> pair
 *   where <name> is constructed from a null-terminated ascii string
 *   (1 ... 255 bytes in the name) and <value> is arbitrary 8 bit data
 *   (1 ... 65535 bytes).  The in-memory format is
 *
 *   0       1        2        4                4 + namelen + 1
 *   +-------+--------+--------+----------------+-------------------+
 *   | Flags | Name   | Value  | Name String \0 | Data . . . .      |
 *   |       | Length | Length |                |                   |
 *   +-------+--------+--------+----------------+-------------------+
 *
 *   A jfs_ea_list then is structured as
 *
 *   0            4                   4 + EA_SIZE(ea1)
 *   +------------+-------------------+--------------------+-----
 *   | Overall EA | First FEA Element | Second FEA Element | .....
 *   | List Size  |                   |                    |
 *   +------------+-------------------+--------------------+-----
 *
 *   On-disk:
 *
 *	FEALISTs are stored on disk using blocks allocated by dbAlloc() and
 *	written directly.  An EA list may be in-lined in the inode if there is
 *	sufficient room available.
 */

struct ea_buffer {
	int flag;		/* Indicates what storage xattr points to */
	int max_size;		/* largest xattr that fits in current buffer */
	dxd_t new_ea;		/* dxd to replace ea when modifying xattr */
	struct metapage *mp;	/* metapage containing ea list */
	struct jfs_ea_list *xattr;	/* buffer containing ea list */
};

/*
 * ea_buffer.flag values
 */
#define EA_INLINE	0x0001
#define EA_EXTENT	0x0002
#define EA_NEW		0x0004
#define EA_MALLOC	0x0008

/* Namespaces */
#define XATTR_SYSTEM_PREFIX "system."
#define XATTR_SYSTEM_PREFIX_LEN (sizeof (XATTR_SYSTEM_PREFIX) - 1)

#define XATTR_USER_PREFIX "user."
#define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)

#define XATTR_OS2_PREFIX "os2."
#define XATTR_OS2_PREFIX_LEN (sizeof (XATTR_OS2_PREFIX) - 1)

/* XATTR_SECURITY_PREFIX is defined in include/linux/xattr.h */
#define XATTR_SECURITY_PREFIX_LEN (sizeof (XATTR_SECURITY_PREFIX) - 1)

#define XATTR_TRUSTED_PREFIX "trusted."
#define XATTR_TRUSTED_PREFIX_LEN (sizeof (XATTR_TRUSTED_PREFIX) - 1)

/*
 * These three routines are used to recognize on-disk extended attributes
 * that are in a recognized namespace.  If the attribute is not recognized,
 * "os2." is prepended to the name
 */
static inline int is_os2_xattr(struct jfs_ea *ea)
{
	/*
	 * Check for "system."
	 */
	if ((ea->namelen >= XATTR_SYSTEM_PREFIX_LEN) &&
	    !strncmp(ea->name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
		return FALSE;
	/*
	 * Check for "user."
	 */
	if ((ea->namelen >= XATTR_USER_PREFIX_LEN) &&
	    !strncmp(ea->name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
		return FALSE;
	/*
	 * Check for "security."
	 */
	if ((ea->namelen >= XATTR_SECURITY_PREFIX_LEN) &&
	    !strncmp(ea->name, XATTR_SECURITY_PREFIX,
		     XATTR_SECURITY_PREFIX_LEN))
		return FALSE;
	/*
	 * Check for "trusted."
	 */
	if ((ea->namelen >= XATTR_TRUSTED_PREFIX_LEN) &&
	    !strncmp(ea->name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
		return FALSE;
	/*
	 * Add any other valid namespace prefixes here
	 */

	/*
	 * We assume it's OS/2's flat namespace
	 */
	return TRUE;
}

static inline int name_size(struct jfs_ea *ea)
{
	if (is_os2_xattr(ea))
		return ea->namelen + XATTR_OS2_PREFIX_LEN;
	else
		return ea->namelen;
}

static inline int copy_name(char *buffer, struct jfs_ea *ea)
{
	int len = ea->namelen;

	if (is_os2_xattr(ea)) {
		memcpy(buffer, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN);
		buffer += XATTR_OS2_PREFIX_LEN;
		len += XATTR_OS2_PREFIX_LEN;
	}
	memcpy(buffer, ea->name, ea->namelen);
	buffer[ea->namelen] = 0;

	return len;
}

/* Forward references */
static void ea_release(struct inode *inode, struct ea_buffer *ea_buf);
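
/*
 * Illustrative sketch only (not compiled): walking an EA list with the
 * FIRST_EA()/NEXT_EA()/END_EALIST() helpers (see jfs_xattr.h), the same
 * pattern used by __jfs_setxattr() and __jfs_getxattr() below.
 * do_something() is a placeholder, not a real routine.
 *
 *	struct jfs_ea_list *ealist = ea_buf.xattr;
 *	struct jfs_ea *ea;
 *	char *value;
 *
 *	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist);
 *	     ea = NEXT_EA(ea)) {
 *		value = &ea->name[ea->namelen + 1];
 *		do_something(ea->name, ea->namelen,
 *			     value, le16_to_cpu(ea->valuelen));
 *	}
 *
 * The value bytes start right after the name's trailing NUL, and
 * EA_SIZE(ea) gives the size of the whole entry.
 */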

/*
 * NAME: ea_write_inline
 *
 * FUNCTION: Attempt to write an EA inline if area is available
 *
 * PRE CONDITIONS:
 *	Already verified that the specified EA is small enough to fit inline
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- EA list pointer
 *	size	- size of ealist in bytes
 *	ea	- dxd_t structure to be filled in with necessary EA information
 *		  if we successfully copy the EA inline
 *
 * NOTES:
 *	Checks if the inode's inline area is available.  If so, copies the EA
 *	inline and sets <ea> fields appropriately.  Otherwise, returns failure
 *	and the EA will have to be put into an extent.
 *
 * RETURNS: 0 for successful copy to inline area; -EPERM if area not available
 */
static int ea_write_inline(struct inode *ip, struct jfs_ea_list *ealist,
			   int size, dxd_t * ea)
{
	struct jfs_inode_info *ji = JFS_IP(ip);

	/*
	 * Make sure we have an EA -- the NULL EA list is valid, but you
	 * can't copy it!
	 */
	if (ealist && size > sizeof (struct jfs_ea_list)) {
		assert(size <= sizeof (ji->i_inline_ea));

		/*
		 * See if the space is available or if it is already being
		 * used for an inline EA.
		 */
		if (!(ji->mode2 & INLINEEA) && !(ji->ea.flag & DXD_INLINE))
			return -EPERM;

		DXDsize(ea, size);
		DXDlength(ea, 0);
		DXDaddress(ea, 0);
		memcpy(ji->i_inline_ea, ealist, size);
		ea->flag = DXD_INLINE;
		ji->mode2 &= ~INLINEEA;
	} else {
		ea->flag = 0;
		DXDsize(ea, 0);
		DXDlength(ea, 0);
		DXDaddress(ea, 0);

		/* Free up INLINE area */
		if (ji->ea.flag & DXD_INLINE)
			ji->mode2 |= INLINEEA;
	}

	return 0;
}

/*
 * NAME: ea_write
 *
 * FUNCTION: Write an EA for an inode
 *
 * PRE CONDITIONS: EA has been verified
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- EA list pointer
 *	size	- size of ealist in bytes
 *	ea	- dxd_t structure to be filled in appropriately with where the
 *		  EA was copied
 *
 * NOTES: Will write EA inline if able to, otherwise allocates blocks for an
 *	extent and synchronously writes it to those blocks.
 *
 * RETURNS: 0 for success; Anything else indicates failure
 */
static int ea_write(struct inode *ip, struct jfs_ea_list *ealist, int size,
		    dxd_t * ea)
{
	struct super_block *sb = ip->i_sb;
	struct jfs_inode_info *ji = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int nblocks;
	s64 blkno;
	int rc = 0, i;
	char *cp;
	s32 nbytes, nb;
	s32 bytes_to_write;
	struct metapage *mp;

	/*
	 * Quick check to see if this is an in-linable EA.  Short EAs
	 * and empty EAs are all in-linable, provided the space exists.
	 */
	if (!ealist || size <= sizeof (ji->i_inline_ea)) {
		if (!ea_write_inline(ip, ealist, size, ea))
			return 0;
	}

	/* figure out how many blocks we need */
	nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits;

	/* Allocate new blocks to quota. */
	if (DQUOT_ALLOC_BLOCK(ip, nblocks)) {
		return -EDQUOT;
	}

	rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
	if (rc) {
		/* Rollback quota allocation. */
		DQUOT_FREE_BLOCK(ip, nblocks);
		return rc;
	}

	/*
	 * Now have nblocks worth of storage to stuff into the FEALIST.
	 * Loop over the FEALIST, copying data into the buffer one page at
	 * a time.
	 */
	cp = (char *) ealist;
	nbytes = size;
	for (i = 0; i < nblocks; i += sbi->nbperpage) {
		/*
		 * Determine how many bytes for this request, and round up to
		 * the nearest aggregate block size
		 */
		nb = min(PSIZE, nbytes);
		bytes_to_write =
		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
		    << sb->s_blocksize_bits;

		if (!(mp = get_metapage(ip, blkno + i, bytes_to_write, 1))) {
			rc = -EIO;
			goto failed;
		}

		memcpy(mp->data, cp, nb);

		/*
		 * We really need a way to propagate errors for
		 * forced writes like this one.  --hch
		 *
		 * (__write_metapage => release_metapage => flush_metapage)
		 */
#ifdef _JFS_FIXME
		if ((rc = flush_metapage(mp))) {
			/*
			 * the write failed -- this means that the buffer
			 * is still assigned and the blocks are not being
			 * used.  this seems like the best error recovery
			 * we can get ...
			 */
			goto failed;
		}
#else
		flush_metapage(mp);
#endif

		cp += PSIZE;
		nbytes -= nb;
	}

	ea->flag = DXD_EXTENT;
	DXDsize(ea, le32_to_cpu(ealist->size));
	DXDlength(ea, nblocks);
	DXDaddress(ea, blkno);

	/* Free up INLINE area */
	if (ji->ea.flag & DXD_INLINE)
		ji->mode2 |= INLINEEA;

	return 0;

      failed:
	/* Rollback quota allocation. */
	DQUOT_FREE_BLOCK(ip, nblocks);

	dbFree(ip, blkno, nblocks);
	return rc;
}

/*
 * NAME: ea_read_inline
 *
 * FUNCTION: Read an inlined EA into user's buffer
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- Pointer to buffer to fill in with EA
 *
 * RETURNS: 0
 */
static int ea_read_inline(struct inode *ip, struct jfs_ea_list *ealist)
{
	struct jfs_inode_info *ji = JFS_IP(ip);
	int ea_size = sizeDXD(&ji->ea);

	if (ea_size == 0) {
		ealist->size = 0;
		return 0;
	}

	/* Sanity Check */
	if ((sizeDXD(&ji->ea) > sizeof (ji->i_inline_ea)))
		return -EIO;
	if (le32_to_cpu(((struct jfs_ea_list *) &ji->i_inline_ea)->size)
	    != ea_size)
		return -EIO;

	memcpy(ealist, ji->i_inline_ea, ea_size);
	return 0;
}

/*
 * NAME: ea_read
 *
 * FUNCTION: copy EA data into user's buffer
 *
 * PARAMETERS:
 *	ip	- Inode pointer
 *	ealist	- Pointer to buffer to fill in with EA
 *
 * NOTES: If the EA is inline, calls ea_read_inline() to copy it.
 *
 * RETURNS: 0 for success; other indicates failure
 */
static int ea_read(struct inode *ip, struct jfs_ea_list *ealist)
{
	struct super_block *sb = ip->i_sb;
	struct jfs_inode_info *ji = JFS_IP(ip);
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int nblocks;
	s64 blkno;
	char *cp = (char *) ealist;
	int i;
	int nbytes, nb;
	s32 bytes_to_read;
	struct metapage *mp;

	/* quick check for in-line EA */
	if (ji->ea.flag & DXD_INLINE)
		return ea_read_inline(ip, ealist);

	nbytes = sizeDXD(&ji->ea);
	if (!nbytes) {
		jfs_error(sb, "ea_read: nbytes is 0");
		return -EIO;
	}

	/*
	 * Figure out how many blocks were allocated when this EA list was
	 * originally written to disk.
	 */
	nblocks = lengthDXD(&ji->ea) << sbi->l2nbperpage;
	blkno = addressDXD(&ji->ea) << sbi->l2nbperpage;

	/*
	 * We have found the disk blocks which were originally used to store
	 * the FEALIST.  Now loop over each contiguous block, copying the
	 * data into the buffer.
	 */
	for (i = 0; i < nblocks; i += sbi->nbperpage) {
		/*
		 * Determine how many bytes for this request, and round up to
		 * the nearest aggregate block size
		 */
		nb = min(PSIZE, nbytes);
		bytes_to_read =
		    ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits))
		    << sb->s_blocksize_bits;

		if (!(mp = read_metapage(ip, blkno + i, bytes_to_read, 1)))
			return -EIO;

		memcpy(cp, mp->data, nb);
		release_metapage(mp);

		cp += PSIZE;
		nbytes -= nb;
	}

	return 0;
}

/*
 * NAME: ea_get
 *
 * FUNCTION: Returns buffer containing existing extended attributes.
 *	The size of the buffer will be the larger of the existing
 *	attributes' size or min_size.
 *
 *	The buffer, which may be inlined in the inode or in the
 *	page cache, must be released by calling ea_release or ea_put.
 *
 * PARAMETERS:
 *	inode	- Inode pointer
 *	ea_buf	- Structure to be populated with ealist and its metadata
 *	min_size- minimum size of buffer to be returned
 *
 * RETURNS: 0 for success; Other indicates failure
 */
static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
{
	struct jfs_inode_info *ji = JFS_IP(inode);
	struct super_block *sb = inode->i_sb;
	int size;
	int ea_size = sizeDXD(&ji->ea);
	int blocks_needed, current_blocks;
	s64 blkno;
	int rc;
	int quota_allocation = 0;

	/* When fsck.jfs clears a bad ea, it doesn't clear the size */
	if (ji->ea.flag == 0)
		ea_size = 0;

	if (ea_size == 0) {
		if (min_size == 0) {
			ea_buf->flag = 0;
			ea_buf->max_size = 0;
			ea_buf->xattr = NULL;
			return 0;
		}
		if ((min_size <= sizeof (ji->i_inline_ea)) &&
		    (ji->mode2 & INLINEEA)) {
			ea_buf->flag = EA_INLINE | EA_NEW;
			ea_buf->max_size = sizeof (ji->i_inline_ea);
			ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
			DXDlength(&ea_buf->new_ea, 0);
			DXDaddress(&ea_buf->new_ea, 0);
			ea_buf->new_ea.flag = DXD_INLINE;
			DXDsize(&ea_buf->new_ea, min_size);
			return 0;
		}
		current_blocks = 0;
	} else if (ji->ea.flag & DXD_INLINE) {
		if (min_size <= sizeof (ji->i_inline_ea)) {
			ea_buf->flag = EA_INLINE;
			ea_buf->max_size = sizeof (ji->i_inline_ea);
			ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea;
			goto size_check;
		}
		current_blocks = 0;
	} else {
		if (!(ji->ea.flag & DXD_EXTENT)) {
			jfs_error(sb, "ea_get: invalid ea.flag");
			return -EIO;
		}
		current_blocks = (ea_size + sb->s_blocksize - 1) >>
		    sb->s_blocksize_bits;
	}
	size = max(min_size, ea_size);

	if (size > PSIZE) {
		/*
		 * To keep the rest of the code simple, allocate a
		 * contiguous buffer to work with.
		 */
		ea_buf->xattr = kmalloc(size, GFP_KERNEL);
		if (ea_buf->xattr == NULL)
			return -ENOMEM;

		ea_buf->flag = EA_MALLOC;
		ea_buf->max_size = (size + sb->s_blocksize - 1) &
		    ~(sb->s_blocksize - 1);

		if (ea_size == 0)
			return 0;

		if ((rc = ea_read(inode, ea_buf->xattr))) {
			kfree(ea_buf->xattr);
			ea_buf->xattr = NULL;
			return rc;
		}
		goto size_check;
	}
	blocks_needed = (min_size + sb->s_blocksize - 1) >>
	    sb->s_blocksize_bits;

	if (blocks_needed > current_blocks) {
		/* Allocate new blocks to quota. */
		if (DQUOT_ALLOC_BLOCK(inode, blocks_needed))
			return -EDQUOT;

		quota_allocation = blocks_needed;

		rc = dbAlloc(inode, INOHINT(inode), (s64) blocks_needed,
			     &blkno);
		if (rc)
			goto clean_up;

		DXDlength(&ea_buf->new_ea, blocks_needed);
		DXDaddress(&ea_buf->new_ea, blkno);
		ea_buf->new_ea.flag = DXD_EXTENT;
		DXDsize(&ea_buf->new_ea, min_size);

		ea_buf->flag = EA_EXTENT | EA_NEW;

		ea_buf->mp = get_metapage(inode, blkno,
					  blocks_needed << sb->s_blocksize_bits,
					  1);
		if (ea_buf->mp == NULL) {
			dbFree(inode, blkno, (s64) blocks_needed);
			rc = -EIO;
			goto clean_up;
		}
		ea_buf->xattr = ea_buf->mp->data;
		ea_buf->max_size = (min_size + sb->s_blocksize - 1) &
		    ~(sb->s_blocksize - 1);
		if (ea_size == 0)
			return 0;
		if ((rc = ea_read(inode, ea_buf->xattr))) {
			discard_metapage(ea_buf->mp);
			dbFree(inode, blkno, (s64) blocks_needed);
			goto clean_up;
		}
		goto size_check;
	}
	ea_buf->flag = EA_EXTENT;
	ea_buf->mp = read_metapage(inode, addressDXD(&ji->ea),
				   lengthDXD(&ji->ea) << sb->s_blocksize_bits,
				   1);
	if (ea_buf->mp == NULL) {
		rc = -EIO;
		goto clean_up;
	}
	ea_buf->xattr = ea_buf->mp->data;
	ea_buf->max_size = (ea_size + sb->s_blocksize - 1) &
	    ~(sb->s_blocksize - 1);

      size_check:
	if (EALIST_SIZE(ea_buf->xattr) != ea_size) {
		printk(KERN_ERR "ea_get: invalid extended attribute\n");
		dump_mem("xattr", ea_buf->xattr, ea_size);
		ea_release(inode, ea_buf);
		rc = -EIO;
		goto clean_up;
	}

	return ea_size;

      clean_up:
	/* Rollback quota allocation */
	if (quota_allocation)
		DQUOT_FREE_BLOCK(inode, quota_allocation);

	return (rc);
}

static void ea_release(struct inode *inode, struct ea_buffer *ea_buf)
{
	if (ea_buf->flag & EA_MALLOC)
		kfree(ea_buf->xattr);
	else if (ea_buf->flag & EA_EXTENT) {
		assert(ea_buf->mp);
		release_metapage(ea_buf->mp);

		if (ea_buf->flag & EA_NEW)
			dbFree(inode, addressDXD(&ea_buf->new_ea),
			       lengthDXD(&ea_buf->new_ea));
	}
}
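
/*
 * Illustrative sketch of the ea_buffer life cycle (see __jfs_setxattr()
 * below for the real sequence):
 *
 *	down_write(&JFS_IP(inode)->xattr_sem);
 *	size = ea_get(inode, &ea_buf, 0);	- map the existing EA list
 *	... examine or modify ea_buf.xattr in place ...
 *	rc = ea_put(inode, &ea_buf, new_size);	- log and write back the list
 *	up_write(&JFS_IP(inode)->xattr_sem);
 *
 * On error or read-only paths, ea_release() is called instead of ea_put()
 * to drop the buffer (and any newly allocated blocks) without committing.
 */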

static int ea_put(struct inode *inode, struct ea_buffer *ea_buf, int new_size)
{
	struct jfs_inode_info *ji = JFS_IP(inode);
	unsigned long old_blocks, new_blocks;
	int rc = 0;
	tid_t tid;

	if (new_size == 0) {
		ea_release(inode, ea_buf);
		ea_buf = NULL;
	} else if (ea_buf->flag & EA_INLINE) {
		assert(new_size <= sizeof (ji->i_inline_ea));
		ji->mode2 &= ~INLINEEA;
		ea_buf->new_ea.flag = DXD_INLINE;
		DXDsize(&ea_buf->new_ea, new_size);
		DXDaddress(&ea_buf->new_ea, 0);
		DXDlength(&ea_buf->new_ea, 0);
	} else if (ea_buf->flag & EA_MALLOC) {
		rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
		kfree(ea_buf->xattr);
	} else if (ea_buf->flag & EA_NEW) {
		/* We have already allocated a new dxd */
		flush_metapage(ea_buf->mp);
	} else {
		/* ->xattr must point to original ea's metapage */
		rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea);
		discard_metapage(ea_buf->mp);
	}
	if (rc)
		return rc;

	tid = txBegin(inode->i_sb, 0);
	down(&ji->commit_sem);

	old_blocks = new_blocks = 0;

	if (ji->ea.flag & DXD_EXTENT) {
		invalidate_dxd_metapages(inode, ji->ea);
		old_blocks = lengthDXD(&ji->ea);
	}

	if (ea_buf) {
		txEA(tid, inode, &ji->ea, &ea_buf->new_ea);
		if (ea_buf->new_ea.flag & DXD_EXTENT) {
			new_blocks = lengthDXD(&ea_buf->new_ea);
			if (ji->ea.flag & DXD_INLINE)
				ji->mode2 |= INLINEEA;
		}
		ji->ea = ea_buf->new_ea;
	} else {
		txEA(tid, inode, &ji->ea, NULL);
		if (ji->ea.flag & DXD_INLINE)
			ji->mode2 |= INLINEEA;
		ji->ea.flag = 0;
		ji->ea.size = 0;
	}

	/* If old blocks exist, they must be removed from quota allocation. */
	if (old_blocks)
		DQUOT_FREE_BLOCK(inode, old_blocks);

	inode->i_ctime = CURRENT_TIME;
	rc = txCommit(tid, 1, &inode, 0);
	txEnd(tid);
	up(&ji->commit_sem);

	return rc;
}

/*
 * can_set_system_xattr
 *
 * This code is specific to the system.* namespace.  It contains policy
 * which doesn't belong in the main xattr codepath.
 */
static int can_set_system_xattr(struct inode *inode, const char *name,
				const void *value, size_t value_len)
{
#ifdef CONFIG_JFS_POSIX_ACL
	struct posix_acl *acl;
	int rc;

	if ((current->fsuid != inode->i_uid) && !capable(CAP_FOWNER))
		return -EPERM;

	/*
	 * XATTR_NAME_ACL_ACCESS is tied to i_mode
	 */
	if (strcmp(name, XATTR_NAME_ACL_ACCESS) == 0) {
		acl = posix_acl_from_xattr(value, value_len);
		if (IS_ERR(acl)) {
			rc = PTR_ERR(acl);
			printk(KERN_ERR "posix_acl_from_xattr returned %d\n",
			       rc);
			return rc;
		}
		if (acl) {
			mode_t mode = inode->i_mode;
			rc = posix_acl_equiv_mode(acl, &mode);
			posix_acl_release(acl);
			if (rc < 0) {
				printk(KERN_ERR
				       "posix_acl_equiv_mode returned %d\n",
				       rc);
				return rc;
			}
			inode->i_mode = mode;
			mark_inode_dirty(inode);
		}
		/*
		 * We're changing the ACL.  Get rid of the cached one
		 */
		acl = JFS_IP(inode)->i_acl;
		if (acl != JFS_ACL_NOT_CACHED)
			posix_acl_release(acl);
		JFS_IP(inode)->i_acl = JFS_ACL_NOT_CACHED;

		return 0;
	} else if (strcmp(name, XATTR_NAME_ACL_DEFAULT) == 0) {
		acl = posix_acl_from_xattr(value, value_len);
		if (IS_ERR(acl)) {
			rc = PTR_ERR(acl);
			printk(KERN_ERR "posix_acl_from_xattr returned %d\n",
			       rc);
			return rc;
		}
		posix_acl_release(acl);

		/*
		 * We're changing the default ACL.  Get rid of the cached one
		 */
		acl = JFS_IP(inode)->i_default_acl;
		if (acl && (acl != JFS_ACL_NOT_CACHED))
			posix_acl_release(acl);
		JFS_IP(inode)->i_default_acl = JFS_ACL_NOT_CACHED;

		return 0;
	}
#endif				/* CONFIG_JFS_POSIX_ACL */
	return -EOPNOTSUPP;
}

static int can_set_xattr(struct inode *inode, const char *name,
			 const void *value, size_t value_len)
{
	if (IS_RDONLY(inode))
		return -EROFS;

	if (IS_IMMUTABLE(inode) || IS_APPEND(inode) || S_ISLNK(inode->i_mode))
		return -EPERM;

	if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) == 0)
		/*
		 * "system.*"
		 */
		return can_set_system_xattr(inode, name, value, value_len);

	if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0)
		return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM);

#ifdef CONFIG_JFS_SECURITY
	if (strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)
	    == 0)
		return 0;	/* Leave it to the security module */
#endif

	if ((strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) != 0) &&
	    (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) != 0))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode) &&
	    (!S_ISDIR(inode->i_mode) || inode->i_mode & S_ISVTX))
		return -EPERM;

	return permission(inode, MAY_WRITE, NULL);
}

int __jfs_setxattr(struct inode *inode, const char *name, const void *value,
		   size_t value_len, int flags)
{
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea, *old_ea = NULL, *next_ea = NULL;
	struct ea_buffer ea_buf;
	int old_ea_size = 0;
	int xattr_size;
	int new_size;
	int namelen = strlen(name);
	char *os2name = NULL;
	int found = 0;
	int rc;
	int length;

	if ((rc = can_set_xattr(inode, name, value, value_len)))
		return rc;

	if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
		os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
				  GFP_KERNEL);
		if (!os2name)
			return -ENOMEM;
		strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
		name = os2name;
		namelen -= XATTR_OS2_PREFIX_LEN;
	}

	down_write(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);
	if (xattr_size < 0) {
		rc = xattr_size;
		goto out;
	}

      again:
	ealist = (struct jfs_ea_list *) ea_buf.xattr;
	new_size = sizeof (struct jfs_ea_list);

	if (xattr_size) {
		for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist);
		     ea = NEXT_EA(ea)) {
			if ((namelen == ea->namelen) &&
			    (memcmp(name, ea->name, namelen) == 0)) {
				found = 1;
				if (flags & XATTR_CREATE) {
					rc = -EEXIST;
					goto release;
				}
				old_ea = ea;
				old_ea_size = EA_SIZE(ea);
				next_ea = NEXT_EA(ea);
			} else
				new_size += EA_SIZE(ea);
		}
	}

	if (!found) {
		if (flags & XATTR_REPLACE) {
			rc = -ENODATA;
			goto release;
		}
		if (value == NULL) {
			rc = 0;
			goto release;
		}
	}
	if (value)
		new_size += sizeof (struct jfs_ea) + namelen + 1 + value_len;

	if (new_size > ea_buf.max_size) {
		/*
		 * We need to allocate more space for the merged ea list.
		 * We should only loop back to again: once.
		 */
		ea_release(inode, &ea_buf);
		xattr_size = ea_get(inode, &ea_buf, new_size);
		if (xattr_size < 0) {
			rc = xattr_size;
			goto out;
		}
		goto again;
	}

	/* Remove old ea of the same name */
	if (found) {
		/* number of bytes following target EA */
		length = (char *) END_EALIST(ealist) - (char *) next_ea;
		if (length > 0)
			memmove(old_ea, next_ea, length);
		xattr_size -= old_ea_size;
	}

	/* Add new entry to the end */
	if (value) {
		if (xattr_size == 0)
			/* Completely new ea list */
			xattr_size = sizeof (struct jfs_ea_list);

		ea = (struct jfs_ea *) ((char *) ealist + xattr_size);
		ea->flag = 0;
		ea->namelen = namelen;
		ea->valuelen = (cpu_to_le16(value_len));
		memcpy(ea->name, name, namelen);
		ea->name[namelen] = 0;
		if (value_len)
			memcpy(&ea->name[namelen + 1], value, value_len);
		xattr_size += EA_SIZE(ea);
	}

	/* DEBUG - If we did this right, these numbers match */
	if (xattr_size != new_size) {
		printk(KERN_ERR
		       "__jfs_setxattr: xattr_size = %d, new_size = %d\n",
		       xattr_size, new_size);

		rc = -EINVAL;
		goto release;
	}

	/*
	 * If we're left with an empty list, there's no ea
	 */
	if (new_size == sizeof (struct jfs_ea_list))
		new_size = 0;

	ealist->size = cpu_to_le32(new_size);

	rc = ea_put(inode, &ea_buf, new_size);

	goto out;
      release:
	ea_release(inode, &ea_buf);
      out:
	up_write(&JFS_IP(inode)->xattr_sem);

	if (os2name)
		kfree(os2name);

	return rc;
}

int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
		 size_t value_len, int flags)
{
	if (value == NULL) {	/* empty EA, do not remove */
		value = "";
		value_len = 0;
	}

	return __jfs_setxattr(dentry->d_inode, name, value, value_len, flags);
}
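
/*
 * Note on the legacy OS/2 namespace (illustrative example; the attribute
 * name "CONTENTS" is made up).  A caller that does
 *
 *	setxattr(path, "os2.CONTENTS", value, value_len, 0);
 *
 * ends up in __jfs_setxattr() with the "os2." prefix stripped, so the
 * attribute is stored on disk under the bare name "CONTENTS".  When the
 * list is read back, copy_name() prepends "os2." again, so listxattr()
 * and getxattr() see "os2.CONTENTS".  Attributes with a recognized Linux
 * prefix ("user.", "trusted.", "security.", "system.") are stored with
 * their prefix intact.
 */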

static int can_get_xattr(struct inode *inode, const char *name)
{
#ifdef CONFIG_JFS_SECURITY
	if (strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) == 0)
		return 0;
#endif

	if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) == 0)
		return (capable(CAP_SYS_ADMIN) ? 0 : -EPERM);

	if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) == 0)
		return 0;

	return permission(inode, MAY_READ, NULL);
}

ssize_t __jfs_getxattr(struct inode *inode, const char *name, void *data,
		       size_t buf_size)
{
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea;
	struct ea_buffer ea_buf;
	int xattr_size;
	ssize_t size;
	int namelen = strlen(name);
	char *os2name = NULL;
	int rc;
	char *value;

	if ((rc = can_get_xattr(inode, name)))
		return rc;

	if (strncmp(name, XATTR_OS2_PREFIX, XATTR_OS2_PREFIX_LEN) == 0) {
		os2name = kmalloc(namelen - XATTR_OS2_PREFIX_LEN + 1,
				  GFP_KERNEL);
		if (!os2name)
			return -ENOMEM;
		strcpy(os2name, name + XATTR_OS2_PREFIX_LEN);
		name = os2name;
		namelen -= XATTR_OS2_PREFIX_LEN;
	}

	down_read(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);

	if (xattr_size < 0) {
		size = xattr_size;
		goto out;
	}

	if (xattr_size == 0)
		goto not_found;

	ealist = (struct jfs_ea_list *) ea_buf.xattr;

	/* Find the named attribute */
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea))
		if ((namelen == ea->namelen) &&
		    memcmp(name, ea->name, namelen) == 0) {
			/* Found it */
			size = le16_to_cpu(ea->valuelen);
			if (!data)
				goto release;
			else if (size > buf_size) {
				size = -ERANGE;
				goto release;
			}
			value = ((char *) &ea->name) + ea->namelen + 1;
			memcpy(data, value, size);
			goto release;
		}
      not_found:
	size = -ENODATA;
      release:
	ea_release(inode, &ea_buf);
      out:
	up_read(&JFS_IP(inode)->xattr_sem);

	if (os2name)
		kfree(os2name);

	return size;
}

ssize_t jfs_getxattr(struct dentry *dentry, const char *name, void *data,
		     size_t buf_size)
{
	int err;

	err = __jfs_getxattr(dentry->d_inode, name, data, buf_size);

	return err;
}

/*
 * No special permissions are needed to list attributes except for trusted.*
 */
static inline int can_list(struct jfs_ea *ea)
{
	return (strncmp(ea->name, XATTR_TRUSTED_PREFIX,
			XATTR_TRUSTED_PREFIX_LEN) ||
		capable(CAP_SYS_ADMIN));
}

ssize_t jfs_listxattr(struct dentry * dentry, char *data, size_t buf_size)
{
	struct inode *inode = dentry->d_inode;
	char *buffer;
	ssize_t size = 0;
	int xattr_size;
	struct jfs_ea_list *ealist;
	struct jfs_ea *ea;
	struct ea_buffer ea_buf;

	down_read(&JFS_IP(inode)->xattr_sem);

	xattr_size = ea_get(inode, &ea_buf, 0);
	if (xattr_size < 0) {
		size = xattr_size;
		goto out;
	}

	if (xattr_size == 0)
		goto release;

	ealist = (struct jfs_ea_list *) ea_buf.xattr;

	/* compute required size of list */
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
		if (can_list(ea))
			size += name_size(ea) + 1;
	}

	if (!data)
		goto release;

	if (size > buf_size) {
		size = -ERANGE;
		goto release;
	}

	/* Copy attribute names to buffer */
	buffer = data;
	for (ea = FIRST_EA(ealist); ea < END_EALIST(ealist); ea = NEXT_EA(ea)) {
		if (can_list(ea)) {
			int namelen = copy_name(buffer, ea);
			buffer += namelen + 1;
		}
	}

      release:
	ea_release(inode, &ea_buf);
      out:
	up_read(&JFS_IP(inode)->xattr_sem);
	return size;
}

int jfs_removexattr(struct dentry *dentry, const char *name)
{
	return __jfs_setxattr(dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
}