/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_mount.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_inode.h"
#include "xfs_trans.h"
#include "xfs_inode_item.h"
#include "xfs_bmap.h"
#include "xfs_attr.h"
#include "xfs_attr_sf.h"
#include "xfs_attr_remote.h"
#include "xfs_attr_leaf.h"
#include "xfs_error.h"
#include "xfs_trace.h"
#include "xfs_buf_item.h"
#include "xfs_cksum.h"
#include "xfs_dir2.h"

STATIC int
xfs_attr_shortform_compare(const void *a, const void *b)
{
	xfs_attr_sf_sort_t *sa, *sb;

	sa = (xfs_attr_sf_sort_t *)a;
	sb = (xfs_attr_sf_sort_t *)b;
	if (sa->hash < sb->hash) {
		return -1;
	} else if (sa->hash > sb->hash) {
		return 1;
	} else {
		return sa->entno - sb->entno;
	}
}

#define XFS_ISRESET_CURSOR(cursor) \
	(!((cursor)->initted) && !((cursor)->hashval) && \
	 !((cursor)->blkno) && !((cursor)->offset))
/*
 * Copy out entries of shortform attribute lists for attr_list().
 * Shortform attribute lists are not stored in hashval sorted order.
 * If the output buffer is not large enough to hold them all, then
 * we have to calculate each entry's hash value and sort them before
 * we can begin returning them to the user.
 */
int
xfs_attr_shortform_list(xfs_attr_list_context_t *context)
{
	attrlist_cursor_kern_t	*cursor;
	xfs_attr_sf_sort_t	*sbuf, *sbp;
	xfs_attr_shortform_t	*sf;
	xfs_attr_sf_entry_t	*sfe;
	xfs_inode_t		*dp;
	int			sbsize, nsbuf, count, i;
	int			error;

	ASSERT(context != NULL);
	dp = context->dp;
	ASSERT(dp != NULL);
	ASSERT(dp->i_afp != NULL);
	sf = (xfs_attr_shortform_t *)dp->i_afp->if_u1.if_data;
	ASSERT(sf != NULL);
	if (!sf->hdr.count)
		return 0;
	cursor = context->cursor;
	ASSERT(cursor != NULL);

	trace_xfs_attr_list_sf(context);

	/*
	 * If the buffer is large enough and the cursor is at the start,
	 * do not bother with sorting since we will return everything in
	 * one buffer and another call using the cursor won't need to be
	 * made.
	 * Note the generous fudge factor of 16 overhead bytes per entry.
	 * If bufsize is zero then put_listent must be a search function
	 * and can just scan through what we have.
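	 * For example, a 120-byte shortform fork holding three entries
	 * takes this path whenever bufsize exceeds 120 + 3 * 16 = 168 bytes.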
	 */
	if (context->bufsize == 0 ||
	    (XFS_ISRESET_CURSOR(cursor) &&
	     (dp->i_afp->if_bytes + sf->hdr.count * 16) < context->bufsize)) {
		for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
			error = context->put_listent(context,
						     sfe->flags,
						     sfe->nameval,
						     (int)sfe->namelen,
						     (int)sfe->valuelen);
			if (error)
				return error;
			/*
			 * Either search callback finished early or
			 * didn't fit it all in the buffer after all.
			 */
			if (context->seen_enough)
				break;
			sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		}
		trace_xfs_attr_list_sf_all(context);
		return 0;
	}

	/* do no more for a search callback */
	if (context->bufsize == 0)
		return 0;

	/*
	 * It didn't all fit, so we have to sort everything on hashval.
	 */
	sbsize = sf->hdr.count * sizeof(*sbuf);
	sbp = sbuf = kmem_alloc(sbsize, KM_SLEEP | KM_NOFS);

	/*
	 * Scan the attribute list for the rest of the entries, storing
	 * the relevant info from only those that match into a buffer.
	 */
	nsbuf = 0;
	for (i = 0, sfe = &sf->list[0]; i < sf->hdr.count; i++) {
		if (unlikely(
		    ((char *)sfe < (char *)sf) ||
		    ((char *)sfe >= ((char *)sf + dp->i_afp->if_bytes)))) {
			XFS_CORRUPTION_ERROR("xfs_attr_shortform_list",
					     XFS_ERRLEVEL_LOW,
					     context->dp->i_mount, sfe);
			kmem_free(sbuf);
			return -EFSCORRUPTED;
		}

		sbp->entno = i;
		sbp->hash = xfs_da_hashname(sfe->nameval, sfe->namelen);
		sbp->name = sfe->nameval;
		sbp->namelen = sfe->namelen;
		/* These are bytes, and both on-disk, don't endian-flip */
		sbp->valuelen = sfe->valuelen;
		sbp->flags = sfe->flags;
		sfe = XFS_ATTR_SF_NEXTENTRY(sfe);
		sbp++;
		nsbuf++;
	}

	/*
	 * Sort the entries on hash then entno.
	 */
	xfs_sort(sbuf, nsbuf, sizeof(*sbuf), xfs_attr_shortform_compare);

	/*
	 * Re-find our place IN THE SORTED LIST.
	 */
	count = 0;
	cursor->initted = 1;
	cursor->blkno = 0;
	for (sbp = sbuf, i = 0; i < nsbuf; i++, sbp++) {
		if (sbp->hash == cursor->hashval) {
			if (cursor->offset == count) {
				break;
			}
			count++;
		} else if (sbp->hash > cursor->hashval) {
			break;
		}
	}
	if (i == nsbuf) {
		kmem_free(sbuf);
		return 0;
	}

	/*
	 * Loop putting entries into the user buffer.
	 */
	for ( ; i < nsbuf; i++, sbp++) {
		if (cursor->hashval != sbp->hash) {
			cursor->hashval = sbp->hash;
			cursor->offset = 0;
		}
		error = context->put_listent(context,
					     sbp->flags,
					     sbp->name,
					     sbp->namelen,
					     sbp->valuelen);
		if (error) {
			kmem_free(sbuf);
			return error;
		}
		if (context->seen_enough)
			break;
		cursor->offset++;
	}

	kmem_free(sbuf);
	return 0;
}

STATIC int
xfs_attr_node_list(xfs_attr_list_context_t *context)
{
	attrlist_cursor_kern_t		*cursor;
	xfs_attr_leafblock_t		*leaf;
	xfs_da_intnode_t		*node;
	struct xfs_attr3_icleaf_hdr	leafhdr;
	struct xfs_da3_icnode_hdr	nodehdr;
	struct xfs_da_node_entry	*btree;
	int				error, i;
	struct xfs_buf			*bp;
	struct xfs_inode		*dp = context->dp;
	struct xfs_mount		*mp = dp->i_mount;

	trace_xfs_attr_node_list(context);

	cursor = context->cursor;
	cursor->initted = 1;

	/*
	 * Do all sorts of validation on the passed-in cursor structure.
	 * If anything is amiss, ignore the cursor and look up the hashval
	 * starting from the btree root.
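	 * A cached cursor->blkno is only trusted if it still refers to a
	 * leaf block whose first and last hashvals bracket cursor->hashval;
	 * a node block or an out-of-range leaf forces the root lookup below.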
	 */
	bp = NULL;
	if (cursor->blkno > 0) {
		error = xfs_da3_node_read(NULL, dp, cursor->blkno, -1,
					  &bp, XFS_ATTR_FORK);
		if ((error != 0) && (error != -EFSCORRUPTED))
			return error;
		if (bp) {
			struct xfs_attr_leaf_entry *entries;

			node = bp->b_addr;
			switch (be16_to_cpu(node->hdr.info.magic)) {
			case XFS_DA_NODE_MAGIC:
			case XFS_DA3_NODE_MAGIC:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(NULL, bp);
				bp = NULL;
				break;
			case XFS_ATTR_LEAF_MAGIC:
			case XFS_ATTR3_LEAF_MAGIC:
				leaf = bp->b_addr;
				xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo,
							     &leafhdr, leaf);
				entries = xfs_attr3_leaf_entryp(leaf);
				if (cursor->hashval > be32_to_cpu(
					    entries[leafhdr.count - 1].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(NULL, bp);
					bp = NULL;
				} else if (cursor->hashval <= be32_to_cpu(
					    entries[0].hashval)) {
					trace_xfs_attr_list_wrong_blk(context);
					xfs_trans_brelse(NULL, bp);
					bp = NULL;
				}
				break;
			default:
				trace_xfs_attr_list_wrong_blk(context);
				xfs_trans_brelse(NULL, bp);
				bp = NULL;
			}
		}
	}

	/*
	 * We did not find what we expected given the cursor's contents,
	 * so we start from the top and work down based on the hash value.
	 * Note that start of node block is same as start of leaf block.
	 */
	if (bp == NULL) {
		cursor->blkno = 0;
		for (;;) {
			__uint16_t magic;

			error = xfs_da3_node_read(NULL, dp,
						  cursor->blkno, -1, &bp,
						  XFS_ATTR_FORK);
			if (error)
				return error;
			node = bp->b_addr;
			magic = be16_to_cpu(node->hdr.info.magic);
			if (magic == XFS_ATTR_LEAF_MAGIC ||
			    magic == XFS_ATTR3_LEAF_MAGIC)
				break;
			if (magic != XFS_DA_NODE_MAGIC &&
			    magic != XFS_DA3_NODE_MAGIC) {
				XFS_CORRUPTION_ERROR("xfs_attr_node_list(3)",
						     XFS_ERRLEVEL_LOW,
						     context->dp->i_mount,
						     node);
				xfs_trans_brelse(NULL, bp);
				return -EFSCORRUPTED;
			}

			dp->d_ops->node_hdr_from_disk(&nodehdr, node);
			btree = dp->d_ops->node_tree_p(node);
			for (i = 0; i < nodehdr.count; btree++, i++) {
				if (cursor->hashval
						<= be32_to_cpu(btree->hashval)) {
					cursor->blkno = be32_to_cpu(btree->before);
					trace_xfs_attr_list_node_descend(context,
									 btree);
					break;
				}
			}
			if (i == nodehdr.count) {
				xfs_trans_brelse(NULL, bp);
				return 0;
			}
			xfs_trans_brelse(NULL, bp);
		}
	}
	ASSERT(bp != NULL);

	/*
	 * Roll upward through the blocks, processing each leaf block in
	 * order.  As long as there is space in the result buffer, keep
	 * adding the information.
	 */
	for (;;) {
		leaf = bp->b_addr;
		error = xfs_attr3_leaf_list_int(bp, context);
		if (error) {
			xfs_trans_brelse(NULL, bp);
			return error;
		}
		xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
		if (context->seen_enough || leafhdr.forw == 0)
			break;
		cursor->blkno = leafhdr.forw;
		xfs_trans_brelse(NULL, bp);
		error = xfs_attr3_leaf_read(NULL, dp, cursor->blkno, -1, &bp);
		if (error)
			return error;
	}
	xfs_trans_brelse(NULL, bp);
	return 0;
}

/*
 * Copy out attribute list entries for attr_list(), for leaf attribute lists.
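 * Entries that share a hashval are distinguished by cursor->offset;
 * context->dupcnt counts the duplicates skipped while re-finding our place.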
 */
int
xfs_attr3_leaf_list_int(
	struct xfs_buf			*bp,
	struct xfs_attr_list_context	*context)
{
	struct attrlist_cursor_kern	*cursor;
	struct xfs_attr_leafblock	*leaf;
	struct xfs_attr3_icleaf_hdr	ichdr;
	struct xfs_attr_leaf_entry	*entries;
	struct xfs_attr_leaf_entry	*entry;
	int				retval;
	int				i;
	struct xfs_mount		*mp = context->dp->i_mount;

	trace_xfs_attr_list_leaf(context);

	leaf = bp->b_addr;
	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
	entries = xfs_attr3_leaf_entryp(leaf);

	cursor = context->cursor;
	cursor->initted = 1;

	/*
	 * Re-find our place in the leaf block if this is a new syscall.
	 */
	if (context->resynch) {
		entry = &entries[0];
		for (i = 0; i < ichdr.count; entry++, i++) {
			if (be32_to_cpu(entry->hashval) == cursor->hashval) {
				if (cursor->offset == context->dupcnt) {
					context->dupcnt = 0;
					break;
				}
				context->dupcnt++;
			} else if (be32_to_cpu(entry->hashval) >
					cursor->hashval) {
				context->dupcnt = 0;
				break;
			}
		}
		if (i == ichdr.count) {
			trace_xfs_attr_list_notfound(context);
			return 0;
		}
	} else {
		entry = &entries[0];
		i = 0;
	}
	context->resynch = 0;

	/*
	 * We have found our place, start copying out the new attributes.
	 */
	retval = 0;
	for (; i < ichdr.count; entry++, i++) {
		char *name;
		int namelen, valuelen;

		if (be32_to_cpu(entry->hashval) != cursor->hashval) {
			cursor->hashval = be32_to_cpu(entry->hashval);
			cursor->offset = 0;
		}

		if (entry->flags & XFS_ATTR_INCOMPLETE)
			continue;		/* skip incomplete entries */

		if (entry->flags & XFS_ATTR_LOCAL) {
			xfs_attr_leaf_name_local_t *name_loc;

			name_loc = xfs_attr3_leaf_name_local(leaf, i);
			name = name_loc->nameval;
			namelen = name_loc->namelen;
			valuelen = be16_to_cpu(name_loc->valuelen);
		} else {
			xfs_attr_leaf_name_remote_t *name_rmt;

			name_rmt = xfs_attr3_leaf_name_remote(leaf, i);
			name = name_rmt->name;
			namelen = name_rmt->namelen;
			valuelen = be32_to_cpu(name_rmt->valuelen);
		}

		retval = context->put_listent(context, entry->flags,
					      name, namelen, valuelen);
		if (retval)
			break;
		if (context->seen_enough)
			break;
		cursor->offset++;
	}
	trace_xfs_attr_list_leaf_end(context);
	return retval;
}

/*
 * Copy out attribute entries for attr_list(), for leaf attribute lists.
 */
STATIC int
xfs_attr_leaf_list(xfs_attr_list_context_t *context)
{
	int			error;
	struct xfs_buf		*bp;

	trace_xfs_attr_leaf_list(context);

	context->cursor->blkno = 0;
	error = xfs_attr3_leaf_read(NULL, context->dp, 0, -1, &bp);
	if (error)
		return error;

	error = xfs_attr3_leaf_list_int(bp, context);
	xfs_trans_brelse(NULL, bp);
	return error;
}

int
xfs_attr_list_int(
	xfs_attr_list_context_t	*context)
{
	int			error;
	xfs_inode_t		*dp = context->dp;
	uint			lock_mode;

	XFS_STATS_INC(dp->i_mount, xs_attr_list);

	if (XFS_FORCED_SHUTDOWN(dp->i_mount))
		return -EIO;

	/*
	 * Decide on what work routines to call based on the inode size.
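	 * Attributes can live inline in the inode (shortform), in a single
	 * leaf block, or in a multi-level da-btree; each format has its own
	 * list routine.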
	 */
	lock_mode = xfs_ilock_attr_map_shared(dp);
	if (!xfs_inode_hasattr(dp)) {
		error = 0;
	} else if (dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
		error = xfs_attr_shortform_list(context);
	} else if (xfs_bmap_one_block(dp, XFS_ATTR_FORK)) {
		error = xfs_attr_leaf_list(context);
	} else {
		error = xfs_attr_node_list(context);
	}
	xfs_iunlock(dp, lock_mode);
	return error;
}

#define	ATTR_ENTBASESIZE		/* minimum bytes used by an attr */ \
	(((struct attrlist_ent *) 0)->a_name - (char *) 0)
#define	ATTR_ENTSIZE(namelen)		/* actual bytes used by an attr */ \
	((ATTR_ENTBASESIZE + (namelen) + 1 + sizeof(u_int32_t)-1) \
	 & ~(sizeof(u_int32_t)-1))

/*
 * Format an attribute and copy it out to the user's buffer.
 * Take care to check values and protect against them changing later;
 * we may be reading them directly out of a user buffer.
 */
STATIC int
xfs_attr_put_listent(
	xfs_attr_list_context_t	*context,
	int			flags,
	unsigned char		*name,
	int			namelen,
	int			valuelen)
{
	struct attrlist		*alist = (struct attrlist *)context->alist;
	attrlist_ent_t		*aep;
	int			arraytop;

	ASSERT(!(context->flags & ATTR_KERNOVAL));
	ASSERT(context->count >= 0);
	ASSERT(context->count < (ATTR_MAX_VALUELEN/8));
	ASSERT(context->firstu >= sizeof(*alist));
	ASSERT(context->firstu <= context->bufsize);

	/*
	 * Only list entries in the right namespace.
	 */
	if (((context->flags & ATTR_SECURE) == 0) !=
	    ((flags & XFS_ATTR_SECURE) == 0))
		return 0;
	if (((context->flags & ATTR_ROOT) == 0) !=
	    ((flags & XFS_ATTR_ROOT) == 0))
		return 0;

	arraytop = sizeof(*alist) +
			context->count * sizeof(alist->al_offset[0]);
	context->firstu -= ATTR_ENTSIZE(namelen);
	if (context->firstu < arraytop) {
		trace_xfs_attr_list_full(context);
		alist->al_more = 1;
		context->seen_enough = 1;
		return 0;
	}

	aep = (attrlist_ent_t *)&context->alist[context->firstu];
	aep->a_valuelen = valuelen;
	memcpy(aep->a_name, name, namelen);
	aep->a_name[namelen] = 0;
	alist->al_offset[context->count++] = context->firstu;
	alist->al_count = context->count;
	trace_xfs_attr_list_add(context);
	return 0;
}

/*
 * Generate a list of extended attribute names and optionally
 * also value lengths.  Positive return value follows the XFS
 * convention of being an error, zero or negative return code
 * is the length of the buffer returned (negated), indicating
 * success.
 */
int
xfs_attr_list(
	xfs_inode_t		*dp,
	char			*buffer,
	int			bufsize,
	int			flags,
	attrlist_cursor_kern_t	*cursor)
{
	xfs_attr_list_context_t	context;
	struct attrlist		*alist;
	int			error;

	/*
	 * Validate the cursor.
	 */
	if (cursor->pad1 || cursor->pad2)
		return -EINVAL;
	if ((cursor->initted == 0) &&
	    (cursor->hashval || cursor->blkno || cursor->offset))
		return -EINVAL;

	/*
	 * Check for a properly aligned buffer.
	 */
	if (((long)buffer) & (sizeof(int)-1))
		return -EFAULT;
	if (flags & ATTR_KERNOVAL)
		bufsize = 0;

	/*
	 * Initialize the output buffer.
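	 * The attrlist header and its al_offset[] array grow forward from
	 * the start of the buffer, while attrlist_ent_t records are packed
	 * downward from the end (firstu); the list is full when the two meet.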
	 */
	memset(&context, 0, sizeof(context));
	context.dp = dp;
	context.cursor = cursor;
	context.resynch = 1;
	context.flags = flags;
	context.alist = buffer;
	context.bufsize = (bufsize & ~(sizeof(int)-1));	/* align */
	context.firstu = context.bufsize;
	context.put_listent = xfs_attr_put_listent;

	alist = (struct attrlist *)context.alist;
	alist->al_count = 0;
	alist->al_more = 0;
	alist->al_offset[0] = context.bufsize;

	error = xfs_attr_list_int(&context);
	ASSERT(error <= 0);
	return error;
}