// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021-2022, Alibaba Cloud
 */
#include <linux/security.h>
#include "xattr.h"

struct xattr_iter {
	struct super_block *sb;
	struct erofs_buf buf;
	void *kaddr;

	erofs_blk_t blkaddr;
	unsigned int ofs;
};

static int init_inode_xattrs(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct xattr_iter it;
	unsigned int i;
	struct erofs_xattr_ibody_header *ih;
	struct super_block *sb;
	struct erofs_sb_info *sbi;
	int ret = 0;

	/* the common case is that xattrs of this inode are initialized */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	/* someone has initialized xattrs for us? */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
		goto out_unlock;

	/*
	 * bypass all xattr operations if ->xattr_isize is not greater than
	 * sizeof(struct erofs_xattr_ibody_header), in detail:
	 * 1) it is not large enough to contain erofs_xattr_ibody_header, in
	 *    which case ->xattr_isize should be 0 (it means no xattrs);
	 * 2) it is exactly sizeof(erofs_xattr_ibody_header), which is
	 *    currently undefined on-disk (may be used later with a new sb
	 *    feature).
	 */
	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
		erofs_err(inode->i_sb,
			  "xattr_isize %d of nid %llu is not supported yet",
			  vi->xattr_isize, vi->nid);
		ret = -EOPNOTSUPP;
		goto out_unlock;
	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
		if (vi->xattr_isize) {
			erofs_err(inode->i_sb,
				  "bogus xattr ibody @ nid %llu", vi->nid);
			DBG_BUGON(1);
			ret = -EFSCORRUPTED;
			goto out_unlock;	/* xattr ondisk layout error */
		}
		ret = -ENOATTR;
		goto out_unlock;
	}

	sb = inode->i_sb;
	sbi = EROFS_SB(sb);
	it.buf = __EROFS_BUF_INITIALIZER;
	it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
	it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);

	/* read in shared xattr array (non-atomic, see kmalloc below) */
	it.kaddr = erofs_read_metabuf(&it.buf, sb, it.blkaddr, EROFS_KMAP);
	if (IS_ERR(it.kaddr)) {
		ret = PTR_ERR(it.kaddr);
		goto out_unlock;
	}

	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
	vi->xattr_shared_count = ih->h_shared_count;
	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
						sizeof(uint), GFP_KERNEL);
	if (!vi->xattr_shared_xattrs) {
		erofs_put_metabuf(&it.buf);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* let's skip ibody header */
	it.ofs += sizeof(struct erofs_xattr_ibody_header);
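
	/*
	 * Each 32-bit id read below identifies one shared xattr; it is
	 * resolved into a block address and offset at lookup time via
	 * xattrblock_addr()/xattrblock_offset().
	 */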
	for (i = 0; i < vi->xattr_shared_count; ++i) {
		if (it.ofs >= EROFS_BLKSIZ) {
			/* cannot be unaligned */
			DBG_BUGON(it.ofs != EROFS_BLKSIZ);

			it.kaddr = erofs_read_metabuf(&it.buf, sb, ++it.blkaddr,
						      EROFS_KMAP);
			if (IS_ERR(it.kaddr)) {
				kfree(vi->xattr_shared_xattrs);
				vi->xattr_shared_xattrs = NULL;
				ret = PTR_ERR(it.kaddr);
				goto out_unlock;
			}
			it.ofs = 0;
		}
		vi->xattr_shared_xattrs[i] =
			le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
		it.ofs += sizeof(__le32);
	}
	erofs_put_metabuf(&it.buf);

	/* paired with smp_mb() at the beginning of the function. */
	smp_mb();
	set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);

out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT, &vi->flags);
	return ret;
}

/*
 * the general idea of these return values is:
 * if 0 is returned, go on processing the current xattr;
 * if 1 (> 0) is returned, skip this round to process the next xattr;
 * if -err (< 0) is returned, an error (maybe ENOATTR) occurred
 *    and needs to be handled
 */
struct xattr_iter_handlers {
	int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
	int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
		    unsigned int len);
	int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
	void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
		      unsigned int len);
};

static inline int xattr_iter_fixup(struct xattr_iter *it)
{
	if (it->ofs < EROFS_BLKSIZ)
		return 0;

	it->blkaddr += erofs_blknr(it->ofs);
	it->kaddr = erofs_read_metabuf(&it->buf, it->sb, it->blkaddr,
				       EROFS_KMAP_ATOMIC);
	if (IS_ERR(it->kaddr))
		return PTR_ERR(it->kaddr);
	it->ofs = erofs_blkoff(it->ofs);
	return 0;
}

static int inline_xattr_iter_begin(struct xattr_iter *it,
				   struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct erofs_sb_info *const sbi = EROFS_SB(inode->i_sb);
	unsigned int xattr_header_sz, inline_xattr_ofs;

	xattr_header_sz = inlinexattr_header_size(inode);
	if (xattr_header_sz >= vi->xattr_isize) {
		DBG_BUGON(xattr_header_sz > vi->xattr_isize);
		return -ENOATTR;
	}

	inline_xattr_ofs = vi->inode_isize + xattr_header_sz;

	it->blkaddr = erofs_blknr(iloc(sbi, vi->nid) + inline_xattr_ofs);
	it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);

	it->kaddr = erofs_read_metabuf(&it->buf, inode->i_sb, it->blkaddr,
				       EROFS_KMAP_ATOMIC);
	if (IS_ERR(it->kaddr))
		return PTR_ERR(it->kaddr);
	return vi->xattr_isize - xattr_header_sz;
}

/*
 * Regardless of success or failure, `xattr_foreach' will end up with
 * `ofs' pointing to the next xattr item rather than an arbitrary position.
 */
static int xattr_foreach(struct xattr_iter *it,
			 const struct xattr_iter_handlers *op,
			 unsigned int *tlimit)
{
	struct erofs_xattr_entry entry;
	unsigned int value_sz, processed, slice;
	int err;

	/* 0. fixup blkaddr, ofs and the mapped buffer */
	err = xattr_iter_fixup(it);
	if (err)
		return err;

	/*
	 * 1. read the xattr entry into memory; since entries are
	 *    EROFS_XATTR_ALIGNed, the entry itself cannot cross a block
	 *    boundary
	 */
	entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
	if (tlimit) {
		unsigned int entry_sz = erofs_xattr_entry_size(&entry);

		/* xattr on-disk corruption: xattr entry beyond xattr_isize */
		if (*tlimit < entry_sz) {
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		*tlimit -= entry_sz;
	}

	it->ofs += sizeof(struct erofs_xattr_entry);
	value_sz = le16_to_cpu(entry.e_value_size);

	/* handle entry */
	err = op->entry(it, &entry);
	if (err) {
		it->ofs += entry.e_name_len + value_sz;
		goto out;
	}
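
	/*
	 * Both the name and the value may cross a block boundary, so the
	 * loops below process them in per-block slices and rely on
	 * xattr_iter_fixup() to map the next metadata block once ofs
	 * reaches EROFS_BLKSIZ.
	 */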
	/* 2. handle xattr name (ofs will finally be at the end of name) */
	processed = 0;

	while (processed < entry.e_name_len) {
		if (it->ofs >= EROFS_BLKSIZ) {
			DBG_BUGON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned int, EROFS_BLKSIZ - it->ofs,
			      entry.e_name_len - processed);

		/* handle name */
		err = op->name(it, processed, it->kaddr + it->ofs, slice);
		if (err) {
			it->ofs += entry.e_name_len - processed + value_sz;
			goto out;
		}

		it->ofs += slice;
		processed += slice;
	}

	/* 3. handle xattr value */
	processed = 0;

	if (op->alloc_buffer) {
		err = op->alloc_buffer(it, value_sz);
		if (err) {
			it->ofs += value_sz;
			goto out;
		}
	}

	while (processed < value_sz) {
		if (it->ofs >= EROFS_BLKSIZ) {
			DBG_BUGON(it->ofs > EROFS_BLKSIZ);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned int, EROFS_BLKSIZ - it->ofs,
			      value_sz - processed);
		op->value(it, processed, it->kaddr + it->ofs, slice);
		it->ofs += slice;
		processed += slice;
	}

out:
	/* xattrs should be 4-byte aligned (on-disk constraint) */
	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
	return err < 0 ? err : 0;
}

struct getxattr_iter {
	struct xattr_iter it;

	char *buffer;
	int buffer_size, index;
	struct qstr name;
};

static int xattr_entrymatch(struct xattr_iter *_it,
			    struct erofs_xattr_entry *entry)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	return (it->index != entry->e_name_index ||
		it->name.len != entry->e_name_len) ? -ENOATTR : 0;
}

static int xattr_namematch(struct xattr_iter *_it,
			   unsigned int processed, char *buf, unsigned int len)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
}

static int xattr_checkbuffer(struct xattr_iter *_it,
			     unsigned int value_sz)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
	int err = it->buffer_size < value_sz ? -ERANGE : 0;

	it->buffer_size = value_sz;
	return !it->buffer ? 1 : err;
}

static void xattr_copyvalue(struct xattr_iter *_it,
			    unsigned int processed,
			    char *buf, unsigned int len)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	memcpy(it->buffer + processed, buf, len);
}

static const struct xattr_iter_handlers find_xattr_handlers = {
	.entry = xattr_entrymatch,
	.name = xattr_namematch,
	.alloc_buffer = xattr_checkbuffer,
	.value = xattr_copyvalue
};

static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	int ret;
	unsigned int remaining;

	ret = inline_xattr_iter_begin(&it->it, inode);
	if (ret < 0)
		return ret;

	remaining = ret;
	while (remaining) {
		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
		if (ret != -ENOATTR)
			break;
	}
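
	/*
	 * On a successful match, xattr_checkbuffer() has recorded the value
	 * size in buffer_size, which is what gets returned to the caller.
	 */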
	return ret ? ret : it->buffer_size;
}

static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	unsigned int i;
	int ret = -ENOATTR;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
		it->it.kaddr = erofs_read_metabuf(&it->it.buf, sb, blkaddr,
						  EROFS_KMAP_ATOMIC);
		if (IS_ERR(it->it.kaddr))
			return PTR_ERR(it->it.kaddr);
		it->it.blkaddr = blkaddr;

		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
		if (ret != -ENOATTR)
			break;
	}
	return ret ? ret : it->buffer_size;
}

static bool erofs_xattr_user_list(struct dentry *dentry)
{
	return test_opt(&EROFS_SB(dentry->d_sb)->opt, XATTR_USER);
}

static bool erofs_xattr_trusted_list(struct dentry *dentry)
{
	return capable(CAP_SYS_ADMIN);
}

int erofs_getxattr(struct inode *inode, int index,
		   const char *name,
		   void *buffer, size_t buffer_size)
{
	int ret;
	struct getxattr_iter it;

	if (!name)
		return -EINVAL;

	ret = init_inode_xattrs(inode);
	if (ret)
		return ret;

	it.index = index;
	it.name.len = strlen(name);
	if (it.name.len > EROFS_NAME_LEN)
		return -ERANGE;

	it.it.buf = __EROFS_BUF_INITIALIZER;
	it.name.name = name;

	it.buffer = buffer;
	it.buffer_size = buffer_size;

	it.it.sb = inode->i_sb;
	ret = inline_getxattr(inode, &it);
	if (ret == -ENOATTR)
		ret = shared_getxattr(inode, &it);
	erofs_put_metabuf(&it.it.buf);
	return ret;
}

static int erofs_xattr_generic_get(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *name, void *buffer, size_t size)
{
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

	switch (handler->flags) {
	case EROFS_XATTR_INDEX_USER:
		if (!test_opt(&sbi->opt, XATTR_USER))
			return -EOPNOTSUPP;
		break;
	case EROFS_XATTR_INDEX_TRUSTED:
		break;
	case EROFS_XATTR_INDEX_SECURITY:
		break;
	default:
		return -EINVAL;
	}

	return erofs_getxattr(inode, handler->flags, name, buffer, size);
}

const struct xattr_handler erofs_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.flags	= EROFS_XATTR_INDEX_USER,
	.list	= erofs_xattr_user_list,
	.get	= erofs_xattr_generic_get,
};

const struct xattr_handler erofs_xattr_trusted_handler = {
	.prefix	= XATTR_TRUSTED_PREFIX,
	.flags	= EROFS_XATTR_INDEX_TRUSTED,
	.list	= erofs_xattr_trusted_list,
	.get	= erofs_xattr_generic_get,
};

#ifdef CONFIG_EROFS_FS_SECURITY
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.flags	= EROFS_XATTR_INDEX_SECURITY,
	.get	= erofs_xattr_generic_get,
};
#endif

const struct xattr_handler *erofs_xattr_handlers[] = {
	&erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
	&erofs_xattr_security_handler,
#endif
	NULL,
};

struct listxattr_iter {
	struct xattr_iter it;

	struct dentry *dentry;
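	/* NULL buffer: only compute the size needed for the name list */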
	char *buffer;
	int buffer_size, buffer_ofs;
};

static int xattr_entrylist(struct xattr_iter *_it,
			   struct erofs_xattr_entry *entry)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);
	unsigned int prefix_len;
	const char *prefix;

	const struct xattr_handler *h =
		erofs_xattr_handler(entry->e_name_index);

	if (!h || (h->list && !h->list(it->dentry)))
		return 1;

	prefix = xattr_prefix(h);
	prefix_len = strlen(prefix);

	if (!it->buffer) {
		it->buffer_ofs += prefix_len + entry->e_name_len + 1;
		return 1;
	}

	if (it->buffer_ofs + prefix_len
		+ entry->e_name_len + 1 > it->buffer_size)
		return -ERANGE;

	memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
	it->buffer_ofs += prefix_len;
	return 0;
}

static int xattr_namelist(struct xattr_iter *_it,
			  unsigned int processed, char *buf, unsigned int len)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);

	memcpy(it->buffer + it->buffer_ofs, buf, len);
	it->buffer_ofs += len;
	return 0;
}

static int xattr_skipvalue(struct xattr_iter *_it,
			   unsigned int value_sz)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);

	it->buffer[it->buffer_ofs++] = '\0';
	return 1;
}

static const struct xattr_iter_handlers list_xattr_handlers = {
	.entry = xattr_entrylist,
	.name = xattr_namelist,
	.alloc_buffer = xattr_skipvalue,
	.value = NULL
};

static int inline_listxattr(struct listxattr_iter *it)
{
	int ret;
	unsigned int remaining;

	ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
	if (ret < 0)
		return ret;

	remaining = ret;
	while (remaining) {
		ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
		if (ret)
			break;
	}
	return ret ? ret : it->buffer_ofs;
}

static int shared_listxattr(struct listxattr_iter *it)
{
	struct inode *const inode = d_inode(it->dentry);
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	unsigned int i;
	int ret = 0;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		erofs_blk_t blkaddr =
			xattrblock_addr(sbi, vi->xattr_shared_xattrs[i]);

		it->it.ofs = xattrblock_offset(sbi, vi->xattr_shared_xattrs[i]);
		it->it.kaddr = erofs_read_metabuf(&it->it.buf, sb, blkaddr,
						  EROFS_KMAP_ATOMIC);
		if (IS_ERR(it->it.kaddr))
			return PTR_ERR(it->it.kaddr);
		it->it.blkaddr = blkaddr;

		ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
		if (ret)
			break;
	}
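	/* buffer_ofs holds the bytes emitted (or required) for the name list */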
	return ret ? ret : it->buffer_ofs;
}

ssize_t erofs_listxattr(struct dentry *dentry,
			char *buffer, size_t buffer_size)
{
	int ret;
	struct listxattr_iter it;

	ret = init_inode_xattrs(d_inode(dentry));
	if (ret == -ENOATTR)
		return 0;
	if (ret)
		return ret;

	it.it.buf = __EROFS_BUF_INITIALIZER;
	it.dentry = dentry;
	it.buffer = buffer;
	it.buffer_size = buffer_size;
	it.buffer_ofs = 0;

	it.it.sb = dentry->d_sb;

	ret = inline_listxattr(&it);
	if (ret >= 0 || ret == -ENOATTR)
		ret = shared_listxattr(&it);
	erofs_put_metabuf(&it.it.buf);
	return ret;
}

#ifdef CONFIG_EROFS_FS_POSIX_ACL
struct posix_acl *erofs_get_acl(struct inode *inode, int type, bool rcu)
{
	struct posix_acl *acl;
	int prefix, rc;
	char *value = NULL;

	if (rcu)
		return ERR_PTR(-ECHILD);

	switch (type) {
	case ACL_TYPE_ACCESS:
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
		break;
	case ACL_TYPE_DEFAULT:
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	rc = erofs_getxattr(inode, prefix, "", NULL, 0);
	if (rc > 0) {
		value = kmalloc(rc, GFP_KERNEL);
		if (!value)
			return ERR_PTR(-ENOMEM);
		rc = erofs_getxattr(inode, prefix, "", value, rc);
	}

	if (rc == -ENOATTR)
		acl = NULL;
	else if (rc < 0)
		acl = ERR_PTR(rc);
	else
		acl = posix_acl_from_xattr(&init_user_ns, value, rc);
	kfree(value);
	return acl;
}
#endif