// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017-2018 HUAWEI, Inc.
 *             https://www.huawei.com/
 * Copyright (C) 2021-2022, Alibaba Cloud
 */
#include <linux/security.h>
#include "xattr.h"

/*
 * Shared xattrs are stored as a flat __u32 array starting at the
 * superblock's xattr_blkaddr; @xattr_id indexes into that array.
 * Return the block address holding entry @xattr_id.
 */
static inline erofs_blk_t erofs_xattr_blkaddr(struct super_block *sb,
					      unsigned int xattr_id)
{
	return EROFS_SB(sb)->xattr_blkaddr +
	       erofs_blknr(sb, xattr_id * sizeof(__u32));
}

/* byte offset of shared xattr entry @xattr_id within its block */
static inline unsigned int erofs_xattr_blkoff(struct super_block *sb,
					      unsigned int xattr_id)
{
	return erofs_blkoff(sb, xattr_id * sizeof(__u32));
}

/*
 * Cursor over on-disk xattr metadata: (blkaddr, ofs) is the current
 * position, kaddr the mapped buffer for blkaddr (held in buf).
 */
struct xattr_iter {
	struct super_block *sb;
	struct erofs_buf buf;
	void *kaddr;

	erofs_blk_t blkaddr;
	unsigned int ofs;
};

/*
 * Lazily parse the inode's xattr ibody header and cache the shared
 * xattr id array into vi->xattr_shared_xattrs.
 *
 * Concurrency: EROFS_I_BL_XATTR_BIT serializes initializers;
 * EROFS_I_EA_INITED_BIT marks completion, with smp_mb() pairing so
 * that lockless fast-path readers observe the cached fields only
 * after the bit is seen set.
 *
 * Returns 0 on success, -ENOATTR if the inode has no xattrs, or a
 * negative errno on failure.
 */
static int erofs_init_inode_xattrs(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct xattr_iter it;
	unsigned int i;
	struct erofs_xattr_ibody_header *ih;
	struct super_block *sb = inode->i_sb;
	int ret = 0;

	/* the most case is that xattrs of this inode are initialized. */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_XATTR_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	/* someone has initialized xattrs for us? */
	if (test_bit(EROFS_I_EA_INITED_BIT, &vi->flags))
		goto out_unlock;

	/*
	 * bypass all xattr operations if ->xattr_isize is not greater than
	 * sizeof(struct erofs_xattr_ibody_header), in detail:
	 * 1) it is not enough to contain erofs_xattr_ibody_header then
	 *    ->xattr_isize should be 0 (it means no xattr);
	 * 2) it is just to contain erofs_xattr_ibody_header, which is on-disk
	 *    undefined right now (maybe use later with some new sb feature).
	 */
	if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
		erofs_err(sb,
			  "xattr_isize %d of nid %llu is not supported yet",
			  vi->xattr_isize, vi->nid);
		ret = -EOPNOTSUPP;
		goto out_unlock;
	} else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
		if (vi->xattr_isize) {
			erofs_err(sb, "bogus xattr ibody @ nid %llu", vi->nid);
			DBG_BUGON(1);
			ret = -EFSCORRUPTED;
			goto out_unlock;	/* xattr ondisk layout error */
		}
		ret = -ENOATTR;
		goto out_unlock;
	}

	/* the ibody header immediately follows the on-disk inode */
	it.buf = __EROFS_BUF_INITIALIZER;
	it.blkaddr = erofs_blknr(sb, erofs_iloc(inode) + vi->inode_isize);
	it.ofs = erofs_blkoff(sb, erofs_iloc(inode) + vi->inode_isize);

	/* read in shared xattr array (non-atomic, see kmalloc below) */
	it.kaddr = erofs_read_metabuf(&it.buf, sb, it.blkaddr, EROFS_KMAP);
	if (IS_ERR(it.kaddr)) {
		ret = PTR_ERR(it.kaddr);
		goto out_unlock;
	}

	/*
	 * NOTE(review): h_shared_count is taken from the on-disk header
	 * without validation against xattr_isize here — presumably bounded
	 * elsewhere; verify against the on-disk format checks.
	 */
	ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
	vi->xattr_shared_count = ih->h_shared_count;
	vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
						sizeof(uint), GFP_KERNEL);
	if (!vi->xattr_shared_xattrs) {
		erofs_put_metabuf(&it.buf);
		ret = -ENOMEM;
		goto out_unlock;
	}

	/* let's skip ibody header */
	it.ofs += sizeof(struct erofs_xattr_ibody_header);

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		/* the id array may cross into the next block */
		if (it.ofs >= sb->s_blocksize) {
			/* cannot be unaligned */
			DBG_BUGON(it.ofs != sb->s_blocksize);

			it.kaddr = erofs_read_metabuf(&it.buf, sb, ++it.blkaddr,
						      EROFS_KMAP);
			if (IS_ERR(it.kaddr)) {
				kfree(vi->xattr_shared_xattrs);
				vi->xattr_shared_xattrs = NULL;
				ret = PTR_ERR(it.kaddr);
				goto out_unlock;
			}
			it.ofs = 0;
		}
		vi->xattr_shared_xattrs[i] =
			le32_to_cpu(*(__le32 *)(it.kaddr + it.ofs));
		it.ofs += sizeof(__le32);
	}
	erofs_put_metabuf(&it.buf);

	/* paired with smp_mb() at the beginning of the function. */
	smp_mb();
	set_bit(EROFS_I_EA_INITED_BIT, &vi->flags);

out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_XATTR_BIT, &vi->flags);
	return ret;
}

/*
 * the general idea for these return values is
 * if    0 is returned, go on processing the current xattr;
 *       1 (> 0) is returned, skip this round to process the next xattr;
 *    -err (< 0) is returned, an error (maybe ENOXATTR) occurred
 *                            and need to be handled
 */
struct xattr_iter_handlers {
	int (*entry)(struct xattr_iter *_it, struct erofs_xattr_entry *entry);
	int (*name)(struct xattr_iter *_it, unsigned int processed, char *buf,
		    unsigned int len);
	int (*alloc_buffer)(struct xattr_iter *_it, unsigned int value_sz);
	void (*value)(struct xattr_iter *_it, unsigned int processed, char *buf,
		      unsigned int len);
};

/*
 * If the cursor has run past the current block, advance blkaddr and
 * remap so that (kaddr, ofs) points at valid data again.  No-op when
 * ofs is still in range.
 */
static inline int xattr_iter_fixup(struct xattr_iter *it)
{
	if (it->ofs < it->sb->s_blocksize)
		return 0;

	it->blkaddr += erofs_blknr(it->sb, it->ofs);
	it->kaddr = erofs_read_metabuf(&it->buf, it->sb, it->blkaddr,
				       EROFS_KMAP);
	if (IS_ERR(it->kaddr))
		return PTR_ERR(it->kaddr);
	it->ofs = erofs_blkoff(it->sb, it->ofs);
	return 0;
}

/*
 * Position @it at the first inline xattr entry of @inode (just past
 * the ibody header and the shared id array).
 *
 * Returns the number of inline xattr bytes remaining (> 0), -ENOATTR
 * if there is no inline area, or a negative errno on read failure.
 */
static int inline_xattr_iter_begin(struct xattr_iter *it,
				   struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	unsigned int xattr_header_sz, inline_xattr_ofs;

	xattr_header_sz = sizeof(struct erofs_xattr_ibody_header) +
			  sizeof(u32) * vi->xattr_shared_count;
	if (xattr_header_sz >= vi->xattr_isize) {
		DBG_BUGON(xattr_header_sz > vi->xattr_isize);
		return -ENOATTR;
	}

	inline_xattr_ofs = vi->inode_isize + xattr_header_sz;

	it->blkaddr = erofs_blknr(it->sb, erofs_iloc(inode) + inline_xattr_ofs);
	it->ofs = erofs_blkoff(it->sb, erofs_iloc(inode) + inline_xattr_ofs);
	it->kaddr = erofs_read_metabuf(&it->buf, inode->i_sb, it->blkaddr,
				       EROFS_KMAP);
	if (IS_ERR(it->kaddr))
		return PTR_ERR(it->kaddr);
	return vi->xattr_isize - xattr_header_sz;
}

/*
 * Regardless of success or failure, `xattr_foreach' will end up with
 * `ofs' pointing to the next xattr item rather than an arbitrary position.
 *
 * Walks one on-disk xattr at @it's current position, dispatching each
 * stage (entry header, name slices, value slices) to @op; name and
 * value may straddle block boundaries, hence the per-slice loops.
 * @tlimit, when non-NULL, is the remaining inline-area budget and is
 * decremented by this entry's size (a short budget means on-disk
 * corruption).
 */
static int xattr_foreach(struct xattr_iter *it,
			 const struct xattr_iter_handlers *op,
			 unsigned int *tlimit)
{
	struct erofs_xattr_entry entry;
	unsigned int value_sz, processed, slice;
	int err;

	/* 0. fixup blkaddr, ofs, ipage */
	err = xattr_iter_fixup(it);
	if (err)
		return err;

	/*
	 * 1. read xattr entry to the memory,
	 *    since we do EROFS_XATTR_ALIGN
	 *    therefore entry should be in the page
	 */
	entry = *(struct erofs_xattr_entry *)(it->kaddr + it->ofs);
	if (tlimit) {
		unsigned int entry_sz = erofs_xattr_entry_size(&entry);

		/* xattr on-disk corruption: xattr entry beyond xattr_isize */
		if (*tlimit < entry_sz) {
			DBG_BUGON(1);
			return -EFSCORRUPTED;
		}
		*tlimit -= entry_sz;
	}

	it->ofs += sizeof(struct erofs_xattr_entry);
	value_sz = le16_to_cpu(entry.e_value_size);

	/* handle entry */
	err = op->entry(it, &entry);
	if (err) {
		/* skip the whole name + value so ofs stays consistent */
		it->ofs += entry.e_name_len + value_sz;
		goto out;
	}

	/* 2. handle xattr name (ofs will finally be at the end of name) */
	processed = 0;

	while (processed < entry.e_name_len) {
		if (it->ofs >= it->sb->s_blocksize) {
			DBG_BUGON(it->ofs > it->sb->s_blocksize);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned int, it->sb->s_blocksize - it->ofs,
			      entry.e_name_len - processed);

		/* handle name */
		err = op->name(it, processed, it->kaddr + it->ofs, slice);
		if (err) {
			/* skip the unread remainder of name plus the value */
			it->ofs += entry.e_name_len - processed + value_sz;
			goto out;
		}

		it->ofs += slice;
		processed += slice;
	}

	/* 3. handle xattr value */
	processed = 0;

	if (op->alloc_buffer) {
		err = op->alloc_buffer(it, value_sz);
		if (err) {
			it->ofs += value_sz;
			goto out;
		}
	}

	while (processed < value_sz) {
		if (it->ofs >= it->sb->s_blocksize) {
			DBG_BUGON(it->ofs > it->sb->s_blocksize);

			err = xattr_iter_fixup(it);
			if (err)
				goto out;
			it->ofs = 0;
		}

		slice = min_t(unsigned int, it->sb->s_blocksize - it->ofs,
			      value_sz - processed);
		op->value(it, processed, it->kaddr + it->ofs, slice);
		it->ofs += slice;
		processed += slice;
	}

out:
	/* xattrs should be 4-byte aligned (on-disk constraint) */
	it->ofs = EROFS_XATTR_ALIGN(it->ofs);
	return err < 0 ? err : 0;
}

/* lookup state for erofs_getxattr(): target (index, name) and dest buffer */
struct getxattr_iter {
	struct xattr_iter it;

	char *buffer;
	int buffer_size, index;
	struct qstr name;
};

/* entry handler: reject entries whose index/name length don't match */
static int xattr_entrymatch(struct xattr_iter *_it,
			    struct erofs_xattr_entry *entry)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	return (it->index != entry->e_name_index ||
		it->name.len != entry->e_name_len) ? -ENOATTR : 0;
}

/* name handler: compare the current name slice against the target */
static int xattr_namematch(struct xattr_iter *_it,
			   unsigned int processed, char *buf, unsigned int len)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	return memcmp(buf, it->name.name + processed, len) ? -ENOATTR : 0;
}

/*
 * alloc_buffer handler: record the value size; with no destination
 * buffer (size query) return 1 to skip copying, otherwise -ERANGE if
 * the caller's buffer is too small.
 */
static int xattr_checkbuffer(struct xattr_iter *_it,
			     unsigned int value_sz)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);
	int err = it->buffer_size < value_sz ? -ERANGE : 0;

	it->buffer_size = value_sz;
	return !it->buffer ? 1 : err;
}

/* value handler: copy the matched value slice into the caller's buffer */
static void xattr_copyvalue(struct xattr_iter *_it,
			    unsigned int processed,
			    char *buf, unsigned int len)
{
	struct getxattr_iter *it = container_of(_it, struct getxattr_iter, it);

	memcpy(it->buffer + processed, buf, len);
}

static const struct xattr_iter_handlers find_xattr_handlers = {
	.entry = xattr_entrymatch,
	.name = xattr_namematch,
	.alloc_buffer = xattr_checkbuffer,
	.value = xattr_copyvalue
};

/*
 * Search the inode's inline xattr area for the target; returns the
 * value size on success, -ENOATTR when not found, or a negative errno.
 */
static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	int ret;
	unsigned int remaining;

	ret = inline_xattr_iter_begin(&it->it, inode);
	if (ret < 0)
		return ret;

	remaining = ret;
	while (remaining) {
		ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
		if (ret != -ENOATTR)
			break;
	}
	return ret ? ret : it->buffer_size;
}

/*
 * Search the inode's shared xattrs (ids cached at init time) for the
 * target; same return convention as inline_getxattr().
 */
static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = it->it.sb;
	unsigned int i, xsid;
	int ret = -ENOATTR;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		xsid = vi->xattr_shared_xattrs[i];
		it->it.blkaddr = erofs_xattr_blkaddr(sb, xsid);
		it->it.ofs = erofs_xattr_blkoff(sb, xsid);
		it->it.kaddr = erofs_read_metabuf(&it->it.buf, sb,
						  it->it.blkaddr, EROFS_KMAP);
		if (IS_ERR(it->it.kaddr))
			return PTR_ERR(it->it.kaddr);

		ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
		if (ret != -ENOATTR)
			break;
	}
	return ret ? ret : it->buffer_size;
}

/* list user.* xattrs only when the mount enables XATTR_USER */
static bool erofs_xattr_user_list(struct dentry *dentry)
{
	return test_opt(&EROFS_SB(dentry->d_sb)->opt, XATTR_USER);
}

/* trusted.* xattrs are visible to CAP_SYS_ADMIN only */
static bool erofs_xattr_trusted_list(struct dentry *dentry)
{
	return capable(CAP_SYS_ADMIN);
}

/*
 * Look up xattr (@index, @name) of @inode, copying the value into
 * @buffer when non-NULL.  Returns the value size, or a negative errno
 * (-ENOATTR when absent, -ERANGE when @buffer_size is too small).
 * Inline xattrs are searched before shared ones.
 */
int erofs_getxattr(struct inode *inode, int index,
		   const char *name,
		   void *buffer, size_t buffer_size)
{
	int ret;
	struct getxattr_iter it;

	if (!name)
		return -EINVAL;

	ret = erofs_init_inode_xattrs(inode);
	if (ret)
		return ret;

	it.index = index;
	it.name.len = strlen(name);
	if (it.name.len > EROFS_NAME_LEN)
		return -ERANGE;

	it.it.buf = __EROFS_BUF_INITIALIZER;
	it.name.name = name;

	it.buffer = buffer;
	it.buffer_size = buffer_size;

	it.it.sb = inode->i_sb;
	ret = inline_getxattr(inode, &it);
	if (ret == -ENOATTR)
		ret = shared_getxattr(inode, &it);
	erofs_put_metabuf(&it.it.buf);
	return ret;
}

/*
 * VFS ->get hook shared by the user/trusted/security handlers;
 * handler->flags carries the on-disk name index.
 */
static int erofs_xattr_generic_get(const struct xattr_handler *handler,
				   struct dentry *unused, struct inode *inode,
				   const char *name, void *buffer, size_t size)
{
	struct erofs_sb_info *const sbi = EROFS_I_SB(inode);

	switch (handler->flags) {
	case EROFS_XATTR_INDEX_USER:
		if (!test_opt(&sbi->opt, XATTR_USER))
			return -EOPNOTSUPP;
		break;
	case EROFS_XATTR_INDEX_TRUSTED:
		break;
	case EROFS_XATTR_INDEX_SECURITY:
		break;
	default:
		return -EINVAL;
	}

	return erofs_getxattr(inode, handler->flags, name, buffer, size);
}

const struct xattr_handler erofs_xattr_user_handler = {
	.prefix	= XATTR_USER_PREFIX,
	.flags	= EROFS_XATTR_INDEX_USER,
	.list	= erofs_xattr_user_list,
	.get	= erofs_xattr_generic_get,
};

const struct xattr_handler erofs_xattr_trusted_handler = {
	.prefix	= XATTR_TRUSTED_PREFIX,
	.flags	= EROFS_XATTR_INDEX_TRUSTED,
	.list	= erofs_xattr_trusted_list,
	.get	= erofs_xattr_generic_get,
};

#ifdef CONFIG_EROFS_FS_SECURITY
const struct xattr_handler __maybe_unused erofs_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.flags	= EROFS_XATTR_INDEX_SECURITY,
	.get	= erofs_xattr_generic_get,
};
#endif

const struct xattr_handler *erofs_xattr_handlers[] = {
	&erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&erofs_xattr_trusted_handler,
#ifdef CONFIG_EROFS_FS_SECURITY
	&erofs_xattr_security_handler,
#endif
	NULL,
};

/*
 * listxattr state: buffer == NULL means "measure only", in which case
 * buffer_ofs accumulates the required size.
 */
struct listxattr_iter {
	struct xattr_iter it;

	struct dentry *dentry;
	char *buffer;
	int buffer_size, buffer_ofs;
};

/*
 * entry handler for listing: filter via the handler's ->list hook,
 * then emit (or just size) "prefix" for this entry.
 */
static int xattr_entrylist(struct xattr_iter *_it,
			   struct erofs_xattr_entry *entry)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);
	unsigned int prefix_len;
	const char *prefix;

	const struct xattr_handler *h =
		erofs_xattr_handler(entry->e_name_index);

	if (!h || (h->list && !h->list(it->dentry)))
		return 1;

	prefix = xattr_prefix(h);
	prefix_len = strlen(prefix);

	if (!it->buffer) {
		/* size query: count "prefix + name + NUL" and skip the rest */
		it->buffer_ofs += prefix_len + entry->e_name_len + 1;
		return 1;
	}

	if (it->buffer_ofs + prefix_len
		+ entry->e_name_len + 1 > it->buffer_size)
		return -ERANGE;

	memcpy(it->buffer + it->buffer_ofs, prefix, prefix_len);
	it->buffer_ofs += prefix_len;
	return 0;
}

/* name handler for listing: append the name slice after the prefix */
static int xattr_namelist(struct xattr_iter *_it,
			  unsigned int processed, char *buf, unsigned int len)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);

	memcpy(it->buffer + it->buffer_ofs, buf, len);
	it->buffer_ofs += len;
	return 0;
}

/*
 * alloc_buffer handler for listing: terminate the name and return 1
 * so the value phase is skipped entirely.
 */
static int xattr_skipvalue(struct xattr_iter *_it,
			   unsigned int value_sz)
{
	struct listxattr_iter *it =
		container_of(_it, struct listxattr_iter, it);

	it->buffer[it->buffer_ofs++] = '\0';
	return 1;
}

static const struct xattr_iter_handlers list_xattr_handlers = {
	.entry = xattr_entrylist,
	.name = xattr_namelist,
	.alloc_buffer = xattr_skipvalue,
	.value = NULL
};

/* list names from the inline xattr area; returns bytes emitted so far */
static int inline_listxattr(struct listxattr_iter *it)
{
	int ret;
	unsigned int remaining;

	ret = inline_xattr_iter_begin(&it->it, d_inode(it->dentry));
	if (ret < 0)
		return ret;

	remaining = ret;
	while (remaining) {
		ret = xattr_foreach(&it->it, &list_xattr_handlers, &remaining);
		if (ret)
			break;
	}
	return ret ? ret : it->buffer_ofs;
}

/* list names from the shared xattrs; returns total bytes emitted */
static int shared_listxattr(struct listxattr_iter *it)
{
	struct inode *const inode = d_inode(it->dentry);
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = it->it.sb;
	unsigned int i, xsid;
	int ret = 0;

	for (i = 0; i < vi->xattr_shared_count; ++i) {
		xsid = vi->xattr_shared_xattrs[i];
		it->it.blkaddr = erofs_xattr_blkaddr(sb, xsid);
		it->it.ofs = erofs_xattr_blkoff(sb, xsid);
		it->it.kaddr = erofs_read_metabuf(&it->it.buf, sb,
						  it->it.blkaddr, EROFS_KMAP);
		if (IS_ERR(it->it.kaddr))
			return PTR_ERR(it->it.kaddr);

		ret = xattr_foreach(&it->it, &list_xattr_handlers, NULL);
		if (ret)
			break;
	}
	return ret ? ret : it->buffer_ofs;
}

/*
 * VFS listxattr entry point: concatenate inline then shared names into
 * @buffer (or just size them when @buffer is NULL).  Returns the total
 * length, 0 when the inode has no xattrs, or a negative errno.
 */
ssize_t erofs_listxattr(struct dentry *dentry,
			char *buffer, size_t buffer_size)
{
	int ret;
	struct listxattr_iter it;

	ret = erofs_init_inode_xattrs(d_inode(dentry));
	if (ret == -ENOATTR)
		return 0;
	if (ret)
		return ret;

	it.it.buf = __EROFS_BUF_INITIALIZER;
	it.dentry = dentry;
	it.buffer = buffer;
	it.buffer_size = buffer_size;
	it.buffer_ofs = 0;

	it.it.sb = dentry->d_sb;

	ret = inline_listxattr(&it);
	if (ret >= 0 || ret == -ENOATTR)
		ret = shared_listxattr(&it);
	erofs_put_metabuf(&it.it.buf);
	return ret;
}

#ifdef CONFIG_EROFS_FS_POSIX_ACL
/*
 * Fetch the POSIX ACL of @type from the corresponding system.* xattr
 * (stored with an empty name).  Returns NULL when absent, the parsed
 * ACL on success, or an ERR_PTR; -ECHILD under RCU walk as the lookup
 * may sleep.
 */
struct posix_acl *erofs_get_acl(struct inode *inode, int type, bool rcu)
{
	struct posix_acl *acl;
	int prefix, rc;
	char *value = NULL;

	if (rcu)
		return ERR_PTR(-ECHILD);

	switch (type) {
	case ACL_TYPE_ACCESS:
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_ACCESS;
		break;
	case ACL_TYPE_DEFAULT:
		prefix = EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	/* size query first, then read into a buffer of that size */
	rc = erofs_getxattr(inode, prefix, "", NULL, 0);
	if (rc > 0) {
		value = kmalloc(rc, GFP_KERNEL);
		if (!value)
			return ERR_PTR(-ENOMEM);
		rc = erofs_getxattr(inode, prefix, "", value, rc);
	}

	if (rc == -ENOATTR)
		acl = NULL;
	else if (rc < 0)
		acl = ERR_PTR(rc);
	else
		acl = posix_acl_from_xattr(&init_user_ns, value, rc);
	kfree(value);
	return acl;
}
#endif