1 // SPDX-License-Identifier: GPL-2.0 2 #include <linux/ceph/ceph_debug.h> 3 #include <linux/ceph/pagelist.h> 4 5 #include "super.h" 6 #include "mds_client.h" 7 8 #include <linux/ceph/decode.h> 9 10 #include <linux/xattr.h> 11 #include <linux/posix_acl_xattr.h> 12 #include <linux/slab.h> 13 14 #define XATTR_CEPH_PREFIX "ceph." 15 #define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1) 16 17 static int __remove_xattr(struct ceph_inode_info *ci, 18 struct ceph_inode_xattr *xattr); 19 20 static const struct xattr_handler ceph_other_xattr_handler; 21 22 /* 23 * List of handlers for synthetic system.* attributes. Other 24 * attributes are handled directly. 25 */ 26 const struct xattr_handler *ceph_xattr_handlers[] = { 27 #ifdef CONFIG_CEPH_FS_POSIX_ACL 28 &posix_acl_access_xattr_handler, 29 &posix_acl_default_xattr_handler, 30 #endif 31 &ceph_other_xattr_handler, 32 NULL, 33 }; 34 35 static bool ceph_is_valid_xattr(const char *name) 36 { 37 return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) || 38 !strncmp(name, XATTR_SECURITY_PREFIX, 39 XATTR_SECURITY_PREFIX_LEN) || 40 !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) || 41 !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN); 42 } 43 44 /* 45 * These define virtual xattrs exposing the recursive directory 46 * statistics and layout metadata. 47 */ 48 struct ceph_vxattr { 49 char *name; 50 size_t name_size; /* strlen(name) + 1 (for '\0') */ 51 size_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val, 52 size_t size); 53 bool readonly, hidden; 54 bool (*exists_cb)(struct ceph_inode_info *ci); 55 }; 56 57 /* layouts */ 58 59 static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci) 60 { 61 struct ceph_file_layout *fl = &ci->i_layout; 62 return (fl->stripe_unit > 0 || fl->stripe_count > 0 || 63 fl->object_size > 0 || fl->pool_id >= 0 || 64 rcu_dereference_raw(fl->pool_ns) != NULL); 65 } 66 67 static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val, 68 size_t size) 69 { 70 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb); 71 struct ceph_osd_client *osdc = &fsc->client->osdc; 72 struct ceph_string *pool_ns; 73 s64 pool = ci->i_layout.pool_id; 74 const char *pool_name; 75 const char *ns_field = " pool_namespace="; 76 char buf[128]; 77 size_t len, total_len = 0; 78 int ret; 79 80 pool_ns = ceph_try_get_string(ci->i_layout.pool_ns); 81 82 dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode); 83 down_read(&osdc->lock); 84 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool); 85 if (pool_name) { 86 len = snprintf(buf, sizeof(buf), 87 "stripe_unit=%u stripe_count=%u object_size=%u pool=", 88 ci->i_layout.stripe_unit, ci->i_layout.stripe_count, 89 ci->i_layout.object_size); 90 total_len = len + strlen(pool_name); 91 } else { 92 len = snprintf(buf, sizeof(buf), 93 "stripe_unit=%u stripe_count=%u object_size=%u pool=%lld", 94 ci->i_layout.stripe_unit, ci->i_layout.stripe_count, 95 ci->i_layout.object_size, (unsigned long long)pool); 96 total_len = len; 97 } 98 99 if (pool_ns) 100 total_len += strlen(ns_field) + pool_ns->len; 101 102 if (!size) { 103 ret = total_len; 104 } else if (total_len > size) { 105 ret = -ERANGE; 106 } else { 107 memcpy(val, buf, len); 108 ret = len; 109 if (pool_name) { 110 len = strlen(pool_name); 111 memcpy(val + ret, pool_name, len); 112 ret += len; 113 } 114 if (pool_ns) { 115 len = strlen(ns_field); 116 memcpy(val + ret, ns_field, len); 117 ret += len; 118 memcpy(val + ret, pool_ns->str, pool_ns->len); 119 ret += pool_ns->len; 120 } 121 } 122 up_read(&osdc->lock); 123 ceph_put_string(pool_ns); 124 return ret; 125 } 126 127 static size_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci, 128 char *val, size_t size) 129 { 130 return snprintf(val, size, "%u", ci->i_layout.stripe_unit); 131 } 132 133 static size_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci, 134 char *val, size_t size) 135 { 136 return snprintf(val, size, "%u", ci->i_layout.stripe_count); 137 } 138 139 static size_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci, 140 char *val, size_t size) 141 { 142 return snprintf(val, size, "%u", ci->i_layout.object_size); 143 } 144 145 static size_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci, 146 char *val, size_t size) 147 { 148 int ret; 149 struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb); 150 struct ceph_osd_client *osdc = &fsc->client->osdc; 151 s64 pool = ci->i_layout.pool_id; 152 const char *pool_name; 153 154 down_read(&osdc->lock); 155 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool); 156 if (pool_name) 157 ret = snprintf(val, size, "%s", pool_name); 158 else 159 ret = snprintf(val, size, "%lld", (unsigned long long)pool); 160 up_read(&osdc->lock); 161 return ret; 162 } 163 164 static size_t ceph_vxattrcb_layout_pool_namespace(struct ceph_inode_info *ci, 165 char *val, size_t size) 166 { 167 int ret = 0; 168 struct ceph_string *ns = ceph_try_get_string(ci->i_layout.pool_ns); 169 if (ns) { 170 ret = snprintf(val, size, "%.*s", (int)ns->len, ns->str); 171 ceph_put_string(ns); 172 } 173 return ret; 174 } 175 176 /* directories */ 177 178 static size_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val, 179 size_t size) 180 { 181 return snprintf(val, size, "%lld", ci->i_files + ci->i_subdirs); 182 } 183 184 static size_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val, 185 size_t size) 186 { 187 return snprintf(val, size, "%lld", ci->i_files); 188 } 189 190 static size_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val, 191 size_t size) 192 { 193 return snprintf(val, size, "%lld", ci->i_subdirs); 194 } 195 196 static size_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val, 197 size_t size) 198 { 199 return snprintf(val, size, "%lld", ci->i_rfiles + ci->i_rsubdirs); 200 } 201 202 static size_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val, 203 size_t size) 204 { 205 return snprintf(val, size, "%lld", ci->i_rfiles); 206 } 207 208 static size_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val, 209 size_t size) 210 { 211 return snprintf(val, size, "%lld", ci->i_rsubdirs); 212 } 213 214 static size_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val, 215 size_t size) 216 { 217 return snprintf(val, size, "%lld", ci->i_rbytes); 218 } 219 220 static size_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val, 221 size_t size) 222 { 223 return snprintf(val, size, "%ld.09%ld", (long)ci->i_rctime.tv_sec, 224 (long)ci->i_rctime.tv_nsec); 225 } 226 227 /* quotas */ 228 229 static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci) 230 { 231 bool ret = false; 232 spin_lock(&ci->i_ceph_lock); 233 if ((ci->i_max_files || ci->i_max_bytes) && 234 ci->i_vino.snap == CEPH_NOSNAP && 235 ci->i_snap_realm && 236 ci->i_snap_realm->ino == ci->i_vino.ino) 237 ret = true; 238 spin_unlock(&ci->i_ceph_lock); 239 return ret; 240 } 241 242 static size_t ceph_vxattrcb_quota(struct ceph_inode_info *ci, char *val, 243 size_t size) 244 { 245 return snprintf(val, size, "max_bytes=%llu max_files=%llu", 246 ci->i_max_bytes, ci->i_max_files); 247 } 248 249 static size_t ceph_vxattrcb_quota_max_bytes(struct ceph_inode_info *ci, 250 char *val, size_t size) 251 { 252 return snprintf(val, size, "%llu", ci->i_max_bytes); 253 } 254 255 static size_t ceph_vxattrcb_quota_max_files(struct ceph_inode_info *ci, 256 char *val, size_t size) 257 { 258 return snprintf(val, size, "%llu", ci->i_max_files); 259 } 260 261 #define CEPH_XATTR_NAME(_type, _name) XATTR_CEPH_PREFIX #_type "." #_name 262 #define CEPH_XATTR_NAME2(_type, _name, _name2) \ 263 XATTR_CEPH_PREFIX #_type "." #_name "." #_name2 264 265 #define XATTR_NAME_CEPH(_type, _name) \ 266 { \ 267 .name = CEPH_XATTR_NAME(_type, _name), \ 268 .name_size = sizeof (CEPH_XATTR_NAME(_type, _name)), \ 269 .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \ 270 .readonly = true, \ 271 .hidden = false, \ 272 .exists_cb = NULL, \ 273 } 274 #define XATTR_LAYOUT_FIELD(_type, _name, _field) \ 275 { \ 276 .name = CEPH_XATTR_NAME2(_type, _name, _field), \ 277 .name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \ 278 .getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field, \ 279 .readonly = false, \ 280 .hidden = true, \ 281 .exists_cb = ceph_vxattrcb_layout_exists, \ 282 } 283 #define XATTR_QUOTA_FIELD(_type, _name) \ 284 { \ 285 .name = CEPH_XATTR_NAME(_type, _name), \ 286 .name_size = sizeof(CEPH_XATTR_NAME(_type, _name)), \ 287 .getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name, \ 288 .readonly = false, \ 289 .hidden = true, \ 290 .exists_cb = ceph_vxattrcb_quota_exists, \ 291 } 292 293 static struct ceph_vxattr ceph_dir_vxattrs[] = { 294 { 295 .name = "ceph.dir.layout", 296 .name_size = sizeof("ceph.dir.layout"), 297 .getxattr_cb = ceph_vxattrcb_layout, 298 .readonly = false, 299 .hidden = true, 300 .exists_cb = ceph_vxattrcb_layout_exists, 301 }, 302 XATTR_LAYOUT_FIELD(dir, layout, stripe_unit), 303 XATTR_LAYOUT_FIELD(dir, layout, stripe_count), 304 XATTR_LAYOUT_FIELD(dir, layout, object_size), 305 XATTR_LAYOUT_FIELD(dir, layout, pool), 306 XATTR_LAYOUT_FIELD(dir, layout, pool_namespace), 307 XATTR_NAME_CEPH(dir, entries), 308 XATTR_NAME_CEPH(dir, files), 309 XATTR_NAME_CEPH(dir, subdirs), 310 XATTR_NAME_CEPH(dir, rentries), 311 XATTR_NAME_CEPH(dir, rfiles), 312 XATTR_NAME_CEPH(dir, rsubdirs), 313 XATTR_NAME_CEPH(dir, rbytes), 314 XATTR_NAME_CEPH(dir, rctime), 315 { 316 .name = "ceph.quota", 317 .name_size = sizeof("ceph.quota"), 318 .getxattr_cb = ceph_vxattrcb_quota, 319 .readonly = false, 320 .hidden = true, 321 .exists_cb = ceph_vxattrcb_quota_exists, 322 }, 323 XATTR_QUOTA_FIELD(quota, max_bytes), 324 XATTR_QUOTA_FIELD(quota, max_files), 325 { .name = NULL, 0 } /* Required table terminator */ 326 }; 327 static size_t ceph_dir_vxattrs_name_size; /* total size of all names */ 328 329 /* files */ 330 331 static struct ceph_vxattr ceph_file_vxattrs[] = { 332 { 333 .name = "ceph.file.layout", 334 .name_size = sizeof("ceph.file.layout"), 335 .getxattr_cb = ceph_vxattrcb_layout, 336 .readonly = false, 337 .hidden = true, 338 .exists_cb = ceph_vxattrcb_layout_exists, 339 }, 340 XATTR_LAYOUT_FIELD(file, layout, stripe_unit), 341 XATTR_LAYOUT_FIELD(file, layout, stripe_count), 342 XATTR_LAYOUT_FIELD(file, layout, object_size), 343 XATTR_LAYOUT_FIELD(file, layout, pool), 344 XATTR_LAYOUT_FIELD(file, layout, pool_namespace), 345 { .name = NULL, 0 } /* Required table terminator */ 346 }; 347 static size_t ceph_file_vxattrs_name_size; /* total size of all names */ 348 349 static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode) 350 { 351 if (S_ISDIR(inode->i_mode)) 352 return ceph_dir_vxattrs; 353 else if (S_ISREG(inode->i_mode)) 354 return ceph_file_vxattrs; 355 return NULL; 356 } 357 358 static size_t ceph_vxattrs_name_size(struct ceph_vxattr *vxattrs) 359 { 360 if (vxattrs == ceph_dir_vxattrs) 361 return ceph_dir_vxattrs_name_size; 362 if (vxattrs == ceph_file_vxattrs) 363 return ceph_file_vxattrs_name_size; 364 BUG_ON(vxattrs); 365 return 0; 366 } 367 368 /* 369 * Compute the aggregate size (including terminating '\0') of all 370 * virtual extended attribute names in the given vxattr table. 371 */ 372 static size_t __init vxattrs_name_size(struct ceph_vxattr *vxattrs) 373 { 374 struct ceph_vxattr *vxattr; 375 size_t size = 0; 376 377 for (vxattr = vxattrs; vxattr->name; vxattr++) 378 if (!vxattr->hidden) 379 size += vxattr->name_size; 380 381 return size; 382 } 383 384 /* Routines called at initialization and exit time */ 385 386 void __init ceph_xattr_init(void) 387 { 388 ceph_dir_vxattrs_name_size = vxattrs_name_size(ceph_dir_vxattrs); 389 ceph_file_vxattrs_name_size = vxattrs_name_size(ceph_file_vxattrs); 390 } 391 392 void ceph_xattr_exit(void) 393 { 394 ceph_dir_vxattrs_name_size = 0; 395 ceph_file_vxattrs_name_size = 0; 396 } 397 398 static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode, 399 const char *name) 400 { 401 struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode); 402 403 if (vxattr) { 404 while (vxattr->name) { 405 if (!strcmp(vxattr->name, name)) 406 return vxattr; 407 vxattr++; 408 } 409 } 410 411 return NULL; 412 } 413 414 static int __set_xattr(struct ceph_inode_info *ci, 415 const char *name, int name_len, 416 const char *val, int val_len, 417 int flags, int update_xattr, 418 struct ceph_inode_xattr **newxattr) 419 { 420 struct rb_node **p; 421 struct rb_node *parent = NULL; 422 struct ceph_inode_xattr *xattr = NULL; 423 int c; 424 int new = 0; 425 426 p = &ci->i_xattrs.index.rb_node; 427 while (*p) { 428 parent = *p; 429 xattr = rb_entry(parent, struct ceph_inode_xattr, node); 430 c = strncmp(name, xattr->name, min(name_len, xattr->name_len)); 431 if (c < 0) 432 p = &(*p)->rb_left; 433 else if (c > 0) 434 p = &(*p)->rb_right; 435 else { 436 if (name_len == xattr->name_len) 437 break; 438 else if (name_len < xattr->name_len) 439 p = &(*p)->rb_left; 440 else 441 p = &(*p)->rb_right; 442 } 443 xattr = NULL; 444 } 445 446 if (update_xattr) { 447 int err = 0; 448 449 if (xattr && (flags & XATTR_CREATE)) 450 err = -EEXIST; 451 else if (!xattr && (flags & XATTR_REPLACE)) 452 err = -ENODATA; 453 if (err) { 454 kfree(name); 455 kfree(val); 456 kfree(*newxattr); 457 return err; 458 } 459 if (update_xattr < 0) { 460 if (xattr) 461 __remove_xattr(ci, xattr); 462 kfree(name); 463 kfree(*newxattr); 464 return 0; 465 } 466 } 467 468 if (!xattr) { 469 new = 1; 470 xattr = *newxattr; 471 xattr->name = name; 472 xattr->name_len = name_len; 473 xattr->should_free_name = update_xattr; 474 475 ci->i_xattrs.count++; 476 dout("__set_xattr count=%d\n", ci->i_xattrs.count); 477 } else { 478 kfree(*newxattr); 479 *newxattr = NULL; 480 if (xattr->should_free_val) 481 kfree((void *)xattr->val); 482 483 if (update_xattr) { 484 kfree((void *)name); 485 name = xattr->name; 486 } 487 ci->i_xattrs.names_size -= xattr->name_len; 488 ci->i_xattrs.vals_size -= xattr->val_len; 489 } 490 ci->i_xattrs.names_size += name_len; 491 ci->i_xattrs.vals_size += val_len; 492 if (val) 493 xattr->val = val; 494 else 495 xattr->val = ""; 496 497 xattr->val_len = val_len; 498 xattr->dirty = update_xattr; 499 xattr->should_free_val = (val && update_xattr); 500 501 if (new) { 502 rb_link_node(&xattr->node, parent, p); 503 rb_insert_color(&xattr->node, &ci->i_xattrs.index); 504 dout("__set_xattr_val p=%p\n", p); 505 } 506 507 dout("__set_xattr_val added %llx.%llx xattr %p %s=%.*s\n", 508 ceph_vinop(&ci->vfs_inode), xattr, name, val_len, val); 509 510 return 0; 511 } 512 513 static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci, 514 const char *name) 515 { 516 struct rb_node **p; 517 struct rb_node *parent = NULL; 518 struct ceph_inode_xattr *xattr = NULL; 519 int name_len = strlen(name); 520 int c; 521 522 p = &ci->i_xattrs.index.rb_node; 523 while (*p) { 524 parent = *p; 525 xattr = rb_entry(parent, struct ceph_inode_xattr, node); 526 c = strncmp(name, xattr->name, xattr->name_len); 527 if (c == 0 && name_len > xattr->name_len) 528 c = 1; 529 if (c < 0) 530 p = &(*p)->rb_left; 531 else if (c > 0) 532 p = &(*p)->rb_right; 533 else { 534 dout("__get_xattr %s: found %.*s\n", name, 535 xattr->val_len, xattr->val); 536 return xattr; 537 } 538 } 539 540 dout("__get_xattr %s: not found\n", name); 541 542 return NULL; 543 } 544 545 static void __free_xattr(struct ceph_inode_xattr *xattr) 546 { 547 BUG_ON(!xattr); 548 549 if (xattr->should_free_name) 550 kfree((void *)xattr->name); 551 if (xattr->should_free_val) 552 kfree((void *)xattr->val); 553 554 kfree(xattr); 555 } 556 557 static int __remove_xattr(struct ceph_inode_info *ci, 558 struct ceph_inode_xattr *xattr) 559 { 560 if (!xattr) 561 return -ENODATA; 562 563 rb_erase(&xattr->node, &ci->i_xattrs.index); 564 565 if (xattr->should_free_name) 566 kfree((void *)xattr->name); 567 if (xattr->should_free_val) 568 kfree((void *)xattr->val); 569 570 ci->i_xattrs.names_size -= xattr->name_len; 571 ci->i_xattrs.vals_size -= xattr->val_len; 572 ci->i_xattrs.count--; 573 kfree(xattr); 574 575 return 0; 576 } 577 578 static char *__copy_xattr_names(struct ceph_inode_info *ci, 579 char *dest) 580 { 581 struct rb_node *p; 582 struct ceph_inode_xattr *xattr = NULL; 583 584 p = rb_first(&ci->i_xattrs.index); 585 dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count); 586 587 while (p) { 588 xattr = rb_entry(p, struct ceph_inode_xattr, node); 589 memcpy(dest, xattr->name, xattr->name_len); 590 dest[xattr->name_len] = '\0'; 591 592 dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name, 593 xattr->name_len, ci->i_xattrs.names_size); 594 595 dest += xattr->name_len + 1; 596 p = rb_next(p); 597 } 598 599 return dest; 600 } 601 602 void __ceph_destroy_xattrs(struct ceph_inode_info *ci) 603 { 604 struct rb_node *p, *tmp; 605 struct ceph_inode_xattr *xattr = NULL; 606 607 p = rb_first(&ci->i_xattrs.index); 608 609 dout("__ceph_destroy_xattrs p=%p\n", p); 610 611 while (p) { 612 xattr = rb_entry(p, struct ceph_inode_xattr, node); 613 tmp = p; 614 p = rb_next(tmp); 615 dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p, 616 xattr->name_len, xattr->name); 617 rb_erase(tmp, &ci->i_xattrs.index); 618 619 __free_xattr(xattr); 620 } 621 622 ci->i_xattrs.names_size = 0; 623 ci->i_xattrs.vals_size = 0; 624 ci->i_xattrs.index_version = 0; 625 ci->i_xattrs.count = 0; 626 ci->i_xattrs.index = RB_ROOT; 627 } 628 629 static int __build_xattrs(struct inode *inode) 630 __releases(ci->i_ceph_lock) 631 __acquires(ci->i_ceph_lock) 632 { 633 u32 namelen; 634 u32 numattr = 0; 635 void *p, *end; 636 u32 len; 637 const char *name, *val; 638 struct ceph_inode_info *ci = ceph_inode(inode); 639 int xattr_version; 640 struct ceph_inode_xattr **xattrs = NULL; 641 int err = 0; 642 int i; 643 644 dout("__build_xattrs() len=%d\n", 645 ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0); 646 647 if (ci->i_xattrs.index_version >= ci->i_xattrs.version) 648 return 0; /* already built */ 649 650 __ceph_destroy_xattrs(ci); 651 652 start: 653 /* updated internal xattr rb tree */ 654 if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) { 655 p = ci->i_xattrs.blob->vec.iov_base; 656 end = p + ci->i_xattrs.blob->vec.iov_len; 657 ceph_decode_32_safe(&p, end, numattr, bad); 658 xattr_version = ci->i_xattrs.version; 659 spin_unlock(&ci->i_ceph_lock); 660 661 xattrs = kcalloc(numattr, sizeof(struct ceph_inode_xattr *), 662 GFP_NOFS); 663 err = -ENOMEM; 664 if (!xattrs) 665 goto bad_lock; 666 667 for (i = 0; i < numattr; i++) { 668 xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr), 669 GFP_NOFS); 670 if (!xattrs[i]) 671 goto bad_lock; 672 } 673 674 spin_lock(&ci->i_ceph_lock); 675 if (ci->i_xattrs.version != xattr_version) { 676 /* lost a race, retry */ 677 for (i = 0; i < numattr; i++) 678 kfree(xattrs[i]); 679 kfree(xattrs); 680 xattrs = NULL; 681 goto start; 682 } 683 err = -EIO; 684 while (numattr--) { 685 ceph_decode_32_safe(&p, end, len, bad); 686 namelen = len; 687 name = p; 688 p += len; 689 ceph_decode_32_safe(&p, end, len, bad); 690 val = p; 691 p += len; 692 693 err = __set_xattr(ci, name, namelen, val, len, 694 0, 0, &xattrs[numattr]); 695 696 if (err < 0) 697 goto bad; 698 } 699 kfree(xattrs); 700 } 701 ci->i_xattrs.index_version = ci->i_xattrs.version; 702 ci->i_xattrs.dirty = false; 703 704 return err; 705 bad_lock: 706 spin_lock(&ci->i_ceph_lock); 707 bad: 708 if (xattrs) { 709 for (i = 0; i < numattr; i++) 710 kfree(xattrs[i]); 711 kfree(xattrs); 712 } 713 ci->i_xattrs.names_size = 0; 714 return err; 715 } 716 717 static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size, 718 int val_size) 719 { 720 /* 721 * 4 bytes for the length, and additional 4 bytes per each xattr name, 722 * 4 bytes per each value 723 */ 724 int size = 4 + ci->i_xattrs.count*(4 + 4) + 725 ci->i_xattrs.names_size + 726 ci->i_xattrs.vals_size; 727 dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n", 728 ci->i_xattrs.count, ci->i_xattrs.names_size, 729 ci->i_xattrs.vals_size); 730 731 if (name_size) 732 size += 4 + 4 + name_size + val_size; 733 734 return size; 735 } 736 737 /* 738 * If there are dirty xattrs, reencode xattrs into the prealloc_blob 739 * and swap into place. 740 */ 741 void __ceph_build_xattrs_blob(struct ceph_inode_info *ci) 742 { 743 struct rb_node *p; 744 struct ceph_inode_xattr *xattr = NULL; 745 void *dest; 746 747 dout("__build_xattrs_blob %p\n", &ci->vfs_inode); 748 if (ci->i_xattrs.dirty) { 749 int need = __get_required_blob_size(ci, 0, 0); 750 751 BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len); 752 753 p = rb_first(&ci->i_xattrs.index); 754 dest = ci->i_xattrs.prealloc_blob->vec.iov_base; 755 756 ceph_encode_32(&dest, ci->i_xattrs.count); 757 while (p) { 758 xattr = rb_entry(p, struct ceph_inode_xattr, node); 759 760 ceph_encode_32(&dest, xattr->name_len); 761 memcpy(dest, xattr->name, xattr->name_len); 762 dest += xattr->name_len; 763 ceph_encode_32(&dest, xattr->val_len); 764 memcpy(dest, xattr->val, xattr->val_len); 765 dest += xattr->val_len; 766 767 p = rb_next(p); 768 } 769 770 /* adjust buffer len; it may be larger than we need */ 771 ci->i_xattrs.prealloc_blob->vec.iov_len = 772 dest - ci->i_xattrs.prealloc_blob->vec.iov_base; 773 774 if (ci->i_xattrs.blob) 775 ceph_buffer_put(ci->i_xattrs.blob); 776 ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob; 777 ci->i_xattrs.prealloc_blob = NULL; 778 ci->i_xattrs.dirty = false; 779 ci->i_xattrs.version++; 780 } 781 } 782 783 static inline int __get_request_mask(struct inode *in) { 784 struct ceph_mds_request *req = current->journal_info; 785 int mask = 0; 786 if (req && req->r_target_inode == in) { 787 if (req->r_op == CEPH_MDS_OP_LOOKUP || 788 req->r_op == CEPH_MDS_OP_LOOKUPINO || 789 req->r_op == CEPH_MDS_OP_LOOKUPPARENT || 790 req->r_op == CEPH_MDS_OP_GETATTR) { 791 mask = le32_to_cpu(req->r_args.getattr.mask); 792 } else if (req->r_op == CEPH_MDS_OP_OPEN || 793 req->r_op == CEPH_MDS_OP_CREATE) { 794 mask = le32_to_cpu(req->r_args.open.mask); 795 } 796 } 797 return mask; 798 } 799 800 ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value, 801 size_t size) 802 { 803 struct ceph_inode_info *ci = ceph_inode(inode); 804 struct ceph_inode_xattr *xattr; 805 struct ceph_vxattr *vxattr = NULL; 806 int req_mask; 807 int err; 808 809 /* let's see if a virtual xattr was requested */ 810 vxattr = ceph_match_vxattr(inode, name); 811 if (vxattr) { 812 err = ceph_do_getattr(inode, 0, true); 813 if (err) 814 return err; 815 err = -ENODATA; 816 if (!(vxattr->exists_cb && !vxattr->exists_cb(ci))) 817 err = vxattr->getxattr_cb(ci, value, size); 818 return err; 819 } 820 821 req_mask = __get_request_mask(inode); 822 823 spin_lock(&ci->i_ceph_lock); 824 dout("getxattr %p ver=%lld index_ver=%lld\n", inode, 825 ci->i_xattrs.version, ci->i_xattrs.index_version); 826 827 if (ci->i_xattrs.version == 0 || 828 !((req_mask & CEPH_CAP_XATTR_SHARED) || 829 __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1))) { 830 spin_unlock(&ci->i_ceph_lock); 831 832 /* security module gets xattr while filling trace */ 833 if (current->journal_info) { 834 pr_warn_ratelimited("sync getxattr %p " 835 "during filling trace\n", inode); 836 return -EBUSY; 837 } 838 839 /* get xattrs from mds (if we don't already have them) */ 840 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true); 841 if (err) 842 return err; 843 spin_lock(&ci->i_ceph_lock); 844 } 845 846 err = __build_xattrs(inode); 847 if (err < 0) 848 goto out; 849 850 err = -ENODATA; /* == ENOATTR */ 851 xattr = __get_xattr(ci, name); 852 if (!xattr) 853 goto out; 854 855 err = -ERANGE; 856 if (size && size < xattr->val_len) 857 goto out; 858 859 err = xattr->val_len; 860 if (size == 0) 861 goto out; 862 863 memcpy(value, xattr->val, xattr->val_len); 864 865 if (current->journal_info && 866 !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN)) 867 ci->i_ceph_flags |= CEPH_I_SEC_INITED; 868 out: 869 spin_unlock(&ci->i_ceph_lock); 870 return err; 871 } 872 873 ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size) 874 { 875 struct inode *inode = d_inode(dentry); 876 struct ceph_inode_info *ci = ceph_inode(inode); 877 struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode); 878 u32 vir_namelen = 0; 879 u32 namelen; 880 int err; 881 u32 len; 882 int i; 883 884 spin_lock(&ci->i_ceph_lock); 885 dout("listxattr %p ver=%lld index_ver=%lld\n", inode, 886 ci->i_xattrs.version, ci->i_xattrs.index_version); 887 888 if (ci->i_xattrs.version == 0 || 889 !__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1)) { 890 spin_unlock(&ci->i_ceph_lock); 891 err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true); 892 if (err) 893 return err; 894 spin_lock(&ci->i_ceph_lock); 895 } 896 897 err = __build_xattrs(inode); 898 if (err < 0) 899 goto out; 900 /* 901 * Start with virtual dir xattr names (if any) (including 902 * terminating '\0' characters for each). 903 */ 904 vir_namelen = ceph_vxattrs_name_size(vxattrs); 905 906 /* adding 1 byte per each variable due to the null termination */ 907 namelen = ci->i_xattrs.names_size + ci->i_xattrs.count; 908 err = -ERANGE; 909 if (size && vir_namelen + namelen > size) 910 goto out; 911 912 err = namelen + vir_namelen; 913 if (size == 0) 914 goto out; 915 916 names = __copy_xattr_names(ci, names); 917 918 /* virtual xattr names, too */ 919 err = namelen; 920 if (vxattrs) { 921 for (i = 0; vxattrs[i].name; i++) { 922 if (!vxattrs[i].hidden && 923 !(vxattrs[i].exists_cb && 924 !vxattrs[i].exists_cb(ci))) { 925 len = sprintf(names, "%s", vxattrs[i].name); 926 names += len + 1; 927 err += len + 1; 928 } 929 } 930 } 931 932 out: 933 spin_unlock(&ci->i_ceph_lock); 934 return err; 935 } 936 937 static int ceph_sync_setxattr(struct inode *inode, const char *name, 938 const char *value, size_t size, int flags) 939 { 940 struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb); 941 struct ceph_inode_info *ci = ceph_inode(inode); 942 struct ceph_mds_request *req; 943 struct ceph_mds_client *mdsc = fsc->mdsc; 944 struct ceph_pagelist *pagelist = NULL; 945 int op = CEPH_MDS_OP_SETXATTR; 946 int err; 947 948 if (size > 0) { 949 /* copy value into pagelist */ 950 pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS); 951 if (!pagelist) 952 return -ENOMEM; 953 954 ceph_pagelist_init(pagelist); 955 err = ceph_pagelist_append(pagelist, value, size); 956 if (err) 957 goto out; 958 } else if (!value) { 959 if (flags & CEPH_XATTR_REPLACE) 960 op = CEPH_MDS_OP_RMXATTR; 961 else 962 flags |= CEPH_XATTR_REMOVE; 963 } 964 965 dout("setxattr value=%.*s\n", (int)size, value); 966 967 /* do request */ 968 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); 969 if (IS_ERR(req)) { 970 err = PTR_ERR(req); 971 goto out; 972 } 973 974 req->r_path2 = kstrdup(name, GFP_NOFS); 975 if (!req->r_path2) { 976 ceph_mdsc_put_request(req); 977 err = -ENOMEM; 978 goto out; 979 } 980 981 if (op == CEPH_MDS_OP_SETXATTR) { 982 req->r_args.setxattr.flags = cpu_to_le32(flags); 983 req->r_pagelist = pagelist; 984 pagelist = NULL; 985 } 986 987 req->r_inode = inode; 988 ihold(inode); 989 req->r_num_caps = 1; 990 req->r_inode_drop = CEPH_CAP_XATTR_SHARED; 991 992 dout("xattr.ver (before): %lld\n", ci->i_xattrs.version); 993 err = ceph_mdsc_do_request(mdsc, NULL, req); 994 ceph_mdsc_put_request(req); 995 dout("xattr.ver (after): %lld\n", ci->i_xattrs.version); 996 997 out: 998 if (pagelist) 999 ceph_pagelist_release(pagelist); 1000 return err; 1001 } 1002 1003 int __ceph_setxattr(struct inode *inode, const char *name, 1004 const void *value, size_t size, int flags) 1005 { 1006 struct ceph_vxattr *vxattr; 1007 struct ceph_inode_info *ci = ceph_inode(inode); 1008 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc; 1009 struct ceph_cap_flush *prealloc_cf = NULL; 1010 int issued; 1011 int err; 1012 int dirty = 0; 1013 int name_len = strlen(name); 1014 int val_len = size; 1015 char *newname = NULL; 1016 char *newval = NULL; 1017 struct ceph_inode_xattr *xattr = NULL; 1018 int required_blob_size; 1019 bool check_realm = false; 1020 bool lock_snap_rwsem = false; 1021 1022 if (ceph_snap(inode) != CEPH_NOSNAP) 1023 return -EROFS; 1024 1025 vxattr = ceph_match_vxattr(inode, name); 1026 if (vxattr) { 1027 if (vxattr->readonly) 1028 return -EOPNOTSUPP; 1029 if (value && !strncmp(vxattr->name, "ceph.quota", 10)) 1030 check_realm = true; 1031 } 1032 1033 /* pass any unhandled ceph.* xattrs through to the MDS */ 1034 if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN)) 1035 goto do_sync_unlocked; 1036 1037 /* preallocate memory for xattr name, value, index node */ 1038 err = -ENOMEM; 1039 newname = kmemdup(name, name_len + 1, GFP_NOFS); 1040 if (!newname) 1041 goto out; 1042 1043 if (val_len) { 1044 newval = kmemdup(value, val_len, GFP_NOFS); 1045 if (!newval) 1046 goto out; 1047 } 1048 1049 xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS); 1050 if (!xattr) 1051 goto out; 1052 1053 prealloc_cf = ceph_alloc_cap_flush(); 1054 if (!prealloc_cf) 1055 goto out; 1056 1057 spin_lock(&ci->i_ceph_lock); 1058 retry: 1059 issued = __ceph_caps_issued(ci, NULL); 1060 if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL)) 1061 goto do_sync; 1062 1063 if (!lock_snap_rwsem && !ci->i_head_snapc) { 1064 lock_snap_rwsem = true; 1065 if (!down_read_trylock(&mdsc->snap_rwsem)) { 1066 spin_unlock(&ci->i_ceph_lock); 1067 down_read(&mdsc->snap_rwsem); 1068 spin_lock(&ci->i_ceph_lock); 1069 goto retry; 1070 } 1071 } 1072 1073 dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued)); 1074 __build_xattrs(inode); 1075 1076 required_blob_size = __get_required_blob_size(ci, name_len, val_len); 1077 1078 if (!ci->i_xattrs.prealloc_blob || 1079 required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) { 1080 struct ceph_buffer *blob; 1081 1082 spin_unlock(&ci->i_ceph_lock); 1083 dout(" preaallocating new blob size=%d\n", required_blob_size); 1084 blob = ceph_buffer_new(required_blob_size, GFP_NOFS); 1085 if (!blob) 1086 goto do_sync_unlocked; 1087 spin_lock(&ci->i_ceph_lock); 1088 if (ci->i_xattrs.prealloc_blob) 1089 ceph_buffer_put(ci->i_xattrs.prealloc_blob); 1090 ci->i_xattrs.prealloc_blob = blob; 1091 goto retry; 1092 } 1093 1094 err = __set_xattr(ci, newname, name_len, newval, val_len, 1095 flags, value ? 1 : -1, &xattr); 1096 1097 if (!err) { 1098 dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL, 1099 &prealloc_cf); 1100 ci->i_xattrs.dirty = true; 1101 inode->i_ctime = current_time(inode); 1102 } 1103 1104 spin_unlock(&ci->i_ceph_lock); 1105 if (lock_snap_rwsem) 1106 up_read(&mdsc->snap_rwsem); 1107 if (dirty) 1108 __mark_inode_dirty(inode, dirty); 1109 ceph_free_cap_flush(prealloc_cf); 1110 return err; 1111 1112 do_sync: 1113 spin_unlock(&ci->i_ceph_lock); 1114 do_sync_unlocked: 1115 if (lock_snap_rwsem) 1116 up_read(&mdsc->snap_rwsem); 1117 1118 /* security module set xattr while filling trace */ 1119 if (current->journal_info) { 1120 pr_warn_ratelimited("sync setxattr %p " 1121 "during filling trace\n", inode); 1122 err = -EBUSY; 1123 } else { 1124 err = ceph_sync_setxattr(inode, name, value, size, flags); 1125 if (err >= 0 && check_realm) { 1126 /* check if snaprealm was created for quota inode */ 1127 spin_lock(&ci->i_ceph_lock); 1128 if ((ci->i_max_files || ci->i_max_bytes) && 1129 !(ci->i_snap_realm && 1130 ci->i_snap_realm->ino == ci->i_vino.ino)) 1131 err = -EOPNOTSUPP; 1132 spin_unlock(&ci->i_ceph_lock); 1133 } 1134 } 1135 out: 1136 ceph_free_cap_flush(prealloc_cf); 1137 kfree(newname); 1138 kfree(newval); 1139 kfree(xattr); 1140 return err; 1141 } 1142 1143 static int ceph_get_xattr_handler(const struct xattr_handler *handler, 1144 struct dentry *dentry, struct inode *inode, 1145 const char *name, void *value, size_t size) 1146 { 1147 if (!ceph_is_valid_xattr(name)) 1148 return -EOPNOTSUPP; 1149 return __ceph_getxattr(inode, name, value, size); 1150 } 1151 1152 static int ceph_set_xattr_handler(const struct xattr_handler *handler, 1153 struct dentry *unused, struct inode *inode, 1154 const char *name, const void *value, 1155 size_t size, int flags) 1156 { 1157 if (!ceph_is_valid_xattr(name)) 1158 return -EOPNOTSUPP; 1159 return __ceph_setxattr(inode, name, value, size, flags); 1160 } 1161 1162 static const struct xattr_handler ceph_other_xattr_handler = { 1163 .prefix = "", /* match any name => handlers called with full name */ 1164 .get = ceph_get_xattr_handler, 1165 .set = ceph_set_xattr_handler, 1166 }; 1167 1168 #ifdef CONFIG_SECURITY 1169 bool ceph_security_xattr_wanted(struct inode *in) 1170 { 1171 return in->i_security != NULL; 1172 } 1173 1174 bool ceph_security_xattr_deadlock(struct inode *in) 1175 { 1176 struct ceph_inode_info *ci; 1177 bool ret; 1178 if (!in->i_security) 1179 return false; 1180 ci = ceph_inode(in); 1181 spin_lock(&ci->i_ceph_lock); 1182 ret = !(ci->i_ceph_flags & CEPH_I_SEC_INITED) && 1183 !(ci->i_xattrs.version > 0 && 1184 __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0)); 1185 spin_unlock(&ci->i_ceph_lock); 1186 return ret; 1187 } 1188 #endif 1189