// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/pagelist.h>

#include "super.h"
#include "mds_client.h"

#include <linux/ceph/decode.h>

#include <linux/xattr.h>
#include <linux/security.h>
#include <linux/posix_acl_xattr.h>
#include <linux/slab.h>

#define XATTR_CEPH_PREFIX "ceph."
#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)

static int __remove_xattr(struct ceph_inode_info *ci,
			  struct ceph_inode_xattr *xattr);

static bool ceph_is_valid_xattr(const char *name)
{
	return !strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN) ||
	       !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) ||
	       !strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
}

/*
 * These define virtual xattrs exposing the recursive directory
 * statistics and layout metadata.
 */
struct ceph_vxattr {
	char *name;
	size_t name_size;	/* strlen(name) + 1 (for '\0') */
	ssize_t (*getxattr_cb)(struct ceph_inode_info *ci, char *val,
			       size_t size);
	bool (*exists_cb)(struct ceph_inode_info *ci);
	unsigned int flags;
};

#define VXATTR_FLAG_READONLY		(1<<0)
#define VXATTR_FLAG_HIDDEN		(1<<1)
#define VXATTR_FLAG_RSTAT		(1<<2)

/* layouts */

static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
{
	struct ceph_file_layout *fl = &ci->i_layout;
	return (fl->stripe_unit > 0 || fl->stripe_count > 0 ||
		fl->object_size > 0 || fl->pool_id >= 0 ||
		rcu_dereference_raw(fl->pool_ns) != NULL);
}

static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
				    size_t size)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_string *pool_ns;
	s64 pool = ci->i_layout.pool_id;
	const char *pool_name;
	const char *ns_field = " pool_namespace=";
	char buf[128];
	size_t len, total_len = 0;
	ssize_t ret;

	pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);

	dout("ceph_vxattrcb_layout %p\n", &ci->vfs_inode);
	down_read(&osdc->lock);
	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
	if (pool_name) {
		len = snprintf(buf, sizeof(buf),
		"stripe_unit=%u stripe_count=%u object_size=%u pool=",
		ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
		ci->i_layout.object_size);
		total_len = len + strlen(pool_name);
	} else {
		len = snprintf(buf, sizeof(buf),
		"stripe_unit=%u stripe_count=%u object_size=%u pool=%lld",
		ci->i_layout.stripe_unit, ci->i_layout.stripe_count,
		ci->i_layout.object_size, pool);
		total_len = len;
	}

	if (pool_ns)
		total_len += strlen(ns_field) + pool_ns->len;

	ret = total_len;
	if (size >= total_len) {
		memcpy(val, buf, len);
		ret = len;
		if (pool_name) {
			len = strlen(pool_name);
			memcpy(val + ret, pool_name, len);
			ret += len;
		}
		if (pool_ns) {
			len = strlen(ns_field);
			memcpy(val + ret, ns_field, len);
			ret += len;
			memcpy(val + ret, pool_ns->str, pool_ns->len);
			ret += pool_ns->len;
		}
	}
	up_read(&osdc->lock);
	ceph_put_string(pool_ns);
	return ret;
}

/*
 * The convention with strings in xattrs is that they should not be NULL
 * terminated, since we're returning the length with them. snprintf always
 * NULL terminates however, so call it on a temporary buffer and then memcpy
 * the result into place.
 */
static int ceph_fmt_xattr(char *val, size_t size, const char *fmt, ...)
{
	int ret;
	va_list args;
	char buf[96]; /* NB: reevaluate size if new vxattrs are added */

	va_start(args, fmt);
	ret = vsnprintf(buf, size ? sizeof(buf) : 0, fmt, args);
	va_end(args);

	/* Sanity check */
	if (size && ret + 1 > sizeof(buf)) {
		WARN_ONCE(true, "Returned length too big (%d)", ret);
		return -E2BIG;
	}

	if (ret <= size)
		memcpy(val, buf, ret);
	return ret;
}
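
/*
 * Illustrative sketch (the names "buf" and "stripe_unit" are hypothetical,
 * not from this file): the helpers above follow the usual getxattr(2)
 * length-probe convention.  A caller may pass size == 0 to learn how many
 * bytes are needed and only then format into a real buffer:
 *
 *	len = ceph_fmt_xattr(NULL, 0, "%u", stripe_unit);
 *	if (len > 0 && len <= sizeof(buf))
 *		len = ceph_fmt_xattr(buf, sizeof(buf), "%u", stripe_unit);
 *
 * A destination smaller than the returned length is reported to userspace
 * as -ERANGE by __ceph_getxattr() further down.
 */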

static ssize_t ceph_vxattrcb_layout_stripe_unit(struct ceph_inode_info *ci,
						char *val, size_t size)
{
	return ceph_fmt_xattr(val, size, "%u", ci->i_layout.stripe_unit);
}

static ssize_t ceph_vxattrcb_layout_stripe_count(struct ceph_inode_info *ci,
						 char *val, size_t size)
{
	return ceph_fmt_xattr(val, size, "%u", ci->i_layout.stripe_count);
}

static ssize_t ceph_vxattrcb_layout_object_size(struct ceph_inode_info *ci,
						char *val, size_t size)
{
	return ceph_fmt_xattr(val, size, "%u", ci->i_layout.object_size);
}

static ssize_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
					 char *val, size_t size)
{
	ssize_t ret;
	struct ceph_fs_client *fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	s64 pool = ci->i_layout.pool_id;
	const char *pool_name;

	down_read(&osdc->lock);
	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
	if (pool_name) {
		ret = strlen(pool_name);
		if (ret <= size)
			memcpy(val, pool_name, ret);
	} else {
		ret = ceph_fmt_xattr(val, size, "%lld", pool);
	}
	up_read(&osdc->lock);
	return ret;
}

static ssize_t ceph_vxattrcb_layout_pool_namespace(struct ceph_inode_info *ci,
						   char *val, size_t size)
{
	ssize_t ret = 0;
	struct ceph_string *ns = ceph_try_get_string(ci->i_layout.pool_ns);

	if (ns) {
		ret = ns->len;
		if (ret <= size)
			memcpy(val, ns->str, ret);
		ceph_put_string(ns);
	}
	return ret;
}

/* directories */

static ssize_t ceph_vxattrcb_dir_entries(struct ceph_inode_info *ci, char *val,
					 size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld", ci->i_files + ci->i_subdirs);
}

static ssize_t ceph_vxattrcb_dir_files(struct ceph_inode_info *ci, char *val,
				       size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld", ci->i_files);
}

static ssize_t ceph_vxattrcb_dir_subdirs(struct ceph_inode_info *ci, char *val,
					 size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld", ci->i_subdirs);
}

static ssize_t ceph_vxattrcb_dir_rentries(struct ceph_inode_info *ci, char *val,
					  size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld",
			      ci->i_rfiles + ci->i_rsubdirs);
}

static ssize_t ceph_vxattrcb_dir_rfiles(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld", ci->i_rfiles);
}

static ssize_t ceph_vxattrcb_dir_rsubdirs(struct ceph_inode_info *ci, char *val,
					  size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld", ci->i_rsubdirs);
}

static ssize_t ceph_vxattrcb_dir_rbytes(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld", ci->i_rbytes);
}

static ssize_t ceph_vxattrcb_dir_rctime(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_rctime.tv_sec,
			      ci->i_rctime.tv_nsec);
}

/* dir pin */
static bool ceph_vxattrcb_dir_pin_exists(struct ceph_inode_info *ci)
{
	return ci->i_dir_pin != -ENODATA;
}

static ssize_t ceph_vxattrcb_dir_pin(struct ceph_inode_info *ci, char *val,
				     size_t size)
{
	return ceph_fmt_xattr(val, size, "%d", (int)ci->i_dir_pin);
}

/* quotas */
static bool ceph_vxattrcb_quota_exists(struct ceph_inode_info *ci)
{
	bool ret = false;
	spin_lock(&ci->i_ceph_lock);
	if ((ci->i_max_files || ci->i_max_bytes) &&
	    ci->i_vino.snap == CEPH_NOSNAP &&
	    ci->i_snap_realm &&
	    ci->i_snap_realm->ino == ci->i_vino.ino)
		ret = true;
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

static ssize_t ceph_vxattrcb_quota(struct ceph_inode_info *ci, char *val,
				   size_t size)
{
	return ceph_fmt_xattr(val, size, "max_bytes=%llu max_files=%llu",
			      ci->i_max_bytes, ci->i_max_files);
}

static ssize_t ceph_vxattrcb_quota_max_bytes(struct ceph_inode_info *ci,
					     char *val, size_t size)
{
	return ceph_fmt_xattr(val, size, "%llu", ci->i_max_bytes);
}

static ssize_t ceph_vxattrcb_quota_max_files(struct ceph_inode_info *ci,
					     char *val, size_t size)
{
	return ceph_fmt_xattr(val, size, "%llu", ci->i_max_files);
}

/* snapshots */
static bool ceph_vxattrcb_snap_btime_exists(struct ceph_inode_info *ci)
{
	return (ci->i_snap_btime.tv_sec != 0 || ci->i_snap_btime.tv_nsec != 0);
}

static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
					size_t size)
{
	return ceph_fmt_xattr(val, size, "%lld.%09ld", ci->i_snap_btime.tv_sec,
			      ci->i_snap_btime.tv_nsec);
}

#define CEPH_XATTR_NAME(_type, _name)	XATTR_CEPH_PREFIX #_type "." #_name
#define CEPH_XATTR_NAME2(_type, _name, _name2)	\
	XATTR_CEPH_PREFIX #_type "." #_name "." #_name2

#define XATTR_NAME_CEPH(_type, _name, _flags)				\
	{								\
		.name = CEPH_XATTR_NAME(_type, _name),			\
		.name_size = sizeof (CEPH_XATTR_NAME(_type, _name)),	\
		.getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name,	\
		.exists_cb = NULL,					\
		.flags = (VXATTR_FLAG_READONLY | _flags),		\
	}
#define XATTR_RSTAT_FIELD(_type, _name)			\
	XATTR_NAME_CEPH(_type, _name, VXATTR_FLAG_RSTAT)
#define XATTR_LAYOUT_FIELD(_type, _name, _field)			\
	{								\
		.name = CEPH_XATTR_NAME2(_type, _name, _field),		\
		.name_size = sizeof (CEPH_XATTR_NAME2(_type, _name, _field)), \
		.getxattr_cb = ceph_vxattrcb_ ## _name ## _ ## _field,	\
		.exists_cb = ceph_vxattrcb_layout_exists,		\
		.flags = VXATTR_FLAG_HIDDEN,				\
	}
#define XATTR_QUOTA_FIELD(_type, _name)					\
	{								\
		.name = CEPH_XATTR_NAME(_type, _name),			\
		.name_size = sizeof(CEPH_XATTR_NAME(_type, _name)),	\
		.getxattr_cb = ceph_vxattrcb_ ## _type ## _ ## _name,	\
		.exists_cb = ceph_vxattrcb_quota_exists,		\
		.flags = VXATTR_FLAG_HIDDEN,				\
	}

static struct ceph_vxattr ceph_dir_vxattrs[] = {
	{
		.name = "ceph.dir.layout",
		.name_size = sizeof("ceph.dir.layout"),
		.getxattr_cb = ceph_vxattrcb_layout,
		.exists_cb = ceph_vxattrcb_layout_exists,
		.flags = VXATTR_FLAG_HIDDEN,
	},
	XATTR_LAYOUT_FIELD(dir, layout, stripe_unit),
	XATTR_LAYOUT_FIELD(dir, layout, stripe_count),
	XATTR_LAYOUT_FIELD(dir, layout, object_size),
	XATTR_LAYOUT_FIELD(dir, layout, pool),
	XATTR_LAYOUT_FIELD(dir, layout, pool_namespace),
	XATTR_NAME_CEPH(dir, entries, 0),
	XATTR_NAME_CEPH(dir, files, 0),
	XATTR_NAME_CEPH(dir, subdirs, 0),
	XATTR_RSTAT_FIELD(dir, rentries),
	XATTR_RSTAT_FIELD(dir, rfiles),
	XATTR_RSTAT_FIELD(dir, rsubdirs),
	XATTR_RSTAT_FIELD(dir, rbytes),
	XATTR_RSTAT_FIELD(dir, rctime),
	{
		.name = "ceph.dir.pin",
		.name_size = sizeof("ceph.dir.pin"),
		.getxattr_cb = ceph_vxattrcb_dir_pin,
		.exists_cb = ceph_vxattrcb_dir_pin_exists,
		.flags = VXATTR_FLAG_HIDDEN,
	},
	{
		.name = "ceph.quota",
		.name_size = sizeof("ceph.quota"),
		.getxattr_cb = ceph_vxattrcb_quota,
		.exists_cb = ceph_vxattrcb_quota_exists,
		.flags = VXATTR_FLAG_HIDDEN,
	},
	XATTR_QUOTA_FIELD(quota, max_bytes),
	XATTR_QUOTA_FIELD(quota, max_files),
	{
		.name = "ceph.snap.btime",
		.name_size = sizeof("ceph.snap.btime"),
		.getxattr_cb = ceph_vxattrcb_snap_btime,
		.exists_cb = ceph_vxattrcb_snap_btime_exists,
		.flags = VXATTR_FLAG_READONLY,
	},
	{ .name = NULL, 0 }	/* Required table terminator */
};

/* files */

static struct ceph_vxattr ceph_file_vxattrs[] = {
	{
		.name = "ceph.file.layout",
		.name_size = sizeof("ceph.file.layout"),
		.getxattr_cb = ceph_vxattrcb_layout,
		.exists_cb = ceph_vxattrcb_layout_exists,
		.flags = VXATTR_FLAG_HIDDEN,
	},
	XATTR_LAYOUT_FIELD(file, layout, stripe_unit),
	XATTR_LAYOUT_FIELD(file, layout, stripe_count),
	XATTR_LAYOUT_FIELD(file, layout, object_size),
	XATTR_LAYOUT_FIELD(file, layout, pool),
	XATTR_LAYOUT_FIELD(file, layout, pool_namespace),
	{
		.name = "ceph.snap.btime",
		.name_size = sizeof("ceph.snap.btime"),
		.getxattr_cb = ceph_vxattrcb_snap_btime,
		.exists_cb = ceph_vxattrcb_snap_btime_exists,
		.flags = VXATTR_FLAG_READONLY,
	},
	{ .name = NULL, 0 }	/* Required table terminator */
};
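
/*
 * From userspace these virtual xattrs look like ordinary extended
 * attributes, so on a hypothetical mount at /mnt/cephfs something like:
 *
 *	getfattr -n ceph.dir.rbytes /mnt/cephfs/some/dir
 *	getfattr -n ceph.file.layout.pool /mnt/cephfs/some/file
 *
 * should fetch them by name, even though entries flagged VXATTR_FLAG_HIDDEN
 * are omitted from listxattr(2) output by ceph_listxattr() below.
 */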

static struct ceph_vxattr *ceph_inode_vxattrs(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode))
		return ceph_dir_vxattrs;
	else if (S_ISREG(inode->i_mode))
		return ceph_file_vxattrs;
	return NULL;
}

static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
					     const char *name)
{
	struct ceph_vxattr *vxattr = ceph_inode_vxattrs(inode);

	if (vxattr) {
		while (vxattr->name) {
			if (!strcmp(vxattr->name, name))
				return vxattr;
			vxattr++;
		}
	}

	return NULL;
}

static int __set_xattr(struct ceph_inode_info *ci,
		       const char *name, int name_len,
		       const char *val, int val_len,
		       int flags, int update_xattr,
		       struct ceph_inode_xattr **newxattr)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int c;
	int new = 0;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, min(name_len, xattr->name_len));
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			if (name_len == xattr->name_len)
				break;
			else if (name_len < xattr->name_len)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
		xattr = NULL;
	}

	if (update_xattr) {
		int err = 0;

		if (xattr && (flags & XATTR_CREATE))
			err = -EEXIST;
		else if (!xattr && (flags & XATTR_REPLACE))
			err = -ENODATA;
		if (err) {
			kfree(name);
			kfree(val);
			kfree(*newxattr);
			return err;
		}
		if (update_xattr < 0) {
			if (xattr)
				__remove_xattr(ci, xattr);
			kfree(name);
			kfree(*newxattr);
			return 0;
		}
	}

	if (!xattr) {
		new = 1;
		xattr = *newxattr;
		xattr->name = name;
		xattr->name_len = name_len;
		xattr->should_free_name = update_xattr;

		ci->i_xattrs.count++;
		dout("__set_xattr count=%d\n", ci->i_xattrs.count);
	} else {
		kfree(*newxattr);
		*newxattr = NULL;
		if (xattr->should_free_val)
			kfree((void *)xattr->val);

		if (update_xattr) {
			kfree((void *)name);
			name = xattr->name;
		}
		ci->i_xattrs.names_size -= xattr->name_len;
		ci->i_xattrs.vals_size -= xattr->val_len;
	}
	ci->i_xattrs.names_size += name_len;
	ci->i_xattrs.vals_size += val_len;
	if (val)
		xattr->val = val;
	else
		xattr->val = "";

	xattr->val_len = val_len;
	xattr->dirty = update_xattr;
	xattr->should_free_val = (val && update_xattr);

	if (new) {
		rb_link_node(&xattr->node, parent, p);
		rb_insert_color(&xattr->node, &ci->i_xattrs.index);
		dout("__set_xattr_val p=%p\n", p);
	}

	dout("__set_xattr_val added %llx.%llx xattr %p %.*s=%.*s\n",
	     ceph_vinop(&ci->vfs_inode), xattr, name_len, name, val_len, val);

	return 0;
}

static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
					    const char *name)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int name_len = strlen(name);
	int c;

	p = &ci->i_xattrs.index.rb_node;
	while (*p) {
		parent = *p;
		xattr = rb_entry(parent, struct ceph_inode_xattr, node);
		c = strncmp(name, xattr->name, xattr->name_len);
		if (c == 0 && name_len > xattr->name_len)
			c = 1;
		if (c < 0)
			p = &(*p)->rb_left;
		else if (c > 0)
			p = &(*p)->rb_right;
		else {
			dout("__get_xattr %s: found %.*s\n", name,
			     xattr->val_len, xattr->val);
			return xattr;
		}
	}

	dout("__get_xattr %s: not found\n", name);

	return NULL;
}

static void __free_xattr(struct ceph_inode_xattr *xattr)
{
	BUG_ON(!xattr);

	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);

	kfree(xattr);
}

static int __remove_xattr(struct ceph_inode_info *ci,
			  struct ceph_inode_xattr *xattr)
{
	if (!xattr)
		return -ENODATA;

	rb_erase(&xattr->node, &ci->i_xattrs.index);

	if (xattr->should_free_name)
		kfree((void *)xattr->name);
	if (xattr->should_free_val)
		kfree((void *)xattr->val);

	ci->i_xattrs.names_size -= xattr->name_len;
	ci->i_xattrs.vals_size -= xattr->val_len;
	ci->i_xattrs.count--;
	kfree(xattr);

	return 0;
}

static char *__copy_xattr_names(struct ceph_inode_info *ci,
				char *dest)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);
	dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		memcpy(dest, xattr->name, xattr->name_len);
		dest[xattr->name_len] = '\0';

		dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
		     xattr->name_len, ci->i_xattrs.names_size);

		dest += xattr->name_len + 1;
		p = rb_next(p);
	}

	return dest;
}

void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
{
	struct rb_node *p, *tmp;
	struct ceph_inode_xattr *xattr = NULL;

	p = rb_first(&ci->i_xattrs.index);

	dout("__ceph_destroy_xattrs p=%p\n", p);

	while (p) {
		xattr = rb_entry(p, struct ceph_inode_xattr, node);
		tmp = p;
		p = rb_next(tmp);
		dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
		     xattr->name_len, xattr->name);
		rb_erase(tmp, &ci->i_xattrs.index);

		__free_xattr(xattr);
	}

	ci->i_xattrs.names_size = 0;
	ci->i_xattrs.vals_size = 0;
	ci->i_xattrs.index_version = 0;
	ci->i_xattrs.count = 0;
	ci->i_xattrs.index = RB_ROOT;
}

static int __build_xattrs(struct inode *inode)
	__releases(ci->i_ceph_lock)
	__acquires(ci->i_ceph_lock)
{
	u32 namelen;
	u32 numattr = 0;
	void *p, *end;
	u32 len;
	const char *name, *val;
	struct ceph_inode_info *ci = ceph_inode(inode);
	int xattr_version;
	struct ceph_inode_xattr **xattrs = NULL;
	int err = 0;
	int i;

	dout("__build_xattrs() len=%d\n",
	     ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);

	if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
		return 0; /* already built */

	__ceph_destroy_xattrs(ci);

start:
	/* updated internal xattr rb tree */
	if (ci->i_xattrs.blob && ci->i_xattrs.blob->vec.iov_len > 4) {
		p = ci->i_xattrs.blob->vec.iov_base;
		end = p + ci->i_xattrs.blob->vec.iov_len;
		ceph_decode_32_safe(&p, end, numattr, bad);
		xattr_version = ci->i_xattrs.version;
		spin_unlock(&ci->i_ceph_lock);

		xattrs = kcalloc(numattr, sizeof(struct ceph_inode_xattr *),
				 GFP_NOFS);
		err = -ENOMEM;
		if (!xattrs)
			goto bad_lock;

		for (i = 0; i < numattr; i++) {
			xattrs[i] = kmalloc(sizeof(struct ceph_inode_xattr),
					    GFP_NOFS);
			if (!xattrs[i])
				goto bad_lock;
		}

		spin_lock(&ci->i_ceph_lock);
		if (ci->i_xattrs.version != xattr_version) {
			/* lost a race, retry */
			for (i = 0; i < numattr; i++)
				kfree(xattrs[i]);
			kfree(xattrs);
			xattrs = NULL;
			goto start;
		}
		err = -EIO;
		while (numattr--) {
			ceph_decode_32_safe(&p, end, len, bad);
			namelen = len;
			name = p;
			p += len;
			ceph_decode_32_safe(&p, end, len, bad);
			val = p;
			p += len;

			err = __set_xattr(ci, name, namelen, val, len,
					  0, 0, &xattrs[numattr]);

			if (err < 0)
				goto bad;
		}
		kfree(xattrs);
	}
	ci->i_xattrs.index_version = ci->i_xattrs.version;
	ci->i_xattrs.dirty = false;

	return err;
bad_lock:
	spin_lock(&ci->i_ceph_lock);
bad:
	if (xattrs) {
		for (i = 0; i < numattr; i++)
			kfree(xattrs[i]);
		kfree(xattrs);
	}
	ci->i_xattrs.names_size = 0;
	return err;
}

static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
				    int val_size)
{
	/*
	 * 4 bytes for the length, and additional 4 bytes per each xattr name,
	 * 4 bytes per each value
	 */
	int size = 4 + ci->i_xattrs.count*(4 + 4) +
		       ci->i_xattrs.names_size +
		       ci->i_xattrs.vals_size;
	dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
	     ci->i_xattrs.count, ci->i_xattrs.names_size,
	     ci->i_xattrs.vals_size);

	if (name_size)
		size += 4 + 4 + name_size + val_size;

	return size;
}
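
/*
 * Rough sketch of the xattr blob layout assumed by the size calculation
 * above and by __build_xattrs()/__ceph_build_xattrs_blob() (all lengths
 * are little-endian u32):
 *
 *	u32 count;
 *	count times:
 *		u32 name_len;  name bytes (no NUL)
 *		u32 val_len;   value bytes
 *
 * hence the 4 bytes charged for the count plus 4 + 4 bytes of length
 * headers per entry in __get_required_blob_size().
 */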

/*
 * If there are dirty xattrs, reencode xattrs into the prealloc_blob
 * and swap into place.  It returns the old i_xattrs.blob (or NULL) so
 * that it can be freed by the caller as the i_ceph_lock is likely to be
 * held.
 */
struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
	struct rb_node *p;
	struct ceph_inode_xattr *xattr = NULL;
	struct ceph_buffer *old_blob = NULL;
	void *dest;

	dout("__build_xattrs_blob %p\n", &ci->vfs_inode);
	if (ci->i_xattrs.dirty) {
		int need = __get_required_blob_size(ci, 0, 0);

		BUG_ON(need > ci->i_xattrs.prealloc_blob->alloc_len);

		p = rb_first(&ci->i_xattrs.index);
		dest = ci->i_xattrs.prealloc_blob->vec.iov_base;

		ceph_encode_32(&dest, ci->i_xattrs.count);
		while (p) {
			xattr = rb_entry(p, struct ceph_inode_xattr, node);

			ceph_encode_32(&dest, xattr->name_len);
			memcpy(dest, xattr->name, xattr->name_len);
			dest += xattr->name_len;
			ceph_encode_32(&dest, xattr->val_len);
			memcpy(dest, xattr->val, xattr->val_len);
			dest += xattr->val_len;

			p = rb_next(p);
		}

		/* adjust buffer len; it may be larger than we need */
		ci->i_xattrs.prealloc_blob->vec.iov_len =
			dest - ci->i_xattrs.prealloc_blob->vec.iov_base;

		if (ci->i_xattrs.blob)
			old_blob = ci->i_xattrs.blob;
		ci->i_xattrs.blob = ci->i_xattrs.prealloc_blob;
		ci->i_xattrs.prealloc_blob = NULL;
		ci->i_xattrs.dirty = false;
		ci->i_xattrs.version++;
	}

	return old_blob;
}

static inline int __get_request_mask(struct inode *in) {
	struct ceph_mds_request *req = current->journal_info;
	int mask = 0;
	if (req && req->r_target_inode == in) {
		if (req->r_op == CEPH_MDS_OP_LOOKUP ||
		    req->r_op == CEPH_MDS_OP_LOOKUPINO ||
		    req->r_op == CEPH_MDS_OP_LOOKUPPARENT ||
		    req->r_op == CEPH_MDS_OP_GETATTR) {
			mask = le32_to_cpu(req->r_args.getattr.mask);
		} else if (req->r_op == CEPH_MDS_OP_OPEN ||
			   req->r_op == CEPH_MDS_OP_CREATE) {
			mask = le32_to_cpu(req->r_args.open.mask);
		}
	}
	return mask;
}

ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
			size_t size)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_inode_xattr *xattr;
	struct ceph_vxattr *vxattr = NULL;
	int req_mask;
	ssize_t err;

	/* let's see if a virtual xattr was requested */
	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr) {
		int mask = 0;
		if (vxattr->flags & VXATTR_FLAG_RSTAT)
			mask |= CEPH_STAT_RSTAT;
		err = ceph_do_getattr(inode, mask, true);
		if (err)
			return err;
		err = -ENODATA;
		if (!(vxattr->exists_cb && !vxattr->exists_cb(ci))) {
			err = vxattr->getxattr_cb(ci, value, size);
			if (size && size < err)
				err = -ERANGE;
		}
		return err;
	}

	req_mask = __get_request_mask(inode);

	spin_lock(&ci->i_ceph_lock);
	dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (ci->i_xattrs.version == 0 ||
	    !((req_mask & CEPH_CAP_XATTR_SHARED) ||
	      __ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1))) {
		spin_unlock(&ci->i_ceph_lock);

		/* security module gets xattr while filling trace */
		if (current->journal_info) {
			pr_warn_ratelimited("sync getxattr %p "
					    "during filling trace\n", inode);
			return -EBUSY;
		}

		/* get xattrs from mds (if we don't already have them) */
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
		if (err)
			return err;
		spin_lock(&ci->i_ceph_lock);
	}

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

	err = -ENODATA;  /* == ENOATTR */
	xattr = __get_xattr(ci, name);
	if (!xattr)
		goto out;

	err = -ERANGE;
	if (size && size < xattr->val_len)
		goto out;

	err = xattr->val_len;
	if (size == 0)
		goto out;

	memcpy(value, xattr->val, xattr->val_len);

	if (current->journal_info &&
	    !strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN))
		ci->i_ceph_flags |= CEPH_I_SEC_INITED;
out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}

ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
	struct inode *inode = d_inode(dentry);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_vxattr *vxattrs = ceph_inode_vxattrs(inode);
	bool len_only = (size == 0);
	u32 namelen;
	int err;
	int i;

	spin_lock(&ci->i_ceph_lock);
	dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
	     ci->i_xattrs.version, ci->i_xattrs.index_version);

	if (ci->i_xattrs.version == 0 ||
	    !__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 1)) {
		spin_unlock(&ci->i_ceph_lock);
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR, true);
		if (err)
			return err;
		spin_lock(&ci->i_ceph_lock);
	}

	err = __build_xattrs(inode);
	if (err < 0)
		goto out;

	/* add 1 byte for each xattr due to the null termination */
	namelen = ci->i_xattrs.names_size + ci->i_xattrs.count;
	if (!len_only) {
		if (namelen > size) {
			err = -ERANGE;
			goto out;
		}
		names = __copy_xattr_names(ci, names);
		size -= namelen;
	}


	/* virtual xattr names, too */
	if (vxattrs) {
		for (i = 0; vxattrs[i].name; i++) {
			size_t this_len;

			if (vxattrs[i].flags & VXATTR_FLAG_HIDDEN)
				continue;
			if (vxattrs[i].exists_cb && !vxattrs[i].exists_cb(ci))
				continue;

			this_len = strlen(vxattrs[i].name) + 1;
			namelen += this_len;
			if (len_only)
				continue;

			if (this_len > size) {
				err = -ERANGE;
				goto out;
			}

			memcpy(names, vxattrs[i].name, this_len);
			names += this_len;
			size -= this_len;
		}
	}
	err = namelen;
out:
	spin_unlock(&ci->i_ceph_lock);
	return err;
}

static int ceph_sync_setxattr(struct inode *inode, const char *name,
			      const char *value, size_t size, int flags)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_pagelist *pagelist = NULL;
	int op = CEPH_MDS_OP_SETXATTR;
	int err;

	if (size > 0) {
		/* copy value into pagelist */
		pagelist = ceph_pagelist_alloc(GFP_NOFS);
		if (!pagelist)
			return -ENOMEM;

		err = ceph_pagelist_append(pagelist, value, size);
		if (err)
			goto out;
	} else if (!value) {
		if (flags & CEPH_XATTR_REPLACE)
			op = CEPH_MDS_OP_RMXATTR;
		else
			flags |= CEPH_XATTR_REMOVE;
	}

	dout("setxattr value=%.*s\n", (int)size, value);

	/* do request */
	req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_path2 = kstrdup(name, GFP_NOFS);
	if (!req->r_path2) {
		ceph_mdsc_put_request(req);
		err = -ENOMEM;
		goto out;
	}

	if (op == CEPH_MDS_OP_SETXATTR) {
		req->r_args.setxattr.flags = cpu_to_le32(flags);
		req->r_pagelist = pagelist;
		pagelist = NULL;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;
	req->r_inode_drop = CEPH_CAP_XATTR_SHARED;

	dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
	dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);

out:
	if (pagelist)
		ceph_pagelist_release(pagelist);
	return err;
}

int __ceph_setxattr(struct inode *inode, const char *name,
		    const void *value, size_t size, int flags)
{
	struct ceph_vxattr *vxattr;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_cap_flush *prealloc_cf = NULL;
	struct ceph_buffer *old_blob = NULL;
	int issued;
	int err;
	int dirty = 0;
	int name_len = strlen(name);
	int val_len = size;
	char *newname = NULL;
	char *newval = NULL;
	struct ceph_inode_xattr *xattr = NULL;
	int required_blob_size;
	bool check_realm = false;
	bool lock_snap_rwsem = false;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	vxattr = ceph_match_vxattr(inode, name);
	if (vxattr) {
		if (vxattr->flags & VXATTR_FLAG_READONLY)
			return -EOPNOTSUPP;
		if (value && !strncmp(vxattr->name, "ceph.quota", 10))
			check_realm = true;
	}

	/* pass any unhandled ceph.* xattrs through to the MDS */
	if (!strncmp(name, XATTR_CEPH_PREFIX, XATTR_CEPH_PREFIX_LEN))
		goto do_sync_unlocked;

	/* preallocate memory for xattr name, value, index node */
	err = -ENOMEM;
	newname = kmemdup(name, name_len + 1, GFP_NOFS);
	if (!newname)
		goto out;

	if (val_len) {
		newval = kmemdup(value, val_len, GFP_NOFS);
		if (!newval)
			goto out;
	}

	xattr = kmalloc(sizeof(struct ceph_inode_xattr), GFP_NOFS);
	if (!xattr)
		goto out;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		goto out;

	spin_lock(&ci->i_ceph_lock);
retry:
	issued = __ceph_caps_issued(ci, NULL);
	if (ci->i_xattrs.version == 0 || !(issued & CEPH_CAP_XATTR_EXCL))
		goto do_sync;

	if (!lock_snap_rwsem && !ci->i_head_snapc) {
		lock_snap_rwsem = true;
		if (!down_read_trylock(&mdsc->snap_rwsem)) {
			spin_unlock(&ci->i_ceph_lock);
			down_read(&mdsc->snap_rwsem);
			spin_lock(&ci->i_ceph_lock);
			goto retry;
		}
	}

	dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
	__build_xattrs(inode);

	required_blob_size = __get_required_blob_size(ci, name_len, val_len);

	if (!ci->i_xattrs.prealloc_blob ||
	    required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
		struct ceph_buffer *blob;

		spin_unlock(&ci->i_ceph_lock);
		ceph_buffer_put(old_blob); /* Shouldn't be required */
		dout(" pre-allocating new blob size=%d\n", required_blob_size);
		blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
		if (!blob)
			goto do_sync_unlocked;
		spin_lock(&ci->i_ceph_lock);
		/* prealloc_blob can't be released while holding i_ceph_lock */
		if (ci->i_xattrs.prealloc_blob)
			old_blob = ci->i_xattrs.prealloc_blob;
		ci->i_xattrs.prealloc_blob = blob;
		goto retry;
	}

	err = __set_xattr(ci, newname, name_len, newval, val_len,
			  flags, value ? 1 : -1, &xattr);

	if (!err) {
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL,
					       &prealloc_cf);
		ci->i_xattrs.dirty = true;
		inode->i_ctime = current_time(inode);
	}

	spin_unlock(&ci->i_ceph_lock);
	ceph_buffer_put(old_blob);
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
	if (dirty)
		__mark_inode_dirty(inode, dirty);
	ceph_free_cap_flush(prealloc_cf);
	return err;

do_sync:
	spin_unlock(&ci->i_ceph_lock);
do_sync_unlocked:
	if (lock_snap_rwsem)
		up_read(&mdsc->snap_rwsem);

	/* security module set xattr while filling trace */
	if (current->journal_info) {
		pr_warn_ratelimited("sync setxattr %p "
				    "during filling trace\n", inode);
		err = -EBUSY;
	} else {
		err = ceph_sync_setxattr(inode, name, value, size, flags);
		if (err >= 0 && check_realm) {
			/* check if snaprealm was created for quota inode */
			spin_lock(&ci->i_ceph_lock);
			if ((ci->i_max_files || ci->i_max_bytes) &&
			    !(ci->i_snap_realm &&
			      ci->i_snap_realm->ino == ci->i_vino.ino))
				err = -EOPNOTSUPP;
			spin_unlock(&ci->i_ceph_lock);
		}
	}
out:
	ceph_free_cap_flush(prealloc_cf);
	kfree(newname);
	kfree(newval);
	kfree(xattr);
	return err;
}

static int ceph_get_xattr_handler(const struct xattr_handler *handler,
				  struct dentry *dentry, struct inode *inode,
				  const char *name, void *value, size_t size)
{
	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;
	return __ceph_getxattr(inode, name, value, size);
}

static int ceph_set_xattr_handler(const struct xattr_handler *handler,
				  struct dentry *unused, struct inode *inode,
				  const char *name, const void *value,
				  size_t size, int flags)
{
	if (!ceph_is_valid_xattr(name))
		return -EOPNOTSUPP;
	return __ceph_setxattr(inode, name, value, size, flags);
}

static const struct xattr_handler ceph_other_xattr_handler = {
	.prefix = "",  /* match any name => handlers called with full name */
	.get = ceph_get_xattr_handler,
	.set = ceph_set_xattr_handler,
};

#ifdef CONFIG_SECURITY
bool ceph_security_xattr_wanted(struct inode *in)
{
	return in->i_security != NULL;
}

bool ceph_security_xattr_deadlock(struct inode *in)
{
	struct ceph_inode_info *ci;
	bool ret;
	if (!in->i_security)
		return false;
	ci = ceph_inode(in);
	spin_lock(&ci->i_ceph_lock);
	ret = !(ci->i_ceph_flags & CEPH_I_SEC_INITED) &&
	      !(ci->i_xattrs.version > 0 &&
		__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0));
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}

#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
int ceph_security_init_secctx(struct dentry *dentry, umode_t mode,
			      struct ceph_acl_sec_ctx *as_ctx)
{
	struct ceph_pagelist *pagelist = as_ctx->pagelist;
	const char *name;
	size_t name_len;
	int err;

	err = security_dentry_init_security(dentry, mode, &dentry->d_name,
					    &as_ctx->sec_ctx,
					    &as_ctx->sec_ctxlen);
	if (err < 0) {
		WARN_ON_ONCE(err != -EOPNOTSUPP);
		err = 0; /* do nothing */
		goto out;
	}

	err = -ENOMEM;
	if (!pagelist) {
		pagelist = ceph_pagelist_alloc(GFP_KERNEL);
		if (!pagelist)
			goto out;
		err = ceph_pagelist_reserve(pagelist, PAGE_SIZE);
		if (err)
			goto out;
		ceph_pagelist_encode_32(pagelist, 1);
	}

	/*
	 * FIXME: Make security_dentry_init_security() generic. Currently
	 * it only supports a single security module, and only SELinux has
	 * a dentry_init_security hook.
	 */
	name = XATTR_NAME_SELINUX;
	name_len = strlen(name);
	err = ceph_pagelist_reserve(pagelist,
				    4 * 2 + name_len + as_ctx->sec_ctxlen);
	if (err)
		goto out;

	if (as_ctx->pagelist) {
		/* update count of KV pairs */
		BUG_ON(pagelist->length <= sizeof(__le32));
		if (list_is_singular(&pagelist->head)) {
			le32_add_cpu((__le32*)pagelist->mapped_tail, 1);
		} else {
			struct page *page = list_first_entry(&pagelist->head,
							     struct page, lru);
			void *addr = kmap_atomic(page);
			le32_add_cpu((__le32*)addr, 1);
			kunmap_atomic(addr);
		}
	} else {
		as_ctx->pagelist = pagelist;
	}

	ceph_pagelist_encode_32(pagelist, name_len);
	ceph_pagelist_append(pagelist, name, name_len);

	ceph_pagelist_encode_32(pagelist, as_ctx->sec_ctxlen);
	ceph_pagelist_append(pagelist, as_ctx->sec_ctx, as_ctx->sec_ctxlen);

	err = 0;
out:
	if (pagelist && !as_ctx->pagelist)
		ceph_pagelist_release(pagelist);
	return err;
}

void ceph_security_invalidate_secctx(struct inode *inode)
{
	security_inode_invalidate_secctx(inode);
}

static int ceph_xattr_set_security_label(const struct xattr_handler *handler,
				    struct dentry *unused, struct inode *inode,
				    const char *key, const void *buf,
				    size_t buflen, int flags)
{
	if (security_ismaclabel(key)) {
		const char *name = xattr_full_name(handler, key);
		return __ceph_setxattr(inode, name, buf, buflen, flags);
	}
	return -EOPNOTSUPP;
}

static int ceph_xattr_get_security_label(const struct xattr_handler *handler,
				    struct dentry *unused, struct inode *inode,
				    const char *key, void *buf, size_t buflen)
{
	if (security_ismaclabel(key)) {
		const char *name = xattr_full_name(handler, key);
		return __ceph_getxattr(inode, name, buf, buflen);
	}
	return -EOPNOTSUPP;
}

static const struct xattr_handler ceph_security_label_handler = {
	.prefix = XATTR_SECURITY_PREFIX,
	.get = ceph_xattr_get_security_label,
	.set = ceph_xattr_set_security_label,
};
#endif
#endif

void ceph_release_acl_sec_ctx(struct ceph_acl_sec_ctx *as_ctx)
{
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	posix_acl_release(as_ctx->acl);
	posix_acl_release(as_ctx->default_acl);
#endif
#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
	security_release_secctx(as_ctx->sec_ctx, as_ctx->sec_ctxlen);
#endif
	if (as_ctx->pagelist)
		ceph_pagelist_release(as_ctx->pagelist);
}

/*
 * List of handlers for synthetic system.* attributes. Other
 * attributes are handled directly.
 */
const struct xattr_handler *ceph_xattr_handlers[] = {
#ifdef CONFIG_CEPH_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
#ifdef CONFIG_CEPH_FS_SECURITY_LABEL
	&ceph_security_label_handler,
#endif
	&ceph_other_xattr_handler,
	NULL,
};