1 /* Keyring handling 2 * 3 * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved. 4 * Written by David Howells (dhowells@redhat.com) 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License 8 * as published by the Free Software Foundation; either version 9 * 2 of the License, or (at your option) any later version. 10 */ 11 12 #include <linux/export.h> 13 #include <linux/init.h> 14 #include <linux/sched.h> 15 #include <linux/slab.h> 16 #include <linux/security.h> 17 #include <linux/seq_file.h> 18 #include <linux/err.h> 19 #include <keys/keyring-type.h> 20 #include <keys/user-type.h> 21 #include <linux/assoc_array_priv.h> 22 #include <linux/uaccess.h> 23 #include "internal.h" 24 25 /* 26 * When plumbing the depths of the key tree, this sets a hard limit 27 * on how deep we're willing to go. 28 */ 29 #define KEYRING_SEARCH_MAX_DEPTH 6 30 31 /* 32 * We keep all named keyrings in a hash to speed looking them up. 33 */ 34 #define KEYRING_NAME_HASH_SIZE (1 << 5) 35 36 /* 37 * We mark pointers we pass to the associative array with bit 1 set if 38 * they're keyrings and clear otherwise. 39 */ 40 #define KEYRING_PTR_SUBTYPE 0x2UL 41 42 static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x) 43 { 44 return (unsigned long)x & KEYRING_PTR_SUBTYPE; 45 } 46 static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x) 47 { 48 void *object = assoc_array_ptr_to_leaf(x); 49 return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE); 50 } 51 static inline void *keyring_key_to_ptr(struct key *key) 52 { 53 if (key->type == &key_type_keyring) 54 return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE); 55 return key; 56 } 57 58 static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE]; 59 static DEFINE_RWLOCK(keyring_name_lock); 60 61 static inline unsigned keyring_hash(const char *desc) 62 { 63 unsigned bucket = 0; 64 65 for (; *desc; desc++) 66 bucket += (unsigned char)*desc; 67 68 return bucket & (KEYRING_NAME_HASH_SIZE - 1); 69 } 70 71 /* 72 * The keyring key type definition. Keyrings are simply keys of this type and 73 * can be treated as ordinary keys in addition to having their own special 74 * operations. 75 */ 76 static int keyring_preparse(struct key_preparsed_payload *prep); 77 static void keyring_free_preparse(struct key_preparsed_payload *prep); 78 static int keyring_instantiate(struct key *keyring, 79 struct key_preparsed_payload *prep); 80 static void keyring_revoke(struct key *keyring); 81 static void keyring_destroy(struct key *keyring); 82 static void keyring_describe(const struct key *keyring, struct seq_file *m); 83 static long keyring_read(const struct key *keyring, 84 char __user *buffer, size_t buflen); 85 86 struct key_type key_type_keyring = { 87 .name = "keyring", 88 .def_datalen = 0, 89 .preparse = keyring_preparse, 90 .free_preparse = keyring_free_preparse, 91 .instantiate = keyring_instantiate, 92 .revoke = keyring_revoke, 93 .destroy = keyring_destroy, 94 .describe = keyring_describe, 95 .read = keyring_read, 96 }; 97 EXPORT_SYMBOL(key_type_keyring); 98 99 /* 100 * Semaphore to serialise link/link calls to prevent two link calls in parallel 101 * introducing a cycle. 102 */ 103 static DECLARE_RWSEM(keyring_serialise_link_sem); 104 105 /* 106 * Publish the name of a keyring so that it can be found by name (if it has 107 * one).
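 *
 * The name is hashed into one of the KEYRING_NAME_HASH_SIZE buckets by
 * keyring_hash() above (a simple sum of the description's characters), so
 * find_keyring_by_name() only has to walk a single bucket's list.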
108 */ 109 static void keyring_publish_name(struct key *keyring) 110 { 111 int bucket; 112 113 if (keyring->description) { 114 bucket = keyring_hash(keyring->description); 115 116 write_lock(&keyring_name_lock); 117 118 if (!keyring_name_hash[bucket].next) 119 INIT_LIST_HEAD(&keyring_name_hash[bucket]); 120 121 list_add_tail(&keyring->name_link, 122 &keyring_name_hash[bucket]); 123 124 write_unlock(&keyring_name_lock); 125 } 126 } 127 128 /* 129 * Preparse a keyring payload 130 */ 131 static int keyring_preparse(struct key_preparsed_payload *prep) 132 { 133 return prep->datalen != 0 ? -EINVAL : 0; 134 } 135 136 /* 137 * Free a preparse of a keyring key payload 138 */ 139 static void keyring_free_preparse(struct key_preparsed_payload *prep) 140 { 141 } 142 143 /* 144 * Initialise a keyring. 145 * 146 * Returns 0 on success, -EINVAL if given any data. 147 */ 148 static int keyring_instantiate(struct key *keyring, 149 struct key_preparsed_payload *prep) 150 { 151 assoc_array_init(&keyring->keys); 152 /* make the keyring available by name if it has one */ 153 keyring_publish_name(keyring); 154 return 0; 155 } 156 157 /* 158 * Multiply 64-bits by 32-bits to 96-bits and fold back to 64-bit. Ideally we'd 159 * fold the carry back too, but that requires inline asm. 160 */ 161 static u64 mult_64x32_and_fold(u64 x, u32 y) 162 { 163 u64 hi = (u64)(u32)(x >> 32) * y; 164 u64 lo = (u64)(u32)(x) * y; 165 return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32); 166 } 167 168 /* 169 * Hash a key type and description. 170 */ 171 static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key) 172 { 173 const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP; 174 const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK; 175 const char *description = index_key->description; 176 unsigned long hash, type; 177 u32 piece; 178 u64 acc; 179 int n, desc_len = index_key->desc_len; 180 181 type = (unsigned long)index_key->type; 182 183 acc = mult_64x32_and_fold(type, desc_len + 13); 184 acc = mult_64x32_and_fold(acc, 9207); 185 for (;;) { 186 n = desc_len; 187 if (n <= 0) 188 break; 189 if (n > 4) 190 n = 4; 191 piece = 0; 192 memcpy(&piece, description, n); 193 description += n; 194 desc_len -= n; 195 acc = mult_64x32_and_fold(acc, piece); 196 acc = mult_64x32_and_fold(acc, 9207); 197 } 198 199 /* Fold the hash down to 32 bits if need be. */ 200 hash = acc; 201 if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32) 202 hash ^= acc >> 32; 203 204 /* Squidge all the keyrings into a separate part of the tree from 205 * ordinary keys by making sure the lowest level segment in the hash is 206 * zero for keyrings and non-zero otherwise. 207 */ 208 if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0) 209 return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1; 210 if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0) 211 return (hash + (hash << level_shift)) & ~fan_mask; 212 return hash; 213 } 214 215 /* 216 * Build the next index key chunk. 217 * 218 * On 32-bit systems the index key is laid out as: 219 * 220 * 0 4 5 9... 221 * hash desclen typeptr desc[] 222 * 223 * On 64-bit systems: 224 * 225 * 0 8 9 17... 226 * hash desclen typeptr desc[] 227 * 228 * We return it one word-sized chunk at a time.
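 *
 * As an illustrative summary of the 64-bit case (not normative, just what the
 * code below works out to): level 0 yields the hash computed above; level 1
 * packs the type pointer, shifted up 8 bits, over the description length;
 * level 2 carries up to the first seven description characters plus the top
 * byte of the type pointer; and each subsequent level carries the next eight
 * description characters, with zero padding once the description runs out.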
229 */ 230 static unsigned long keyring_get_key_chunk(const void *data, int level) 231 { 232 const struct keyring_index_key *index_key = data; 233 unsigned long chunk = 0; 234 long offset = 0; 235 int desc_len = index_key->desc_len, n = sizeof(chunk); 236 237 level /= ASSOC_ARRAY_KEY_CHUNK_SIZE; 238 switch (level) { 239 case 0: 240 return hash_key_type_and_desc(index_key); 241 case 1: 242 return ((unsigned long)index_key->type << 8) | desc_len; 243 case 2: 244 if (desc_len == 0) 245 return (u8)((unsigned long)index_key->type >> 246 (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8)); 247 n--; 248 offset = 1; 249 /* fall through */ 250 default: 251 offset += sizeof(chunk) - 1; 252 offset += (level - 3) * sizeof(chunk); 253 if (offset >= desc_len) 254 return 0; 255 desc_len -= offset; 256 if (desc_len > n) 257 desc_len = n; 258 offset += desc_len; 259 do { 260 chunk <<= 8; 261 chunk |= ((u8*)index_key->description)[--offset]; 262 } while (--desc_len > 0); 263 264 if (level == 2) { 265 chunk <<= 8; 266 chunk |= (u8)((unsigned long)index_key->type >> 267 (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8)); 268 } 269 return chunk; 270 } 271 } 272 273 static unsigned long keyring_get_object_key_chunk(const void *object, int level) 274 { 275 const struct key *key = keyring_ptr_to_key(object); 276 return keyring_get_key_chunk(&key->index_key, level); 277 } 278 279 static bool keyring_compare_object(const void *object, const void *data) 280 { 281 const struct keyring_index_key *index_key = data; 282 const struct key *key = keyring_ptr_to_key(object); 283 284 return key->index_key.type == index_key->type && 285 key->index_key.desc_len == index_key->desc_len && 286 memcmp(key->index_key.description, index_key->description, 287 index_key->desc_len) == 0; 288 } 289 290 /* 291 * Compare the index keys of a pair of objects and determine the bit position 292 * at which they differ - if they differ. 293 */ 294 static int keyring_diff_objects(const void *object, const void *data) 295 { 296 const struct key *key_a = keyring_ptr_to_key(object); 297 const struct keyring_index_key *a = &key_a->index_key; 298 const struct keyring_index_key *b = data; 299 unsigned long seg_a, seg_b; 300 int level, i; 301 302 level = 0; 303 seg_a = hash_key_type_and_desc(a); 304 seg_b = hash_key_type_and_desc(b); 305 if ((seg_a ^ seg_b) != 0) 306 goto differ; 307 308 /* The number of bits contributed by the hash is controlled by a 309 * constant in the assoc_array headers. Everything else thereafter we 310 * can deal with as being machine word-size dependent. 
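 *
 * (Note that "level" counts bytes of index key consumed so far; the return
 * value is a bit position, hence the "level * 8" in the differ path below.)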
311 */ 312 level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8; 313 seg_a = a->desc_len; 314 seg_b = b->desc_len; 315 if ((seg_a ^ seg_b) != 0) 316 goto differ; 317 318 /* The next bit may not work on big endian */ 319 level++; 320 seg_a = (unsigned long)a->type; 321 seg_b = (unsigned long)b->type; 322 if ((seg_a ^ seg_b) != 0) 323 goto differ; 324 325 level += sizeof(unsigned long); 326 if (a->desc_len == 0) 327 goto same; 328 329 i = 0; 330 if (!(((unsigned long)a->description | (unsigned long)b->description) & 331 (sizeof(unsigned long) - 1)) && a->desc_len >= sizeof(unsigned long)) { 332 do { 333 seg_a = *(unsigned long *)(a->description + i); 334 seg_b = *(unsigned long *)(b->description + i); 335 if ((seg_a ^ seg_b) != 0) 336 goto differ_plus_i; 337 i += sizeof(unsigned long); 338 } while (i < (a->desc_len & ~(sizeof(unsigned long) - 1))); 339 } 340 341 for (; i < a->desc_len; i++) { 342 seg_a = *(unsigned char *)(a->description + i); 343 seg_b = *(unsigned char *)(b->description + i); 344 if ((seg_a ^ seg_b) != 0) 345 goto differ_plus_i; 346 } 347 348 same: 349 return -1; 350 351 differ_plus_i: 352 level += i; 353 differ: 354 i = level * 8 + __ffs(seg_a ^ seg_b); 355 return i; 356 } 357 358 /* 359 * Free an object after stripping the keyring flag off of the pointer. 360 */ 361 static void keyring_free_object(void *object) 362 { 363 key_put(keyring_ptr_to_key(object)); 364 } 365 366 /* 367 * Operations for keyring management by the index-tree routines. 368 */ 369 static const struct assoc_array_ops keyring_assoc_array_ops = { 370 .get_key_chunk = keyring_get_key_chunk, 371 .get_object_key_chunk = keyring_get_object_key_chunk, 372 .compare_object = keyring_compare_object, 373 .diff_objects = keyring_diff_objects, 374 .free_object = keyring_free_object, 375 }; 376 377 /* 378 * Clean up a keyring when it is destroyed. Unpublish its name if it had one 379 * and dispose of its data. 380 * 381 * The garbage collector detects the final key_put(), removes the keyring from 382 * the serial number tree and then does RCU synchronisation before coming here, 383 * so we shouldn't need to worry about code poking around here with the RCU 384 * readlock held by this time. 385 */ 386 static void keyring_destroy(struct key *keyring) 387 { 388 if (keyring->description) { 389 write_lock(&keyring_name_lock); 390 391 if (keyring->name_link.next != NULL && 392 !list_empty(&keyring->name_link)) 393 list_del(&keyring->name_link); 394 395 write_unlock(&keyring_name_lock); 396 } 397 398 if (keyring->restrict_link) { 399 struct key_restriction *keyres = keyring->restrict_link; 400 401 key_put(keyres->key); 402 kfree(keyres); 403 } 404 405 assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops); 406 } 407 408 /* 409 * Describe a keyring for /proc.
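 *
 * For instance, a session keyring holding two links might appear in
 * /proc/keys as "_ses: 2", while an unnamed, empty keyring would appear as
 * "[anon]: empty" (illustrative output only).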
410 */ 411 static void keyring_describe(const struct key *keyring, struct seq_file *m) 412 { 413 if (keyring->description) 414 seq_puts(m, keyring->description); 415 else 416 seq_puts(m, "[anon]"); 417 418 if (key_is_positive(keyring)) { 419 if (keyring->keys.nr_leaves_on_tree != 0) 420 seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree); 421 else 422 seq_puts(m, ": empty"); 423 } 424 } 425 426 struct keyring_read_iterator_context { 427 size_t buflen; 428 size_t count; 429 key_serial_t __user *buffer; 430 }; 431 432 static int keyring_read_iterator(const void *object, void *data) 433 { 434 struct keyring_read_iterator_context *ctx = data; 435 const struct key *key = keyring_ptr_to_key(object); 436 int ret; 437 438 kenter("{%s,%d},,{%zu/%zu}", 439 key->type->name, key->serial, ctx->count, ctx->buflen); 440 441 if (ctx->count >= ctx->buflen) 442 return 1; 443 444 ret = put_user(key->serial, ctx->buffer); 445 if (ret < 0) 446 return ret; 447 ctx->buffer++; 448 ctx->count += sizeof(key->serial); 449 return 0; 450 } 451 452 /* 453 * Read a list of key IDs from the keyring's contents in binary form 454 * 455 * The keyring's semaphore is read-locked by the caller. This prevents someone 456 * from modifying it under us - which could cause us to read key IDs multiple 457 * times. 458 */ 459 static long keyring_read(const struct key *keyring, 460 char __user *buffer, size_t buflen) 461 { 462 struct keyring_read_iterator_context ctx; 463 long ret; 464 465 kenter("{%d},,%zu", key_serial(keyring), buflen); 466 467 if (buflen & (sizeof(key_serial_t) - 1)) 468 return -EINVAL; 469 470 /* Copy as many key IDs as fit into the buffer */ 471 if (buffer && buflen) { 472 ctx.buffer = (key_serial_t __user *)buffer; 473 ctx.buflen = buflen; 474 ctx.count = 0; 475 ret = assoc_array_iterate(&keyring->keys, 476 keyring_read_iterator, &ctx); 477 if (ret < 0) { 478 kleave(" = %ld [iterate]", ret); 479 return ret; 480 } 481 } 482 483 /* Return the size of the buffer needed */ 484 ret = keyring->keys.nr_leaves_on_tree * sizeof(key_serial_t); 485 if (ret <= buflen) 486 kleave("= %ld [ok]", ret); 487 else 488 kleave("= %ld [buffer too small]", ret); 489 return ret; 490 } 491 492 /* 493 * Allocate a keyring and link it into the destination keyring. 494 */ 495 struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid, 496 const struct cred *cred, key_perm_t perm, 497 unsigned long flags, 498 struct key_restriction *restrict_link, 499 struct key *dest) 500 { 501 struct key *keyring; 502 int ret; 503 504 keyring = key_alloc(&key_type_keyring, description, 505 uid, gid, cred, perm, flags, restrict_link); 506 if (!IS_ERR(keyring)) { 507 ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL); 508 if (ret < 0) { 509 key_put(keyring); 510 keyring = ERR_PTR(ret); 511 } 512 } 513 514 return keyring; 515 } 516 EXPORT_SYMBOL(keyring_alloc); 517 518 /** 519 * restrict_link_reject - Give -EPERM to restrict link 520 * @keyring: The keyring being added to. 521 * @type: The type of key being added. 522 * @payload: The payload of the key intended to be added. 523 * @restriction_key: Additional data for evaluating the restriction. 524 * 525 * Reject the addition of any links to a keyring. It can be overridden by 526 * passing KEY_ALLOC_BYPASS_RESTRICTION to key_instantiate_and_link() when 527 * adding a key to a keyring. 528 * 529 * This is meant to be stored in a key_restriction structure which is passed 530 * in the restrict_link parameter to keyring_alloc().
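 *
 * A minimal, illustrative sketch of such a setup (names are hypothetical and
 * error handling abbreviated; note that the keyring takes ownership of the
 * restriction and frees it in keyring_destroy()):
 *
 *	restriction = kzalloc(sizeof(*restriction), GFP_KERNEL);
 *	if (!restriction)
 *		return -ENOMEM;
 *	restriction->check = restrict_link_reject;
 *	ring = keyring_alloc("_sealed", uid, gid, cred, perm,
 *			     KEY_ALLOC_NOT_IN_QUOTA, restriction, NULL);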
531 */ 532 int restrict_link_reject(struct key *keyring, 533 const struct key_type *type, 534 const union key_payload *payload, 535 struct key *restriction_key) 536 { 537 return -EPERM; 538 } 539 540 /* 541 * By default, we match keys by an exact comparison of their descriptions. 542 */ 543 bool key_default_cmp(const struct key *key, 544 const struct key_match_data *match_data) 545 { 546 return strcmp(key->description, match_data->raw_data) == 0; 547 } 548 549 /* 550 * Iteration function to consider each key found. 551 */ 552 static int keyring_search_iterator(const void *object, void *iterator_data) 553 { 554 struct keyring_search_context *ctx = iterator_data; 555 const struct key *key = keyring_ptr_to_key(object); 556 unsigned long kflags = READ_ONCE(key->flags); 557 short state = READ_ONCE(key->state); 558 559 kenter("{%d}", key->serial); 560 561 /* ignore keys not of this type */ 562 if (key->type != ctx->index_key.type) { 563 kleave(" = 0 [!type]"); 564 return 0; 565 } 566 567 /* skip invalidated, revoked and expired keys */ 568 if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { 569 time64_t expiry = READ_ONCE(key->expiry); 570 571 if (kflags & ((1 << KEY_FLAG_INVALIDATED) | 572 (1 << KEY_FLAG_REVOKED))) { 573 ctx->result = ERR_PTR(-EKEYREVOKED); 574 kleave(" = %d [invrev]", ctx->skipped_ret); 575 goto skipped; 576 } 577 578 if (expiry && ctx->now >= expiry) { 579 if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED)) 580 ctx->result = ERR_PTR(-EKEYEXPIRED); 581 kleave(" = %d [expire]", ctx->skipped_ret); 582 goto skipped; 583 } 584 } 585 586 /* keys that don't match */ 587 if (!ctx->match_data.cmp(key, &ctx->match_data)) { 588 kleave(" = 0 [!match]"); 589 return 0; 590 } 591 592 /* key must have search permissions */ 593 if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && 594 key_task_permission(make_key_ref(key, ctx->possessed), 595 ctx->cred, KEY_NEED_SEARCH) < 0) { 596 ctx->result = ERR_PTR(-EACCES); 597 kleave(" = %d [!perm]", ctx->skipped_ret); 598 goto skipped; 599 } 600 601 if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { 602 /* we set a different error code if we pass a negative key */ 603 if (state < 0) { 604 ctx->result = ERR_PTR(state); 605 kleave(" = %d [neg]", ctx->skipped_ret); 606 goto skipped; 607 } 608 } 609 610 /* Found */ 611 ctx->result = make_key_ref(key, ctx->possessed); 612 kleave(" = 1 [found]"); 613 return 1; 614 615 skipped: 616 return ctx->skipped_ret; 617 } 618 619 /* 620 * Search inside a keyring for a key. We can search by walking to it 621 * directly based on its index-key or we can iterate over the entire 622 * tree looking for it, based on the match function. 623 */ 624 static int search_keyring(struct key *keyring, struct keyring_search_context *ctx) 625 { 626 if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_DIRECT) { 627 const void *object; 628 629 object = assoc_array_find(&keyring->keys, 630 &keyring_assoc_array_ops, 631 &ctx->index_key); 632 return object ? ctx->iterator(object, ctx) : 0; 633 } 634 return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx); 635 } 636 637 /* 638 * Search a tree of keyrings that point to other keyrings up to the maximum 639 * depth.
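 *
 * The walk below is iterative rather than recursive: an explicit stack of
 * (keyring, node, slot) positions, capped at KEYRING_SEARCH_MAX_DEPTH
 * entries, records where to resume in each parent, so deep nesting cannot
 * overrun the kernel stack.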
640 */ 641 static bool search_nested_keyrings(struct key *keyring, 642 struct keyring_search_context *ctx) 643 { 644 struct { 645 struct key *keyring; 646 struct assoc_array_node *node; 647 int slot; 648 } stack[KEYRING_SEARCH_MAX_DEPTH]; 649 650 struct assoc_array_shortcut *shortcut; 651 struct assoc_array_node *node; 652 struct assoc_array_ptr *ptr; 653 struct key *key; 654 int sp = 0, slot; 655 656 kenter("{%d},{%s,%s}", 657 keyring->serial, 658 ctx->index_key.type->name, 659 ctx->index_key.description); 660 661 #define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK) 662 BUG_ON((ctx->flags & STATE_CHECKS) == 0 || 663 (ctx->flags & STATE_CHECKS) == STATE_CHECKS); 664 665 /* Check to see if this top-level keyring is what we are looking for 666 * and whether it is valid or not. 667 */ 668 if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE || 669 keyring_compare_object(keyring, &ctx->index_key)) { 670 ctx->skipped_ret = 2; 671 switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) { 672 case 1: 673 goto found; 674 case 2: 675 return false; 676 default: 677 break; 678 } 679 } 680 681 ctx->skipped_ret = 0; 682 683 /* Start processing a new keyring */ 684 descend_to_keyring: 685 kdebug("descend to %d", keyring->serial); 686 if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | 687 (1 << KEY_FLAG_REVOKED))) 688 goto not_this_keyring; 689 690 /* Search through the keys in this keyring before searching its 691 * subtrees. 692 */ 693 if (search_keyring(keyring, ctx)) 694 goto found; 695 696 /* Then manually iterate through the keyrings nested in this one. 697 * 698 * Start from the root node of the index tree. Because of the way the 699 * hash function has been set up, keyrings cluster on the leftmost 700 * branch of the root node (root slot 0) or in the root node itself. 701 * Non-keyrings avoid the leftmost branch of the root entirely (root 702 * slots 1-15). 703 */ 704 ptr = READ_ONCE(keyring->keys.root); 705 if (!ptr) 706 goto not_this_keyring; 707 708 if (assoc_array_ptr_is_shortcut(ptr)) { 709 /* If the root is a shortcut, either the keyring only contains 710 * keyring pointers (everything clusters behind root slot 0) or 711 * doesn't contain any keyring pointers. 712 */ 713 shortcut = assoc_array_ptr_to_shortcut(ptr); 714 if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0) 715 goto not_this_keyring; 716 717 ptr = READ_ONCE(shortcut->next_node); 718 node = assoc_array_ptr_to_node(ptr); 719 goto begin_node; 720 } 721 722 node = assoc_array_ptr_to_node(ptr); 723 ptr = node->slots[0]; 724 if (!assoc_array_ptr_is_meta(ptr)) 725 goto begin_node; 726 727 descend_to_node: 728 /* Descend to a more distal node in this keyring's content tree and go 729 * through that.
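 *
 * (A shortcut skips over a span of levels in which the index keys do not
 * branch; we simply hop through it to the node it points at.)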
730 */ 731 kdebug("descend"); 732 if (assoc_array_ptr_is_shortcut(ptr)) { 733 shortcut = assoc_array_ptr_to_shortcut(ptr); 734 ptr = READ_ONCE(shortcut->next_node); 735 BUG_ON(!assoc_array_ptr_is_node(ptr)); 736 } 737 node = assoc_array_ptr_to_node(ptr); 738 739 begin_node: 740 kdebug("begin_node"); 741 slot = 0; 742 ascend_to_node: 743 /* Go through the slots in a node */ 744 for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { 745 ptr = READ_ONCE(node->slots[slot]); 746 747 if (assoc_array_ptr_is_meta(ptr) && node->back_pointer) 748 goto descend_to_node; 749 750 if (!keyring_ptr_is_keyring(ptr)) 751 continue; 752 753 key = keyring_ptr_to_key(ptr); 754 755 if (sp >= KEYRING_SEARCH_MAX_DEPTH) { 756 if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) { 757 ctx->result = ERR_PTR(-ELOOP); 758 return false; 759 } 760 goto not_this_keyring; 761 } 762 763 /* Search a nested keyring */ 764 if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && 765 key_task_permission(make_key_ref(key, ctx->possessed), 766 ctx->cred, KEY_NEED_SEARCH) < 0) 767 continue; 768 769 /* stack the current position */ 770 stack[sp].keyring = keyring; 771 stack[sp].node = node; 772 stack[sp].slot = slot; 773 sp++; 774 775 /* begin again with the new keyring */ 776 keyring = key; 777 goto descend_to_keyring; 778 } 779 780 /* We've dealt with all the slots in the current node, so now we need 781 * to ascend to the parent and continue processing there. 782 */ 783 ptr = READ_ONCE(node->back_pointer); 784 slot = node->parent_slot; 785 786 if (ptr && assoc_array_ptr_is_shortcut(ptr)) { 787 shortcut = assoc_array_ptr_to_shortcut(ptr); 788 ptr = READ_ONCE(shortcut->back_pointer); 789 slot = shortcut->parent_slot; 790 } 791 if (!ptr) 792 goto not_this_keyring; 793 node = assoc_array_ptr_to_node(ptr); 794 slot++; 795 796 /* If we've ascended to the root (zero backpointer), we must have just 797 * finished processing the leftmost branch rather than the root slots - 798 * so there can't be any more keyrings for us to find. 799 */ 800 if (node->back_pointer) { 801 kdebug("ascend %d", slot); 802 goto ascend_to_node; 803 } 804 805 /* The keyring we're looking at was disqualified or didn't contain a 806 * matching key. 807 */ 808 not_this_keyring: 809 kdebug("not_this_keyring %d", sp); 810 if (sp <= 0) { 811 kleave(" = false"); 812 return false; 813 } 814 815 /* Resume the processing of a keyring higher up in the tree */ 816 sp--; 817 keyring = stack[sp].keyring; 818 node = stack[sp].node; 819 slot = stack[sp].slot + 1; 820 kdebug("ascend to %d [%d]", keyring->serial, slot); 821 goto ascend_to_node; 822 823 /* We found a viable match */ 824 found: 825 key = key_ref_to_ptr(ctx->result); 826 key_check(key); 827 if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) { 828 key->last_used_at = ctx->now; 829 keyring->last_used_at = ctx->now; 830 while (sp > 0) 831 stack[--sp].keyring->last_used_at = ctx->now; 832 } 833 kleave(" = true"); 834 return true; 835 } 836 837 /** 838 * keyring_search_aux - Search a keyring tree for a key matching some criteria 839 * @keyring_ref: A pointer to the keyring with possession indicator. 840 * @ctx: The keyring search context. 841 * 842 * Search the supplied keyring tree for a key that matches the criteria given. 843 * The root keyring and any linked keyrings must grant Search permission to the 844 * caller to be searchable and keys can only be found if they too grant Search 845 * to the caller. The possession flag on the root keyring pointer controls use 846 * of the possessor bits in permissions checking of the entire tree. 
In 847 * addition, the LSM gets to forbid keyring searches and key matches. 848 * 849 * The search is performed as a breadth-then-depth search up to the prescribed 850 * limit (KEYRING_SEARCH_MAX_DEPTH). 851 * 852 * Keys are matched to the type provided and are then filtered by the match 853 * function, which is given the description to use in any way it sees fit. The 854 * match function may use any attributes of a key that it wishes to 855 * determine the match. Normally the match function from the key type would be 856 * used. 857 * 858 * RCU can be used to prevent the keyring key lists from disappearing without 859 * the need to take lots of locks. 860 * 861 * Returns a pointer to the found key and increments the key usage count if 862 * successful; -EAGAIN if no matching keys were found, or if expired or revoked 863 * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the 864 * specified keyring wasn't a keyring. 865 * 866 * In the case of a successful return, the possession attribute from 867 * @keyring_ref is propagated to the returned key reference. 868 */ 869 key_ref_t keyring_search_aux(key_ref_t keyring_ref, 870 struct keyring_search_context *ctx) 871 { 872 struct key *keyring; 873 long err; 874 875 ctx->iterator = keyring_search_iterator; 876 ctx->possessed = is_key_possessed(keyring_ref); 877 ctx->result = ERR_PTR(-EAGAIN); 878 879 keyring = key_ref_to_ptr(keyring_ref); 880 key_check(keyring); 881 882 if (keyring->type != &key_type_keyring) 883 return ERR_PTR(-ENOTDIR); 884 885 if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) { 886 err = key_task_permission(keyring_ref, ctx->cred, KEY_NEED_SEARCH); 887 if (err < 0) 888 return ERR_PTR(err); 889 } 890 891 rcu_read_lock(); 892 ctx->now = ktime_get_real_seconds(); 893 if (search_nested_keyrings(keyring, ctx)) 894 __key_get(key_ref_to_ptr(ctx->result)); 895 rcu_read_unlock(); 896 return ctx->result; 897 } 898 899 /** 900 * keyring_search - Search the supplied keyring tree for a matching key 901 * @keyring: The root of the keyring tree to be searched. 902 * @type: The type of key we want to find. 903 * @description: The name of the key we want to find. 904 * 905 * As keyring_search_aux() above, but using the current task's credentials and 906 * type's default matching function and preferred search method.
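 *
 * An illustrative call (hypothetical names, error handling abbreviated):
 *
 *	key_ref = keyring_search(make_key_ref(ring, true),
 *				 &key_type_user, "foo");
 *	if (!IS_ERR(key_ref)) {
 *		key = key_ref_to_ptr(key_ref);
 *		...
 *		key_ref_put(key_ref);
 *	}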
907 */ 908 key_ref_t keyring_search(key_ref_t keyring, 909 struct key_type *type, 910 const char *description) 911 { 912 struct keyring_search_context ctx = { 913 .index_key.type = type, 914 .index_key.description = description, 915 .index_key.desc_len = strlen(description), 916 .cred = current_cred(), 917 .match_data.cmp = key_default_cmp, 918 .match_data.raw_data = description, 919 .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, 920 .flags = KEYRING_SEARCH_DO_STATE_CHECK, 921 }; 922 key_ref_t key; 923 int ret; 924 925 if (type->match_preparse) { 926 ret = type->match_preparse(&ctx.match_data); 927 if (ret < 0) 928 return ERR_PTR(ret); 929 } 930 931 key = keyring_search_aux(keyring, &ctx); 932 933 if (type->match_free) 934 type->match_free(&ctx.match_data); 935 return key; 936 } 937 EXPORT_SYMBOL(keyring_search); 938 939 static struct key_restriction *keyring_restriction_alloc( 940 key_restrict_link_func_t check) 941 { 942 struct key_restriction *keyres = 943 kzalloc(sizeof(struct key_restriction), GFP_KERNEL); 944 945 if (!keyres) 946 return ERR_PTR(-ENOMEM); 947 948 keyres->check = check; 949 950 return keyres; 951 } 952 953 /* 954 * Semaphore to serialise restriction setup to prevent reference count 955 * cycles through restriction key pointers. 956 */ 957 static DECLARE_RWSEM(keyring_serialise_restrict_sem); 958 959 /* 960 * Check for restriction cycles that would prevent keyring garbage collection. 961 * keyring_serialise_restrict_sem must be held. 962 */ 963 static bool keyring_detect_restriction_cycle(const struct key *dest_keyring, 964 struct key_restriction *keyres) 965 { 966 while (keyres && keyres->key && 967 keyres->key->type == &key_type_keyring) { 968 if (keyres->key == dest_keyring) 969 return true; 970 971 keyres = keyres->key->restrict_link; 972 } 973 974 return false; 975 } 976 977 /** 978 * keyring_restrict - Look up and apply a restriction to a keyring 979 * @keyring_ref: The keyring to be restricted 980 * @type: The key type that will provide the restriction checker 981 * @restriction: The restriction options to apply to the keyring 982 */ 983 int keyring_restrict(key_ref_t keyring_ref, const char *type, 984 const char *restriction) 985 { 986 struct key *keyring; 987 struct key_type *restrict_type = NULL; 988 struct key_restriction *restrict_link; 989 int ret = 0; 990 991 keyring = key_ref_to_ptr(keyring_ref); 992 key_check(keyring); 993 994 if (keyring->type != &key_type_keyring) 995 return -ENOTDIR; 996 997 if (!type) { 998 restrict_link = keyring_restriction_alloc(restrict_link_reject); 999 } else { 1000 restrict_type = key_type_lookup(type); 1001 1002 if (IS_ERR(restrict_type)) 1003 return PTR_ERR(restrict_type); 1004 1005 if (!restrict_type->lookup_restriction) { 1006 ret = -ENOENT; 1007 goto error; 1008 } 1009 1010 restrict_link = restrict_type->lookup_restriction(restriction); 1011 } 1012 1013 if (IS_ERR(restrict_link)) { 1014 ret = PTR_ERR(restrict_link); 1015 goto error; 1016 } 1017 1018 down_write(&keyring->sem); 1019 down_write(&keyring_serialise_restrict_sem); 1020 1021 if (keyring->restrict_link) 1022 ret = -EEXIST; 1023 else if (keyring_detect_restriction_cycle(keyring, restrict_link)) 1024 ret = -EDEADLK; 1025 else 1026 keyring->restrict_link = restrict_link; 1027 1028 up_write(&keyring_serialise_restrict_sem); 1029 up_write(&keyring->sem); 1030 1031 if (ret < 0) { 1032 key_put(restrict_link->key); 1033 kfree(restrict_link); 1034 } 1035 1036 error: 1037 if (restrict_type) 1038 key_type_put(restrict_type); 1039 1040 return ret; 1041 } 1042 EXPORT_SYMBOL(keyring_restrict); 1043 1044 /* 1045 * Search the given
keyring for a key that might be updated. 1046 * 1047 * The caller must guarantee that the keyring is a keyring and that the 1048 * permission is granted to modify the keyring as no check is made here. The 1049 * caller must also hold a lock on the keyring semaphore. 1050 * 1051 * Returns a pointer to the found key with usage count incremented if 1052 * successful and returns NULL if not found. Revoked and invalidated keys are 1053 * skipped over. 1054 * 1055 * If successful, the possession indicator is propagated from the keyring ref 1056 * to the returned key reference. 1057 */ 1058 key_ref_t find_key_to_update(key_ref_t keyring_ref, 1059 const struct keyring_index_key *index_key) 1060 { 1061 struct key *keyring, *key; 1062 const void *object; 1063 1064 keyring = key_ref_to_ptr(keyring_ref); 1065 1066 kenter("{%d},{%s,%s}", 1067 keyring->serial, index_key->type->name, index_key->description); 1068 1069 object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops, 1070 index_key); 1071 1072 if (object) 1073 goto found; 1074 1075 kleave(" = NULL"); 1076 return NULL; 1077 1078 found: 1079 key = keyring_ptr_to_key(object); 1080 if (key->flags & ((1 << KEY_FLAG_INVALIDATED) | 1081 (1 << KEY_FLAG_REVOKED))) { 1082 kleave(" = NULL [x]"); 1083 return NULL; 1084 } 1085 __key_get(key); 1086 kleave(" = {%d}", key->serial); 1087 return make_key_ref(key, is_key_possessed(keyring_ref)); 1088 } 1089 1090 /* 1091 * Find a keyring with the specified name. 1092 * 1093 * Only keyrings that have nonzero refcount, are not revoked, and are owned by a 1094 * user in the current user namespace are considered. If @uid_keyring is %true, 1095 * the keyring additionally must have been allocated as a user or user session 1096 * keyring; otherwise, it must grant Search permission directly to the caller. 1097 * 1098 * Returns a pointer to the keyring with the keyring's refcount having been 1099 * incremented on success. -ENOKEY is returned if a key could not be found. 1100 */ 1101 struct key *find_keyring_by_name(const char *name, bool uid_keyring) 1102 { 1103 struct key *keyring; 1104 int bucket; 1105 1106 if (!name) 1107 return ERR_PTR(-EINVAL); 1108 1109 bucket = keyring_hash(name); 1110 1111 read_lock(&keyring_name_lock); 1112 1113 if (keyring_name_hash[bucket].next) { 1114 /* search this hash bucket for a keyring with a matching name 1115 * that's readable and that hasn't been revoked */ 1116 list_for_each_entry(keyring, 1117 &keyring_name_hash[bucket], 1118 name_link 1119 ) { 1120 if (!kuid_has_mapping(current_user_ns(), keyring->user->uid)) 1121 continue; 1122 1123 if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) 1124 continue; 1125 1126 if (strcmp(keyring->description, name) != 0) 1127 continue; 1128 1129 if (uid_keyring) { 1130 if (!test_bit(KEY_FLAG_UID_KEYRING, 1131 &keyring->flags)) 1132 continue; 1133 } else { 1134 if (key_permission(make_key_ref(keyring, 0), 1135 KEY_NEED_SEARCH) < 0) 1136 continue; 1137 } 1138 1139 /* we've got a match but we might end up racing with 1140 * key_cleanup() if the keyring is currently 'dead' 1141 * (ie.
it has a zero usage count) */ 1142 if (!refcount_inc_not_zero(&keyring->usage)) 1143 continue; 1144 keyring->last_used_at = ktime_get_real_seconds(); 1145 goto out; 1146 } 1147 } 1148 1149 keyring = ERR_PTR(-ENOKEY); 1150 out: 1151 read_unlock(&keyring_name_lock); 1152 return keyring; 1153 } 1154 1155 static int keyring_detect_cycle_iterator(const void *object, 1156 void *iterator_data) 1157 { 1158 struct keyring_search_context *ctx = iterator_data; 1159 const struct key *key = keyring_ptr_to_key(object); 1160 1161 kenter("{%d}", key->serial); 1162 1163 /* We might get a keyring with matching index-key that is nonetheless a 1164 * different keyring. */ 1165 if (key != ctx->match_data.raw_data) 1166 return 0; 1167 1168 ctx->result = ERR_PTR(-EDEADLK); 1169 return 1; 1170 } 1171 1172 /* 1173 * See if a cycle will be created by inserting acyclic tree B in acyclic 1174 * tree A at the topmost level (ie: as a direct child of A). 1175 * 1176 * Since we are adding B to A at the top level, checking for cycles should just 1177 * be a matter of seeing if node A is somewhere in tree B. 1178 */ 1179 static int keyring_detect_cycle(struct key *A, struct key *B) 1180 { 1181 struct keyring_search_context ctx = { 1182 .index_key = A->index_key, 1183 .match_data.raw_data = A, 1184 .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, 1185 .iterator = keyring_detect_cycle_iterator, 1186 .flags = (KEYRING_SEARCH_NO_STATE_CHECK | 1187 KEYRING_SEARCH_NO_UPDATE_TIME | 1188 KEYRING_SEARCH_NO_CHECK_PERM | 1189 KEYRING_SEARCH_DETECT_TOO_DEEP), 1190 }; 1191 1192 rcu_read_lock(); 1193 search_nested_keyrings(B, &ctx); 1194 rcu_read_unlock(); 1195 return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result); 1196 } 1197 1198 /* 1199 * Preallocate memory so that a key can be linked into a keyring. 1200 */ 1201 int __key_link_begin(struct key *keyring, 1202 const struct keyring_index_key *index_key, 1203 struct assoc_array_edit **_edit) 1204 __acquires(&keyring->sem) 1205 __acquires(&keyring_serialise_link_sem) 1206 { 1207 struct assoc_array_edit *edit; 1208 int ret; 1209 1210 kenter("%d,%s,%s,", 1211 keyring->serial, index_key->type->name, index_key->description); 1212 1213 BUG_ON(index_key->desc_len == 0); 1214 1215 if (keyring->type != &key_type_keyring) 1216 return -ENOTDIR; 1217 1218 down_write(&keyring->sem); 1219 1220 ret = -EKEYREVOKED; 1221 if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) 1222 goto error_krsem; 1223 1224 /* serialise link/link calls to prevent parallel calls causing a cycle 1225 * when linking two keyrings in opposite orders */ 1226 if (index_key->type == &key_type_keyring) 1227 down_write(&keyring_serialise_link_sem); 1228 1229 /* Create an edit script that will insert/replace the key in the 1230 * keyring tree. 1231 */ 1232 edit = assoc_array_insert(&keyring->keys, 1233 &keyring_assoc_array_ops, 1234 index_key, 1235 NULL); 1236 if (IS_ERR(edit)) { 1237 ret = PTR_ERR(edit); 1238 goto error_sem; 1239 } 1240 1241 /* If we're not replacing a link in-place then we're going to need some 1242 * extra quota.
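 *
 * (Each link is charged at KEYQUOTA_LINK_BYTES against the keyring's payload
 * quota; __key_link_end() drops the charge again if the edit is discarded
 * rather than applied.)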
1243 */ 1244 if (!edit->dead_leaf) { 1245 ret = key_payload_reserve(keyring, 1246 keyring->datalen + KEYQUOTA_LINK_BYTES); 1247 if (ret < 0) 1248 goto error_cancel; 1249 } 1250 1251 *_edit = edit; 1252 kleave(" = 0"); 1253 return 0; 1254 1255 error_cancel: 1256 assoc_array_cancel_edit(edit); 1257 error_sem: 1258 if (index_key->type == &key_type_keyring) 1259 up_write(&keyring_serialise_link_sem); 1260 error_krsem: 1261 up_write(&keyring->sem); 1262 kleave(" = %d", ret); 1263 return ret; 1264 } 1265 1266 /* 1267 * Check already instantiated keys aren't going to be a problem. 1268 * 1269 * The caller must have called __key_link_begin(). There's no need to call this 1270 * for keys that were created after __key_link_begin() was called. 1271 */ 1272 int __key_link_check_live_key(struct key *keyring, struct key *key) 1273 { 1274 if (key->type == &key_type_keyring) 1275 /* check that we aren't going to create a cycle by linking one 1276 * keyring to another */ 1277 return keyring_detect_cycle(keyring, key); 1278 return 0; 1279 } 1280 1281 /* 1282 * Link a key into a keyring. 1283 * 1284 * Must be called with __key_link_begin() having been called. Discards any 1285 * already extant link to a matching key if there is one, so that each keyring 1286 * holds at most one link to any given key of a particular type+description 1287 * combination. 1288 */ 1289 void __key_link(struct key *key, struct assoc_array_edit **_edit) 1290 { 1291 __key_get(key); 1292 assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key)); 1293 assoc_array_apply_edit(*_edit); 1294 *_edit = NULL; 1295 } 1296 1297 /* 1298 * Finish linking a key into a keyring. 1299 * 1300 * Must be called with __key_link_begin() having been called. 1301 */ 1302 void __key_link_end(struct key *keyring, 1303 const struct keyring_index_key *index_key, 1304 struct assoc_array_edit *edit) 1305 __releases(&keyring->sem) 1306 __releases(&keyring_serialise_link_sem) 1307 { 1308 BUG_ON(index_key->type == NULL); 1309 kenter("%d,%s,", keyring->serial, index_key->type->name); 1310 1311 if (index_key->type == &key_type_keyring) 1312 up_write(&keyring_serialise_link_sem); 1313 1314 if (edit) { 1315 if (!edit->dead_leaf) { 1316 key_payload_reserve(keyring, 1317 keyring->datalen - KEYQUOTA_LINK_BYTES); 1318 } 1319 assoc_array_cancel_edit(edit); 1320 } 1321 up_write(&keyring->sem); 1322 } 1323 1324 /* 1325 * Check addition of keys to restricted keyrings. 1326 */ 1327 static int __key_link_check_restriction(struct key *keyring, struct key *key) 1328 { 1329 if (!keyring->restrict_link || !keyring->restrict_link->check) 1330 return 0; 1331 return keyring->restrict_link->check(keyring, key->type, &key->payload, 1332 keyring->restrict_link->key); 1333 } 1334 1335 /** 1336 * key_link - Link a key to a keyring 1337 * @keyring: The keyring to make the link in. 1338 * @key: The key to link to. 1339 * 1340 * Make a link in a keyring to a key, such that the keyring holds a reference 1341 * on that key and the key can potentially be found by searching that keyring. 1342 * 1343 * This function will write-lock the keyring's semaphore and will consume some 1344 * of the user's key data quota to hold the link. 1345 * 1346 * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, 1347 * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is 1348 * full, -EDQUOT if there is insufficient key data quota remaining to add 1349 * another link or -ENOMEM if there's insufficient memory.
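 *
 * An illustrative use (hypothetical variables; the caller must already hold
 * references on both keys):
 *
 *	ret = key_link(dest_keyring, key);
 *	if (ret < 0)
 *		pr_debug("couldn't link key: %d\n", ret);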
1350 * 1351 * It is assumed that the caller has checked that it is permitted for a link to 1352 * be made (the keyring should have Write permission and the key Link 1353 * permission). 1354 */ 1355 int key_link(struct key *keyring, struct key *key) 1356 { 1357 struct assoc_array_edit *edit; 1358 int ret; 1359 1360 kenter("{%d,%d}", keyring->serial, refcount_read(&keyring->usage)); 1361 1362 key_check(keyring); 1363 key_check(key); 1364 1365 ret = __key_link_begin(keyring, &key->index_key, &edit); 1366 if (ret == 0) { 1367 kdebug("begun {%d,%d}", keyring->serial, refcount_read(&keyring->usage)); 1368 ret = __key_link_check_restriction(keyring, key); 1369 if (ret == 0) 1370 ret = __key_link_check_live_key(keyring, key); 1371 if (ret == 0) 1372 __key_link(key, &edit); 1373 __key_link_end(keyring, &key->index_key, edit); 1374 } 1375 1376 kleave(" = %d {%d,%d}", ret, keyring->serial, refcount_read(&keyring->usage)); 1377 return ret; 1378 } 1379 EXPORT_SYMBOL(key_link); 1380 1381 /** 1382 * key_unlink - Unlink the first link to a key from a keyring. 1383 * @keyring: The keyring to remove the link from. 1384 * @key: The key the link is to. 1385 * 1386 * Remove a link from a keyring to a key. 1387 * 1388 * This function will write-lock the keyring's semaphore. 1389 * 1390 * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if 1391 * the key isn't linked to by the keyring or -ENOMEM if there's insufficient 1392 * memory. 1393 * 1394 * It is assumed that the caller has checked that it is permitted for a link to 1395 * be removed (the keyring should have Write permission; no permissions are 1396 * required on the key). 1397 */ 1398 int key_unlink(struct key *keyring, struct key *key) 1399 { 1400 struct assoc_array_edit *edit; 1401 int ret; 1402 1403 key_check(keyring); 1404 key_check(key); 1405 1406 if (keyring->type != &key_type_keyring) 1407 return -ENOTDIR; 1408 1409 down_write(&keyring->sem); 1410 1411 edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops, 1412 &key->index_key); 1413 if (IS_ERR(edit)) { 1414 ret = PTR_ERR(edit); 1415 goto error; 1416 } 1417 ret = -ENOENT; 1418 if (edit == NULL) 1419 goto error; 1420 1421 assoc_array_apply_edit(edit); 1422 key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); 1423 ret = 0; 1424 1425 error: 1426 up_write(&keyring->sem); 1427 return ret; 1428 } 1429 EXPORT_SYMBOL(key_unlink); 1430 1431 /** 1432 * keyring_clear - Clear a keyring 1433 * @keyring: The keyring to clear. 1434 * 1435 * Clear the contents of the specified keyring. 1436 * 1437 * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring. 1438 */ 1439 int keyring_clear(struct key *keyring) 1440 { 1441 struct assoc_array_edit *edit; 1442 int ret; 1443 1444 if (keyring->type != &key_type_keyring) 1445 return -ENOTDIR; 1446 1447 down_write(&keyring->sem); 1448 1449 edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops); 1450 if (IS_ERR(edit)) { 1451 ret = PTR_ERR(edit); 1452 } else { 1453 if (edit) 1454 assoc_array_apply_edit(edit); 1455 key_payload_reserve(keyring, 0); 1456 ret = 0; 1457 } 1458 1459 up_write(&keyring->sem); 1460 return ret; 1461 } 1462 EXPORT_SYMBOL(keyring_clear); 1463 1464 /* 1465 * Dispose of the links from a revoked keyring. 1466 * 1467 * This is called with the key sem write-locked. 
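 *
 * (key_revoke() holds the key semaphore while invoking this through the key
 * type's ->revoke() operation.)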
1468 */ 1469 static void keyring_revoke(struct key *keyring) 1470 { 1471 struct assoc_array_edit *edit; 1472 1473 edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops); 1474 if (!IS_ERR(edit)) { 1475 if (edit) 1476 assoc_array_apply_edit(edit); 1477 key_payload_reserve(keyring, 0); 1478 } 1479 } 1480 1481 static bool keyring_gc_select_iterator(void *object, void *iterator_data) 1482 { 1483 struct key *key = keyring_ptr_to_key(object); 1484 time64_t *limit = iterator_data; 1485 1486 if (key_is_dead(key, *limit)) 1487 return false; 1488 key_get(key); 1489 return true; 1490 } 1491 1492 static int keyring_gc_check_iterator(const void *object, void *iterator_data) 1493 { 1494 const struct key *key = keyring_ptr_to_key(object); 1495 time64_t *limit = iterator_data; 1496 1497 key_check(key); 1498 return key_is_dead(key, *limit); 1499 } 1500 1501 /* 1502 * Garbage collect pointers from a keyring. 1503 * 1504 * Not called with any locks held. The keyring's key struct will not be 1505 * deallocated under us as only our caller may deallocate it. 1506 */ 1507 void keyring_gc(struct key *keyring, time64_t limit) 1508 { 1509 int result; 1510 1511 kenter("%x{%s}", keyring->serial, keyring->description ?: ""); 1512 1513 if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | 1514 (1 << KEY_FLAG_REVOKED))) 1515 goto dont_gc; 1516 1517 /* scan the keyring looking for dead keys */ 1518 rcu_read_lock(); 1519 result = assoc_array_iterate(&keyring->keys, 1520 keyring_gc_check_iterator, &limit); 1521 rcu_read_unlock(); 1522 if (result == true) 1523 goto do_gc; 1524 1525 dont_gc: 1526 kleave(" [no gc]"); 1527 return; 1528 1529 do_gc: 1530 down_write(&keyring->sem); 1531 assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops, 1532 keyring_gc_select_iterator, &limit); 1533 up_write(&keyring->sem); 1534 kleave(" [gc]"); 1535 } 1536 1537 /* 1538 * Garbage collect restriction pointers from a keyring. 1539 * 1540 * Keyring restrictions are associated with a key type, and must be cleaned 1541 * up if the key type is unregistered. The restriction is altered to always 1542 * reject additional keys so a keyring cannot be opened up by unregistering 1543 * a key type. 1544 * 1545 * Not called with any keyring locks held. The keyring's key struct will not 1546 * be deallocated under us as only our caller may deallocate it. 1547 * 1548 * The caller is required to hold key_types_sem and dead_type->sem. This is 1549 * fulfilled by key_gc_keytype() holding the locks on behalf of 1550 * key_garbage_collector(), which it invokes on a workqueue. 1551 */ 1552 void keyring_restriction_gc(struct key *keyring, struct key_type *dead_type) 1553 { 1554 struct key_restriction *keyres; 1555 1556 kenter("%x{%s}", keyring->serial, keyring->description ?: ""); 1557 1558 /* 1559 * keyring->restrict_link is only assigned at key allocation time 1560 * or with the key type locked, so the only values that could be 1561 * concurrently assigned to keyring->restrict_link are for key 1562 * types other than dead_type. Given this, it's ok to check 1563 * the key type before acquiring keyring->sem. 
1564 */ 1565 if (!dead_type || !keyring->restrict_link || 1566 keyring->restrict_link->keytype != dead_type) { 1567 kleave(" [no restriction gc]"); 1568 return; 1569 } 1570 1571 /* Lock the keyring to ensure that a link is not in progress */ 1572 down_write(&keyring->sem); 1573 1574 keyres = keyring->restrict_link; 1575 1576 keyres->check = restrict_link_reject; 1577 1578 key_put(keyres->key); 1579 keyres->key = NULL; 1580 keyres->keytype = NULL; 1581 1582 up_write(&keyring->sem); 1583 1584 kleave(" [restriction gc]"); 1585 } 1586