// SPDX-License-Identifier: GPL-2.0-or-later
/* Basic authentication token and access key management
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"

struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

unsigned int key_quota_root_maxkeys = 1000000;	 /* root's key count quota */
unsigned int key_quota_root_maxbytes = 25000000; /* root's key space quota */
unsigned int key_quota_maxkeys = 200;		 /* general key count quota */
unsigned int key_quota_maxbytes = 20000;	 /* general key space quota */

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

/* We serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif

/*
 * Get the key quota record for a user, allocating a new record if one doesn't
 * already exist.
 */
struct key_user *key_user_lookup(kuid_t uid)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent, **p;

try_again:
	parent = NULL;
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid_lt(uid, user->uid))
			p = &(*p)->rb_left;
		else if (uid_gt(uid, user->uid))
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	refcount_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	mutex_init(&candidate->cons_lock);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	refcount_inc(&user->usage);
	spin_unlock(&key_user_lock);
	kfree(candidate);
out:
	return user;
}

/*
 * Dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
	if (refcount_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);

		kfree(user);
	}
}
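/*
 * Illustrative sketch only (not part of this file's API contract): a caller
 * wanting to inspect a user's quota record pairs key_user_lookup() with
 * key_user_put() and takes the record's lock around reads of the quota
 * counters. The uid variable is assumed to come from the caller's context:
 *
 *	struct key_user *user = key_user_lookup(uid);
 *	if (!user)
 *		return -ENOMEM;
 *	spin_lock(&user->lock);
 *	pr_debug("keys=%d bytes=%d\n", user->qnkeys, user->qnbytes);
 *	spin_unlock(&user->lock);
 *	key_user_put(user);
 */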
/*
 * Allocate a serial number for a key. These are assigned randomly to avoid
 * security issues through covert channel problems.
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);

	spin_lock(&key_serial_lock);

attempt_insertion:
	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);
	return;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 3) {
			key->serial = 3;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto attempt_insertion;
	}
}

/**
 * key_alloc - Allocate a key of the specified type.
 * @type: The type of key to allocate.
 * @desc: The key description to allow the key to be searched out.
 * @uid: The owner of the new key.
 * @gid: The group ID for the new key's group permissions.
 * @cred: The credentials specifying UID namespace.
 * @perm: The permissions mask of the new key.
 * @flags: Flags specifying quota properties.
 * @restrict_link: Optional link restriction for new keyrings.
 *
 * Allocate a key of the specified type with the attributes given. The key is
 * returned in an uninstantiated state and the caller needs to instantiate the
 * key before returning.
 *
 * The restrict_link structure (if not NULL) will be freed when the
 * keyring is destroyed, so it must be dynamically allocated.
 *
 * The user's key count quota is updated to reflect the creation of the key and
 * the user's key data quota has the default for the key type reserved. The
 * instantiation function should amend this as necessary. If insufficient
 * quota is available, -EDQUOT will be returned.
 *
 * The LSM security modules can prevent a key being created, in which case
 * -EACCES will be returned.
 *
 * Returns a pointer to the new key if successful and an error code otherwise.
 *
 * Note that the caller needs to ensure the key type isn't uninstantiated.
 * Internally this can be done by locking key_types_sem. Externally, this can
 * be done by either never unregistering the key type, or making sure
 * key_alloc() calls don't race with module unloading.
 */
struct key *key_alloc(struct key_type *type, const char *desc,
		      kuid_t uid, kgid_t gid, const struct cred *cred,
		      key_perm_t perm, unsigned long flags,
		      struct key_restriction *restrict_link)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;
	int ret;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	if (type->vet_description) {
		ret = type->vet_description(desc);
		if (ret < 0) {
			key = ERR_PTR(ret);
			goto error;
		}
	}

	desclen = strlen(desc);
	quotalen = desclen + 1 + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid);
	if (!user)
		goto no_memory_1;

	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
			key_quota_root_maxkeys : key_quota_maxkeys;
		unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&user->lock);
		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
			if (user->qnkeys + 1 > maxkeys ||
			    user->qnbytes + quotalen > maxbytes ||
			    user->qnbytes + quotalen < user->qnbytes)
				goto no_quota;
		}

		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}

	/* allocate and initialise the key and its description */
	key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
	if (!key)
		goto no_memory_2;

	key->index_key.desc_len = desclen;
	key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
	if (!key->index_key.description)
		goto no_memory_3;
	key->index_key.type = type;
	key_set_index_key(&key->index_key);

	refcount_set(&key->usage, 1);
	init_rwsem(&key->sem);
	lockdep_set_class(&key->sem, &type->lock_class);
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	key->restrict_link = restrict_link;
	key->last_used_at = ktime_get_real_seconds();

	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
		key->flags |= 1 << KEY_FLAG_IN_QUOTA;
	if (flags & KEY_ALLOC_BUILT_IN)
		key->flags |= 1 << KEY_FLAG_BUILTIN;
	if (flags & KEY_ALLOC_UID_KEYRING)
		key->flags |= 1 << KEY_FLAG_UID_KEYRING;

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif

	/* let the security module know about the key */
	ret = security_key_alloc(key, cred, flags);
	if (ret < 0)
		goto security_error;

	/* publish the key by giving it a serial number */
	refcount_inc(&key->domain_tag->usage);
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

error:
	return key;

security_error:
	kfree(key->description);
	kmem_cache_free(key_jar, key);
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
	key = ERR_PTR(ret);
	goto error;

no_memory_3:
	kmem_cache_free(key_jar, key);
no_memory_2:
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;
}
EXPORT_SYMBOL(key_alloc);
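/*
 * Illustrative sketch only: a typical in-kernel caller allocates an
 * uninstantiated key and then instantiates it, for example with
 * key_instantiate_and_link() further down this file. The description string
 * and the payload/plen variables here are hypothetical:
 *
 *	struct key *key;
 *	int ret;
 *
 *	key = key_alloc(&key_type_user, "example:desc",
 *			GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
 *			(KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW,
 *			KEY_ALLOC_NOT_IN_QUOTA, NULL);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 *
 *	ret = key_instantiate_and_link(key, payload, plen, NULL, NULL);
 *	if (ret < 0)
 *		key_put(key);
 */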
/**
 * key_payload_reserve - Adjust data quota reservation for the key's payload
 * @key: The key to make the reservation for.
 * @datalen: The amount of data payload the caller now wants.
 *
 * Adjust the amount of the owning user's key data quota that a key reserves.
 * If the amount is increased, then -EDQUOT may be returned if there isn't
 * enough free quota available.
 *
 * If successful, 0 is returned.
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
	int delta = (int)datalen - key->datalen;
	int ret = 0;

	key_check(key);

	/* contemplate the quota adjustment */
	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&key->user->lock);

		if (delta > 0 &&
		    (key->user->qnbytes + delta >= maxbytes ||
		     key->user->qnbytes + delta < key->user->qnbytes)) {
			ret = -EDQUOT;
		}
		else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}
		spin_unlock(&key->user->lock);
	}

	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;

	return ret;
}
EXPORT_SYMBOL(key_payload_reserve);
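/*
 * Illustrative sketch only: a key type's ->update() op would normally adjust
 * the quota reservation before swapping in its new payload. The new_datalen
 * and new_payload names are hypothetical:
 *
 *	ret = key_payload_reserve(key, new_datalen);
 *	if (ret < 0)
 *		return ret;
 *	rcu_assign_keypointer(key, new_payload);
 */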
/*
 * Change the key state to being instantiated.
 */
static void mark_key_instantiated(struct key *key, int reject_error)
{
	/* Commit the payload before setting the state; barrier versus
	 * key_read_state().
	 */
	smp_store_release(&key->state,
			  (reject_error < 0) ? reject_error : KEY_IS_POSITIVE);
}

/*
 * Instantiate a key and link it into the target keyring atomically. Must be
 * called with the target keyring's semaphore writelocked. The target key's
 * semaphore need not be locked as instantiation is serialised by
 * key_construction_mutex.
 */
static int __key_instantiate_and_link(struct key *key,
				      struct key_preparsed_payload *prep,
				      struct key *keyring,
				      struct key *authkey,
				      struct assoc_array_edit **_edit)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (key->state == KEY_IS_UNINSTANTIATED) {
		/* instantiate the key */
		ret = key->type->instantiate(key, prep);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			mark_key_instantiated(key, 0);

			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring) {
				if (test_bit(KEY_FLAG_KEEP, &keyring->flags))
					set_bit(KEY_FLAG_KEEP, &key->flags);

				__key_link(key, _edit);
			}

			/* disable the authorisation key */
			if (authkey)
				key_invalidate(authkey);

			if (prep->expiry != TIME64_MAX) {
				key->expiry = prep->expiry;
				key_schedule_gc(prep->expiry + key_gc_delay);
			}
		}
	}

	mutex_unlock(&key_construction_mutex);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;
}

/**
 * key_instantiate_and_link - Instantiate a key and link it into the keyring.
 * @key: The key to instantiate.
 * @data: The data to use to instantiate the keyring.
 * @datalen: The length of @data.
 * @keyring: Keyring to create a link in on success (or NULL).
 * @authkey: The authorisation token permitting instantiation.
 *
 * Instantiate a key that's in the uninstantiated state using the provided data
 * and, if successful, link it in to the destination keyring if one is
 * supplied.
 *
 * If successful, 0 is returned, the authorisation token is revoked and anyone
 * waiting for the key is woken up. If the key was already instantiated,
 * -EBUSY will be returned.
 */
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring,
			     struct key *authkey)
{
	struct key_preparsed_payload prep;
	struct assoc_array_edit *edit = NULL;
	int ret;

	memset(&prep, 0, sizeof(prep));
	prep.data = data;
	prep.datalen = datalen;
	prep.quotalen = key->type->def_datalen;
	prep.expiry = TIME64_MAX;
	if (key->type->preparse) {
		ret = key->type->preparse(&prep);
		if (ret < 0)
			goto error;
	}

	if (keyring) {
		ret = __key_link_lock(keyring, &key->index_key);
		if (ret < 0)
			goto error;

		ret = __key_link_begin(keyring, &key->index_key, &edit);
		if (ret < 0)
			goto error_link_end;

		if (keyring->restrict_link && keyring->restrict_link->check) {
			struct key_restriction *keyres = keyring->restrict_link;

			ret = keyres->check(keyring, key->type, &prep.payload,
					    keyres->key);
			if (ret < 0)
				goto error_link_end;
		}
	}

	ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);

error_link_end:
	if (keyring)
		__key_link_end(keyring, &key->index_key, edit);

error:
	if (key->type->preparse)
		key->type->free_preparse(&prep);
	return ret;
}

EXPORT_SYMBOL(key_instantiate_and_link);
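/*
 * Illustrative sketch only: a kernel service finishing a key construction on
 * behalf of request_key() might instantiate the key, link it into a
 * destination keyring and revoke the authorisation token in one call. The
 * payload, plen, dest_keyring and authkey variables are assumed to come from
 * the construction context:
 *
 *	ret = key_instantiate_and_link(key, payload, plen,
 *				       dest_keyring, authkey);
 *	if (ret == -EBUSY)
 *		pr_debug("key was already instantiated\n");
 */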
/**
 * key_reject_and_link - Negatively instantiate a key and link it into the keyring.
 * @key: The key to instantiate.
 * @timeout: The timeout on the negative key.
 * @error: The error to return when the key is hit.
 * @keyring: Keyring to create a link in on success (or NULL).
 * @authkey: The authorisation token permitting instantiation.
 *
 * Negatively instantiate a key that's in the uninstantiated state and, if
 * successful, set its timeout and stored error and link it in to the
 * destination keyring if one is supplied. The key and any links to the key
 * will be automatically garbage collected after the timeout expires.
 *
 * Negative keys are used to rate limit repeated request_key() calls by causing
 * them to return the stored error code (typically ENOKEY) until the negative
 * key expires.
 *
 * If successful, 0 is returned, the authorisation token is revoked and anyone
 * waiting for the key is woken up. If the key was already instantiated,
 * -EBUSY will be returned.
 */
int key_reject_and_link(struct key *key,
			unsigned timeout,
			unsigned error,
			struct key *keyring,
			struct key *authkey)
{
	struct assoc_array_edit *edit = NULL;
	int ret, awaken, link_ret = 0;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring) {
		if (keyring->restrict_link)
			return -EPERM;

		link_ret = __key_link_lock(keyring, &key->index_key);
		if (link_ret == 0) {
			link_ret = __key_link_begin(keyring, &key->index_key, &edit);
			if (link_ret < 0)
				__key_link_end(keyring, &key->index_key, edit);
		}
	}

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (key->state == KEY_IS_UNINSTANTIATED) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		mark_key_instantiated(key, -error);
		key->expiry = ktime_get_real_seconds() + timeout;
		key_schedule_gc(key->expiry + key_gc_delay);

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring && link_ret == 0)
			__key_link(key, &edit);

		/* disable the authorisation key */
		if (authkey)
			key_invalidate(authkey);
	}

	mutex_unlock(&key_construction_mutex);

	if (keyring && link_ret == 0)
		__key_link_end(keyring, &key->index_key, edit);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret == 0 ? link_ret : ret;
}
EXPORT_SYMBOL(key_reject_and_link);

/**
 * key_put - Discard a reference to a key.
 * @key: The key to discard a reference from.
 *
 * Discard a reference to a key, and when all the references are gone, we
 * schedule the cleanup task to come and pull it out of the tree in process
 * context at some later time.
 */
void key_put(struct key *key)
{
	if (key) {
		key_check(key);

		if (refcount_dec_and_test(&key->usage))
			schedule_work(&key_gc_work);
	}
}
EXPORT_SYMBOL(key_put);

/*
 * Find a key by its serial number.
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

found:
	/* A key is allowed to be looked up only if someone still owns a
	 * reference to it - otherwise it's awaiting the gc.
	 */
	if (!refcount_inc_not_zero(&key->usage))
		goto not_found;

error:
	spin_unlock(&key_serial_lock);
	return key;
}
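/*
 * Illustrative sketch only: key_lookup() takes a reference on success, so a
 * caller resolving a serial number must drop it with key_put() once done.
 * The serial number id here is hypothetical:
 *
 *	struct key *key = key_lookup(id);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 *	... use the key ...
 *	key_put(key);
 */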
/*
 * Find and lock the specified key type against removal.
 *
 * We return with the sem read-locked if successful. If the type wasn't
 * available -ENOKEY is returned instead.
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			goto found_kernel_type;
	}

	up_read(&key_types_sem);
	ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
	return ktype;
}

void key_set_timeout(struct key *key, unsigned timeout)
{
	time64_t expiry = 0;

	/* make the changes with the locks held to prevent races */
	down_write(&key->sem);

	if (timeout > 0)
		expiry = ktime_get_real_seconds() + timeout;

	key->expiry = expiry;
	key_schedule_gc(key->expiry + key_gc_delay);

	up_write(&key->sem);
}
EXPORT_SYMBOL_GPL(key_set_timeout);

/*
 * Unlock a key type locked by key_type_lookup().
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);
}
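/*
 * Illustrative sketch only: key_type_lookup() leaves key_types_sem read-locked
 * on success, so it must always be balanced with key_type_put() (on failure
 * the semaphore has already been dropped):
 *
 *	struct key_type *ktype = key_type_lookup("user");
 *	if (IS_ERR(ktype))
 *		return PTR_ERR(ktype);
 *	... use ktype while the type is pinned against unregistration ...
 *	key_type_put(ktype);
 */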
/*
 * Attempt to update an existing key.
 *
 * The key is given to us with an incremented refcount that we need to discard
 * if we get an error.
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     struct key_preparsed_payload *prep)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_NEED_WRITE);
	if (ret < 0)
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, prep);
	if (ret == 0)
		/* Updating a negative key positively instantiates it */
		mark_key_instantiated(key, 0);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key_ref;

error:
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;
}

/**
 * key_create_or_update - Update or create and instantiate a key.
 * @keyring_ref: A pointer to the destination keyring with possession flag.
 * @type: The type of key.
 * @description: The searchable description for the key.
 * @payload: The data to use to instantiate or update the key.
 * @plen: The length of @payload.
 * @perm: The permissions mask for a new key.
 * @flags: The quota flags for a new key.
 *
 * Search the destination keyring for a key of the same description and if one
 * is found, update it, otherwise create and instantiate a new one and create a
 * link to it from that keyring.
 *
 * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
 * concocted.
 *
 * Returns a pointer to the new key if successful, -ENODEV if the key type
 * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
 * caller isn't permitted to modify the keyring or the LSM did not permit
 * creation of the key.
 *
 * On success, the possession flag from the keyring ref will be tacked on to
 * the key ref before it is returned.
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       key_perm_t perm,
			       unsigned long flags)
{
	struct keyring_index_key index_key = {
		.description = description,
	};
	struct key_preparsed_payload prep;
	struct assoc_array_edit *edit = NULL;
	const struct cred *cred = current_cred();
	struct key *keyring, *key = NULL;
	key_ref_t key_ref;
	int ret;
	struct key_restriction *restrict_link = NULL;

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	index_key.type = key_type_lookup(type);
	if (IS_ERR(index_key.type)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	key_ref = ERR_PTR(-EINVAL);
	if (!index_key.type->instantiate ||
	    (!index_key.description && !index_key.type->preparse))
		goto error_put_type;

	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	if (!(flags & KEY_ALLOC_BYPASS_RESTRICTION))
		restrict_link = keyring->restrict_link;

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_put_type;

	memset(&prep, 0, sizeof(prep));
	prep.data = payload;
	prep.datalen = plen;
	prep.quotalen = index_key.type->def_datalen;
	prep.expiry = TIME64_MAX;
	if (index_key.type->preparse) {
		ret = index_key.type->preparse(&prep);
		if (ret < 0) {
			key_ref = ERR_PTR(ret);
			goto error_free_prep;
		}
		if (!index_key.description)
			index_key.description = prep.description;
		key_ref = ERR_PTR(-EINVAL);
		if (!index_key.description)
			goto error_free_prep;
	}
	index_key.desc_len = strlen(index_key.description);
	key_set_index_key(&index_key);

	ret = __key_link_lock(keyring, &index_key);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_free_prep;
	}

	ret = __key_link_begin(keyring, &index_key, &edit);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_link_end;
	}

	if (restrict_link && restrict_link->check) {
		ret = restrict_link->check(keyring, index_key.type,
					   &prep.payload, restrict_link->key);
		if (ret < 0) {
			key_ref = ERR_PTR(ret);
			goto error_link_end;
		}
	}

	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_NEED_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_link_end;
	}

	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (index_key.type->update) {
		key_ref = find_key_to_update(keyring_ref, &index_key);
		if (key_ref)
			goto found_matching_key;
	}

	/* if the client doesn't provide, decide on the permissions we want */
	if (perm == KEY_PERM_UNDEF) {
		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
		perm |= KEY_USR_VIEW;

		if (index_key.type->read)
			perm |= KEY_POS_READ;

		if (index_key.type == &key_type_keyring ||
		    index_key.type->update)
			perm |= KEY_POS_WRITE;
	}

	/* allocate a new key */
	key = key_alloc(index_key.type, index_key.description,
			cred->fsuid, cred->fsgid, cred, perm, flags, NULL);
	if (IS_ERR(key)) {
		key_ref = ERR_CAST(key);
		goto error_link_end;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_link_end;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_link_end:
	__key_link_end(keyring, &index_key, edit);
error_free_prep:
	if (index_key.type->preparse)
		index_key.type->free_preparse(&prep);
error_put_type:
	key_type_put(index_key.type);
error:
	return key_ref;

found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	__key_link_end(keyring, &index_key, edit);

	key = key_ref_to_ptr(key_ref);
	if (test_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags)) {
		ret = wait_for_key_construction(key, true);
		if (ret < 0) {
			key_ref_put(key_ref);
			key_ref = ERR_PTR(ret);
			goto error_free_prep;
		}
	}

	key_ref = __key_update(key_ref, &prep);
	goto error_free_prep;
}
EXPORT_SYMBOL(key_create_or_update);

/**
 * key_update - Update a key's contents.
 * @key_ref: The pointer (plus possession flag) to the key.
 * @payload: The data to be used to update the key.
 * @plen: The length of @payload.
 *
 * Attempt to update the contents of a key with the given payload data. The
 * caller must be granted Write permission on the key. Negative keys can be
 * instantiated by this method.
 *
 * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
 * type does not support updating. The key type may return other errors.
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
	struct key_preparsed_payload prep;
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = key_permission(key_ref, KEY_NEED_WRITE);
	if (ret < 0)
		return ret;

	/* attempt to update it if supported */
	if (!key->type->update)
		return -EOPNOTSUPP;

	memset(&prep, 0, sizeof(prep));
	prep.data = payload;
	prep.datalen = plen;
	prep.quotalen = key->type->def_datalen;
	prep.expiry = TIME64_MAX;
	if (key->type->preparse) {
		ret = key->type->preparse(&prep);
		if (ret < 0)
			goto error;
	}

	down_write(&key->sem);

	ret = key->type->update(key, &prep);
	if (ret == 0)
		/* Updating a negative key positively instantiates it */
		mark_key_instantiated(key, 0);

	up_write(&key->sem);

error:
	if (key->type->preparse)
		key->type->free_preparse(&prep);
	return ret;
}
EXPORT_SYMBOL(key_update);
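/*
 * Illustrative sketch only: adding or refreshing a "user" key in a keyring
 * from kernel code looks much like the add_key() syscall. The keyring_ref,
 * payload and plen variables are assumed to be supplied by the caller:
 *
 *	key_ref_t kref;
 *
 *	kref = key_create_or_update(keyring_ref, "user", "example:desc",
 *				    payload, plen, KEY_PERM_UNDEF,
 *				    KEY_ALLOC_IN_QUOTA);
 *	if (IS_ERR(kref))
 *		return PTR_ERR(kref);
 *	key_ref_put(kref);
 */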
/**
 * key_revoke - Revoke a key.
 * @key: The key to be revoked.
 *
 * Mark a key as being revoked and ask the type to free up its resources. The
 * revocation timeout is set and the key and all its links will be
 * automatically garbage collected after key_gc_delay amount of time if they
 * are not manually dealt with first.
 */
void key_revoke(struct key *key)
{
	time64_t time;

	key_check(key);

	/* make sure no one's trying to change or use the key when we mark it
	 * - we tell lockdep that we might nest because we might be revoking an
	 *   authorisation key whilst holding the sem on a key we've just
	 *   instantiated
	 */
	down_write_nested(&key->sem, 1);
	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
	    key->type->revoke)
		key->type->revoke(key);

	/* set the death time to no more than the expiry time */
	time = ktime_get_real_seconds();
	if (key->revoked_at == 0 || key->revoked_at > time) {
		key->revoked_at = time;
		key_schedule_gc(key->revoked_at + key_gc_delay);
	}

	up_write(&key->sem);
}
EXPORT_SYMBOL(key_revoke);

/**
 * key_invalidate - Invalidate a key.
 * @key: The key to be invalidated.
 *
 * Mark a key as being invalidated and have it cleaned up immediately. The key
 * is ignored by all searches and other operations from this point.
 */
void key_invalidate(struct key *key)
{
	kenter("%d", key_serial(key));

	key_check(key);

	if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
		down_write_nested(&key->sem, 1);
		if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags))
			key_schedule_gc_links();
		up_write(&key->sem);
	}
}
EXPORT_SYMBOL(key_invalidate);

/**
 * generic_key_instantiate - Simple instantiation of a key from preparsed data
 * @key: The key to be instantiated
 * @prep: The preparsed data to load.
 *
 * Instantiate a key from preparsed data. We assume we can just copy the data
 * in directly and clear the old pointers.
 *
 * This can be pointed to directly by the key type instantiate op pointer.
 */
int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
	int ret;

	pr_devel("==>%s()\n", __func__);

	ret = key_payload_reserve(key, prep->quotalen);
	if (ret == 0) {
		rcu_assign_keypointer(key, prep->payload.data[0]);
		key->payload.data[1] = prep->payload.data[1];
		key->payload.data[2] = prep->payload.data[2];
		key->payload.data[3] = prep->payload.data[3];
		prep->payload.data[0] = NULL;
		prep->payload.data[1] = NULL;
		prep->payload.data[2] = NULL;
		prep->payload.data[3] = NULL;
	}
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ret;
}
EXPORT_SYMBOL(generic_key_instantiate);

/**
 * register_key_type - Register a type of key.
 * @ktype: The new key type.
 *
 * Register a new key type.
 *
 * Returns 0 on success or -EEXIST if a type of this name already exists.
 */
int register_key_type(struct key_type *ktype)
{
	struct key_type *p;
	int ret;

	memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));

	ret = -EEXIST;
	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(p, &key_types_list, link) {
		if (strcmp(p->name, ktype->name) == 0)
			goto out;
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);

	pr_notice("Key type %s registered\n", ktype->name);
	ret = 0;

out:
	up_write(&key_types_sem);
	return ret;
}
EXPORT_SYMBOL(register_key_type);
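/*
 * Illustrative sketch only: a module would define a struct key_type and
 * register it at init time, unregistering it again on exit. The "example"
 * name is hypothetical and the ops are borrowed from the user key type purely
 * for illustration:
 *
 *	static struct key_type key_type_example = {
 *		.name		= "example",
 *		.preparse	= user_preparse,
 *		.free_preparse	= user_free_preparse,
 *		.instantiate	= generic_key_instantiate,
 *		.destroy	= user_destroy,
 *	};
 *
 *	ret = register_key_type(&key_type_example);
 *	...
 *	unregister_key_type(&key_type_example);
 */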
/**
 * unregister_key_type - Unregister a type of key.
 * @ktype: The key type.
 *
 * Unregister a key type and mark all the extant keys of this type as dead.
 * Those keys of this type are then destroyed to get rid of their payloads and
 * they and their links will be garbage collected as soon as possible.
 */
void unregister_key_type(struct key_type *ktype)
{
	down_write(&key_types_sem);
	list_del_init(&ktype->link);
	downgrade_write(&key_types_sem);
	key_gc_keytype(ktype);
	pr_notice("Key type %s unregistered\n", ktype->name);
	up_read(&key_types_sem);
}
EXPORT_SYMBOL(unregister_key_type);

/*
 * Initialise the key management state.
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
				    0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);
	list_add_tail(&key_type_logon.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);
}