/* Basic authentication token and access key management
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"

struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

unsigned int key_quota_root_maxkeys = 200;	/* root's key count quota */
unsigned int key_quota_root_maxbytes = 20000;	/* root's key space quota */
unsigned int key_quota_maxkeys = 200;		/* general key count quota */
unsigned int key_quota_maxbytes = 20000;	/* general key space quota */

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

/* We serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif

/*
 * Get the key quota record for a user, allocating a new record if one doesn't
 * already exist.
 */
struct key_user *key_user_lookup(kuid_t uid)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid_lt(uid, user->uid))
			p = &(*p)->rb_left;
		else if (uid_gt(uid, user->uid))
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	mutex_init(&candidate->cons_lock);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	kfree(candidate);
out:
	return user;
}

/*
 * Dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);

		kfree(user);
	}
}

/*
 * Allocate a serial number for a key. These are assigned randomly to avoid
 * security issues through covert channel problems.
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);

	spin_lock(&key_serial_lock);

attempt_insertion:
	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);
	return;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 3) {
			key->serial = 3;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto attempt_insertion;
	}
}

/**
 * key_alloc - Allocate a key of the specified type.
 * @type: The type of key to allocate.
 * @desc: The key description to allow the key to be searched out.
 * @uid: The owner of the new key.
 * @gid: The group ID for the new key's group permissions.
 * @cred: The credentials specifying UID namespace.
 * @perm: The permissions mask of the new key.
 * @flags: Flags specifying quota properties.
 *
 * Allocate a key of the specified type with the attributes given. The key is
 * returned in an uninstantiated state and the caller needs to instantiate the
 * key before returning.
 *
 * The user's key count quota is updated to reflect the creation of the key and
 * the user's key data quota has the default for the key type reserved. The
 * instantiation function should amend this as necessary. If insufficient
 * quota is available, -EDQUOT will be returned.
 *
 * The LSM security modules can prevent a key being created, in which case
 * -EACCES will be returned.
 *
 * Returns a pointer to the new key if successful and an error code otherwise.
 *
 * Note that the caller needs to ensure the key type isn't uninstantiated.
 * Internally this can be done by locking key_types_sem. Externally, this can
 * be done by either never unregistering the key type, or making sure
 * key_alloc() calls don't race with module unloading.
 */
struct key *key_alloc(struct key_type *type, const char *desc,
		      kuid_t uid, kgid_t gid, const struct cred *cred,
		      key_perm_t perm, unsigned long flags)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;
	int ret;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	if (type->vet_description) {
		ret = type->vet_description(desc);
		if (ret < 0) {
			key = ERR_PTR(ret);
			goto error;
		}
	}

	desclen = strlen(desc);
	quotalen = desclen + 1 + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid);
	if (!user)
		goto no_memory_1;

	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
			key_quota_root_maxkeys : key_quota_maxkeys;
		unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&user->lock);
		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
			if (user->qnkeys + 1 >= maxkeys ||
			    user->qnbytes + quotalen >= maxbytes ||
			    user->qnbytes + quotalen < user->qnbytes)
				goto no_quota;
		}

		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}

	/* allocate and initialise the key and its description */
	key = kmem_cache_alloc(key_jar, GFP_KERNEL);
	if (!key)
		goto no_memory_2;

	if (desc) {
		key->index_key.desc_len = desclen;
		key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
		if (!key->description)
			goto no_memory_3;
	}

	atomic_set(&key->usage, 1);
	init_rwsem(&key->sem);
	lockdep_set_class(&key->sem, &type->lock_class);
	key->index_key.type = type;
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	key->flags = 0;
	key->expiry = 0;
	key->payload.data = NULL;
	key->security = NULL;

	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
		key->flags |= 1 << KEY_FLAG_IN_QUOTA;
	if (flags & KEY_ALLOC_TRUSTED)
		key->flags |= 1 << KEY_FLAG_TRUSTED;

	memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif

	/* let the security module know about the key */
	ret = security_key_alloc(key, cred, flags);
	if (ret < 0)
		goto security_error;

	/* publish the key by giving it a serial number */
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

error:
	return key;

security_error:
	kfree(key->description);
	kmem_cache_free(key_jar, key);
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
	key = ERR_PTR(ret);
	goto error;

no_memory_3:
	kmem_cache_free(key_jar, key);
no_memory_2:
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;
}
EXPORT_SYMBOL(key_alloc);
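
/*
 * Illustrative sketch (hypothetical in-kernel caller, not part of this file):
 * how key_alloc() is typically paired with key_instantiate_and_link().  The
 * key type "example_key_type", the payload and the destination keyring are
 * placeholders.
 *
 *	struct key *key;
 *	int ret;
 *
 *	key = key_alloc(&example_key_type, "example:desc",
 *			GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
 *			(KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW,
 *			KEY_ALLOC_NOT_IN_QUOTA);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 *
 *	ret = key_instantiate_and_link(key, payload, plen, dest_keyring, NULL);
 *	if (ret < 0)
 *		key_put(key);	(drop the allocation again on failure)
 */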

/**
 * key_payload_reserve - Adjust data quota reservation for the key's payload
 * @key: The key to make the reservation for.
 * @datalen: The amount of data payload the caller now wants.
 *
 * Adjust the amount of the owning user's key data quota that a key reserves.
 * If the amount is increased, then -EDQUOT may be returned if there isn't
 * enough free quota available.
 *
 * If successful, 0 is returned.
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
	int delta = (int)datalen - key->datalen;
	int ret = 0;

	key_check(key);

	/* contemplate the quota adjustment */
	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&key->user->lock);

		if (delta > 0 &&
		    (key->user->qnbytes + delta >= maxbytes ||
		     key->user->qnbytes + delta < key->user->qnbytes)) {
			ret = -EDQUOT;
		}
		else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}
		spin_unlock(&key->user->lock);
	}

	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;

	return ret;
}
EXPORT_SYMBOL(key_payload_reserve);

/*
 * Instantiate a key and link it into the target keyring atomically. Must be
 * called with the target keyring's semaphore writelocked. The target key's
 * semaphore need not be locked as instantiation is serialised by
 * key_construction_mutex.
 */
static int __key_instantiate_and_link(struct key *key,
				      struct key_preparsed_payload *prep,
				      struct key *keyring,
				      struct key *authkey,
				      struct assoc_array_edit **_edit)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, prep);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				__key_link(key, _edit);

			/* disable the authorisation key */
			if (authkey)
				key_revoke(authkey);
		}
	}

	mutex_unlock(&key_construction_mutex);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;
}

/**
 * key_instantiate_and_link - Instantiate a key and link it into the keyring.
 * @key: The key to instantiate.
 * @data: The data to use to instantiate the key.
 * @datalen: The length of @data.
 * @keyring: Keyring to create a link in on success (or NULL).
 * @authkey: The authorisation token permitting instantiation.
 *
 * Instantiate a key that's in the uninstantiated state using the provided data
 * and, if successful, link it in to the destination keyring if one is
 * supplied.
 *
 * If successful, 0 is returned, the authorisation token is revoked and anyone
 * waiting for the key is woken up. If the key was already instantiated,
 * -EBUSY will be returned.
 */
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring,
			     struct key *authkey)
{
	struct key_preparsed_payload prep;
	struct assoc_array_edit *edit;
	int ret;

	memset(&prep, 0, sizeof(prep));
	prep.data = data;
	prep.datalen = datalen;
	prep.quotalen = key->type->def_datalen;
	if (key->type->preparse) {
		ret = key->type->preparse(&prep);
		if (ret < 0)
			goto error;
	}

	if (keyring) {
		ret = __key_link_begin(keyring, &key->index_key, &edit);
		if (ret < 0)
			goto error_free_preparse;
	}

	ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);

	if (keyring)
		__key_link_end(keyring, &key->index_key, edit);

error_free_preparse:
	if (key->type->preparse)
		key->type->free_preparse(&prep);
error:
	return ret;
}

EXPORT_SYMBOL(key_instantiate_and_link);

/**
 * key_reject_and_link - Negatively instantiate a key and link it into the keyring.
 * @key: The key to instantiate.
 * @timeout: The timeout on the negative key.
 * @error: The error to return when the key is hit.
 * @keyring: Keyring to create a link in on success (or NULL).
 * @authkey: The authorisation token permitting instantiation.
 *
 * Negatively instantiate a key that's in the uninstantiated state and, if
 * successful, set its timeout and stored error and link it in to the
 * destination keyring if one is supplied. The key and any links to the key
 * will be automatically garbage collected after the timeout expires.
 *
 * Negative keys are used to rate limit repeated request_key() calls by causing
 * them to return the stored error code (typically ENOKEY) until the negative
 * key expires.
 *
 * If successful, 0 is returned, the authorisation token is revoked and anyone
 * waiting for the key is woken up. If the key was already instantiated,
 * -EBUSY will be returned.
 */
int key_reject_and_link(struct key *key,
			unsigned timeout,
			unsigned error,
			struct key *keyring,
			struct key *authkey)
{
	struct assoc_array_edit *edit;
	struct timespec now;
	int ret, awaken, link_ret = 0;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring)
		link_ret = __key_link_begin(keyring, &key->index_key, &edit);

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		key->type_data.reject_error = -error;
		smp_wmb();
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;
		key_schedule_gc(key->expiry + key_gc_delay);

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring && link_ret == 0)
			__key_link(key, &edit);

		/* disable the authorisation key */
		if (authkey)
			key_revoke(authkey);
	}

	mutex_unlock(&key_construction_mutex);

	if (keyring)
		__key_link_end(keyring, &key->index_key, edit);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret == 0 ? link_ret : ret;
}
EXPORT_SYMBOL(key_reject_and_link);

/**
 * key_put - Discard a reference to a key.
 * @key: The key to discard a reference from.
 *
 * Discard a reference to a key, and when all the references are gone, we
 * schedule the cleanup task to come and pull it out of the tree in process
 * context at some later time.
 */
void key_put(struct key *key)
{
	if (key) {
		key_check(key);

		if (atomic_dec_and_test(&key->usage))
			schedule_work(&key_gc_work);
	}
}
EXPORT_SYMBOL(key_put);

/*
 * Find a key by its serial number.
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

found:
	/* pretend it doesn't exist if it is awaiting deletion */
	if (atomic_read(&key->usage) == 0)
		goto not_found;

	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually change the key
	 */
	__key_get(key);

error:
	spin_unlock(&key_serial_lock);
	return key;
}

/*
 * Find and lock the specified key type against removal.
 *
 * We return with the sem read-locked if successful. If the type wasn't
 * available -ENOKEY is returned instead.
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			goto found_kernel_type;
	}

	up_read(&key_types_sem);
	ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
	return ktype;
}

/*
 * Set or clear the timeout on a key.
 */
void key_set_timeout(struct key *key, unsigned timeout)
{
	struct timespec now;
	time_t expiry = 0;

	/* make the changes with the locks held to prevent races */
	down_write(&key->sem);

	if (timeout > 0) {
		now = current_kernel_time();
		expiry = now.tv_sec + timeout;
	}

	key->expiry = expiry;
	key_schedule_gc(key->expiry + key_gc_delay);

	up_write(&key->sem);
}
EXPORT_SYMBOL_GPL(key_set_timeout);

/*
 * Unlock a key type locked by key_type_lookup().
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);
}
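
/*
 * Illustrative sketch (hypothetical caller): key_type_lookup() pins the
 * returned type by leaving key_types_sem read-locked, so every successful
 * lookup must be balanced by key_type_put() once the type is no longer
 * needed.
 *
 *	struct key_type *ktype;
 *
 *	ktype = key_type_lookup("user");
 *	if (IS_ERR(ktype))
 *		return PTR_ERR(ktype);
 *	... use ktype; it cannot be unregistered until we drop the lock ...
 *	key_type_put(ktype);
 */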

/*
 * Attempt to update an existing key.
 *
 * The key is given to us with an incremented refcount that we need to discard
 * if we get an error.
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     struct key_preparsed_payload *prep)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, prep);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key_ref;

error:
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;
}

/**
 * key_create_or_update - Update or create and instantiate a key.
 * @keyring_ref: A pointer to the destination keyring with possession flag.
 * @type: The type of key.
 * @description: The searchable description for the key.
 * @payload: The data to use to instantiate or update the key.
 * @plen: The length of @payload.
 * @perm: The permissions mask for a new key.
 * @flags: The quota flags for a new key.
 *
 * Search the destination keyring for a key of the same description and if one
 * is found, update it, otherwise create and instantiate a new one and create a
 * link to it from that keyring.
 *
 * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
 * concocted.
 *
 * Returns a pointer to the new key if successful, -ENODEV if the key type
 * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
 * caller isn't permitted to modify the keyring or the LSM did not permit
 * creation of the key.
 *
 * On success, the possession flag from the keyring ref will be tacked on to
 * the key ref before it is returned.
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       key_perm_t perm,
			       unsigned long flags)
{
	struct keyring_index_key index_key = {
		.description = description,
	};
	struct key_preparsed_payload prep;
	struct assoc_array_edit *edit;
	const struct cred *cred = current_cred();
	struct key *keyring, *key = NULL;
	key_ref_t key_ref;
	int ret;

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	index_key.type = key_type_lookup(type);
	if (IS_ERR(index_key.type)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	key_ref = ERR_PTR(-EINVAL);
	if (!index_key.type->match || !index_key.type->instantiate ||
	    (!index_key.description && !index_key.type->preparse))
		goto error_put_type;

	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_put_type;

	memset(&prep, 0, sizeof(prep));
	prep.data = payload;
	prep.datalen = plen;
	prep.quotalen = index_key.type->def_datalen;
	prep.trusted = flags & KEY_ALLOC_TRUSTED;
	if (index_key.type->preparse) {
		ret = index_key.type->preparse(&prep);
		if (ret < 0) {
			key_ref = ERR_PTR(ret);
			goto error_put_type;
		}
		if (!index_key.description)
			index_key.description = prep.description;
		key_ref = ERR_PTR(-EINVAL);
		if (!index_key.description)
			goto error_free_prep;
	}
	index_key.desc_len = strlen(index_key.description);

	key_ref = ERR_PTR(-EPERM);
	if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags))
		goto error_free_prep;
	flags |= prep.trusted ? KEY_ALLOC_TRUSTED : 0;

	ret = __key_link_begin(keyring, &index_key, &edit);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_free_prep;
	}

	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_link_end;
	}

	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (index_key.type->update) {
		key_ref = find_key_to_update(keyring_ref, &index_key);
		if (key_ref)
			goto found_matching_key;
	}

	/* if the client doesn't provide, decide on the permissions we want */
	if (perm == KEY_PERM_UNDEF) {
		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
		perm |= KEY_USR_VIEW;

		if (index_key.type->read)
			perm |= KEY_POS_READ;

		if (index_key.type == &key_type_keyring ||
		    index_key.type->update)
			perm |= KEY_POS_WRITE;
	}

	/* allocate a new key */
	key = key_alloc(index_key.type, index_key.description,
			cred->fsuid, cred->fsgid, cred, perm, flags);
	if (IS_ERR(key)) {
		key_ref = ERR_CAST(key);
		goto error_link_end;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_link_end;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_link_end:
	__key_link_end(keyring, &index_key, edit);
error_free_prep:
	if (index_key.type->preparse)
		index_key.type->free_preparse(&prep);
error_put_type:
	key_type_put(index_key.type);
error:
	return key_ref;

found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	__key_link_end(keyring, &index_key, edit);

	key_ref = __key_update(key_ref, &prep);
	goto error_free_prep;
}
EXPORT_SYMBOL(key_create_or_update);
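
/*
 * Illustrative sketch (hypothetical caller, not part of this file): creating
 * or updating a "user" key in a keyring the caller already holds a reference
 * to.  "dest_keyring", "blob" and "blob_len" are placeholders.
 *
 *	key_ref_t kref;
 *
 *	kref = key_create_or_update(make_key_ref(dest_keyring, 1),
 *				    "user", "example:desc",
 *				    blob, blob_len,
 *				    KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA);
 *	if (IS_ERR(kref))
 *		return PTR_ERR(kref);
 *	key_ref_put(kref);
 */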

/**
 * key_update - Update a key's contents.
 * @key_ref: The pointer (plus possession flag) to the key.
 * @payload: The data to be used to update the key.
 * @plen: The length of @payload.
 *
 * Attempt to update the contents of a key with the given payload data. The
 * caller must be granted Write permission on the key. Negative keys can be
 * instantiated by this method.
 *
 * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
 * type does not support updating. The key type may return other errors.
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
	struct key_preparsed_payload prep;
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	/* attempt to update it if supported */
	ret = -EOPNOTSUPP;
	if (!key->type->update)
		goto error;

	memset(&prep, 0, sizeof(prep));
	prep.data = payload;
	prep.datalen = plen;
	prep.quotalen = key->type->def_datalen;
	if (key->type->preparse) {
		ret = key->type->preparse(&prep);
		if (ret < 0)
			goto error;
	}

	down_write(&key->sem);

	ret = key->type->update(key, &prep);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (key->type->preparse)
		key->type->free_preparse(&prep);
error:
	return ret;
}
EXPORT_SYMBOL(key_update);

/**
 * key_revoke - Revoke a key.
 * @key: The key to be revoked.
 *
 * Mark a key as being revoked and ask the type to free up its resources. The
 * revocation timeout is set and the key and all its links will be
 * automatically garbage collected after key_gc_delay amount of time if they
 * are not manually dealt with first.
 */
void key_revoke(struct key *key)
{
	struct timespec now;
	time_t time;

	key_check(key);

	/* make sure no one's trying to change or use the key when we mark it
	 * - we tell lockdep that we might nest because we might be revoking an
	 *   authorisation key whilst holding the sem on a key we've just
	 *   instantiated
	 */
	down_write_nested(&key->sem, 1);
	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
	    key->type->revoke)
		key->type->revoke(key);

	/* set the death time to no more than the expiry time */
	now = current_kernel_time();
	time = now.tv_sec;
	if (key->revoked_at == 0 || key->revoked_at > time) {
		key->revoked_at = time;
		key_schedule_gc(key->revoked_at + key_gc_delay);
	}

	up_write(&key->sem);
}
EXPORT_SYMBOL(key_revoke);

/**
 * key_invalidate - Invalidate a key.
 * @key: The key to be invalidated.
 *
 * Mark a key as being invalidated and have it cleaned up immediately. The key
 * is ignored by all searches and other operations from this point.
 */
void key_invalidate(struct key *key)
{
	kenter("%d", key_serial(key));

	key_check(key);

	if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
		down_write_nested(&key->sem, 1);
		if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags))
			key_schedule_gc_links();
		up_write(&key->sem);
	}
}
EXPORT_SYMBOL(key_invalidate);

/**
 * register_key_type - Register a type of key.
 * @ktype: The new key type.
 *
 * Register a new key type.
 *
 * Returns 0 on success or -EEXIST if a type of this name already exists.
 */
int register_key_type(struct key_type *ktype)
{
	struct key_type *p;
	int ret;

	memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));

	ret = -EEXIST;
	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(p, &key_types_list, link) {
		if (strcmp(p->name, ktype->name) == 0)
			goto out;
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);

	pr_notice("Key type %s registered\n", ktype->name);
	ret = 0;

out:
	up_write(&key_types_sem);
	return ret;
}
EXPORT_SYMBOL(register_key_type);

/**
 * unregister_key_type - Unregister a type of key.
 * @ktype: The key type.
 *
 * Unregister a key type and mark all the extant keys of this type as dead.
 * Those keys of this type are then destroyed to get rid of their payloads and
 * they and their links will be garbage collected as soon as possible.
 */
void unregister_key_type(struct key_type *ktype)
{
	down_write(&key_types_sem);
	list_del_init(&ktype->link);
	downgrade_write(&key_types_sem);
	key_gc_keytype(ktype);
	pr_notice("Key type %s unregistered\n", ktype->name);
	up_read(&key_types_sem);
}
EXPORT_SYMBOL(unregister_key_type);

/*
 * Initialise the key management state.
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);
	list_add_tail(&key_type_logon.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);
}
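
/*
 * Illustrative sketch (hypothetical module, not part of this file): a minimal
 * key type whose instantiate hook reserves quota for its payload with
 * key_payload_reserve() before the type is registered.  "example" and the
 * stub bodies are placeholders; a usable type will normally also supply
 * operations such as .match, .destroy, .describe and .read - the full set is
 * declared in include/linux/key-type.h.
 *
 *	static int example_instantiate(struct key *key,
 *				       struct key_preparsed_payload *prep)
 *	{
 *		int ret;
 *
 *		ret = key_payload_reserve(key, prep->datalen);
 *		if (ret < 0)
 *			return ret;
 *		... stash a copy of prep->data in the key's payload ...
 *		return 0;
 *	}
 *
 *	static struct key_type key_type_example = {
 *		.name		= "example",
 *		.def_datalen	= 0,
 *		.instantiate	= example_instantiate,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_key_type(&key_type_example);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_key_type(&key_type_example);
 *	}
 */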