/* Basic authentication token and access key management
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include <linux/user_namespace.h>
#include "internal.h"

static struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

unsigned int key_quota_root_maxkeys = 200;	/* root's key count quota */
unsigned int key_quota_root_maxbytes = 20000;	/* root's key space quota */
unsigned int key_quota_maxkeys = 200;		/* general key count quota */
unsigned int key_quota_maxbytes = 20000;	/* general key space quota */

/* list of registered key types, guarded by key_types_sem */
static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

/* deferred destruction of unreferenced keys runs in process context */
static void key_cleanup(struct work_struct *work);
static DECLARE_WORK(key_cleanup_task, key_cleanup);

/* we serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);

/* any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
	.name = "dead",
};

#ifdef KEY_DEBUGGING
/*
 * report a key whose magic number doesn't match KEY_DEBUG_MAGIC (invoked by
 * the key_check() macro) and halt - this indicates memory corruption or a
 * use-after-free of a key structure
 */
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif
/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 * - returns with the record's usage count incremented, or NULL on allocation
 *   failure
 */
struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID
	 * - records are ordered first by UID, then by namespace pointer value
	 *   to discriminate the same UID in different namespaces */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		else if (user_ns < user->user_ns)
			p = &(*p)->rb_left;
		else if (user_ns > user->user_ns)
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->user_ns = get_user_ns(user_ns);
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	mutex_init(&candidate->cons_lock);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	/* candidate may be NULL here (first-pass hit); kfree(NULL) is a no-op */
	kfree(candidate);
out:
	return user;

} /* end key_user_lookup() */

/*****************************************************************************/
/*
 * dispose of a user structure
 * - frees the record (and drops its namespace ref) when the last usage ref
 *   goes away, removing it from the quota tree under key_user_lock
 */
void key_user_put(struct key_user *user)
{
	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);
		put_user_ns(user->user_ns);

		kfree(user);
	}

} /* end key_user_put() */

/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - these are assigned randomly to avoid security issues through covert
 *   channel problems
 * - serial numbers 0-2 are reserved (0 is invalid, 1-2 are special), hence
 *   the "< 3" checks below
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);

	spin_lock(&key_serial_lock);

attempt_insertion:
	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);
	return;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 3) {
			/* the counter wrapped - restart from the lowest
			 * permitted serial */
			key->serial = 3;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto attempt_insertion;
	}

} /* end key_alloc_serial() */

/*****************************************************************************/
/*
 * allocate a key of the specified type
 * - update the user's quota to reflect the existence of the key
 * - called from a key-type operation with key_types_sem read-locked by
 *   key_create_or_update()
 *   - this prevents unregistration of the key type
 * - upon return the key is as yet uninstantiated; the caller needs to either
 *   instantiate the key or discard it before returning
 * - returns the new key or an ERR_PTR (-EINVAL, -ENOMEM or -EDQUOT)
 */
struct key *key_alloc(struct key_type *type, const char *desc,
		      uid_t uid, gid_t gid, const struct cred *cred,
		      key_perm_t perm, unsigned long flags)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;
	int ret;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	desclen = strlen(desc) + 1;
	quotalen = desclen + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid, cred->user->user_ns);
	if (!user)
		goto no_memory_1;

	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		unsigned maxkeys = (uid == 0) ?
			key_quota_root_maxkeys : key_quota_maxkeys;
		unsigned maxbytes = (uid == 0) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&user->lock);
		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
			/* the third test catches unsigned wraparound of the
			 * byte count */
			if (user->qnkeys + 1 >= maxkeys ||
			    user->qnbytes + quotalen >= maxbytes ||
			    user->qnbytes + quotalen < user->qnbytes)
				goto no_quota;
		}

		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}

	/* allocate and initialise the key and its description */
	key = kmem_cache_alloc(key_jar, GFP_KERNEL);
	if (!key)
		goto no_memory_2;

	if (desc) {
		key->description = kmemdup(desc, desclen, GFP_KERNEL);
		if (!key->description)
			goto no_memory_3;
	}

	atomic_set(&key->usage, 1);
	init_rwsem(&key->sem);
	key->type = type;
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	key->flags = 0;
	key->expiry = 0;
	key->payload.data = NULL;
	key->security = NULL;

	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
		key->flags |= 1 << KEY_FLAG_IN_QUOTA;

	memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif

	/* let the security module know about the key */
	ret = security_key_alloc(key, cred, flags);
	if (ret < 0)
		goto security_error;

	/* publish the key by giving it a serial number */
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

error:
	return key;

security_error:
	kfree(key->description);
	kmem_cache_free(key_jar, key);
	/* undo the quota charge made above */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
	key = ERR_PTR(ret);
	goto error;

no_memory_3:
	kmem_cache_free(key_jar, key);
no_memory_2:
	/* undo the quota charge made above */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);

/*****************************************************************************/
/*
 * reserve an amount of quota for the key's payload
 * - adjusts the owner's byte quota by the difference between the new and the
 *   currently recorded payload length; returns 0 or -EDQUOT
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
	int delta = (int) datalen - key->datalen;
	int ret = 0;

	key_check(key);

	/* contemplate the quota adjustment */
	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		unsigned maxbytes = (key->user->uid == 0) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&key->user->lock);

		/* only a growth in size can breach the quota; the second test
		 * catches wraparound of the byte count */
		if (delta > 0 &&
		    (key->user->qnbytes + delta >= maxbytes ||
		     key->user->qnbytes + delta < key->user->qnbytes)) {
			ret = -EDQUOT;
		}
		else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}
		spin_unlock(&key->user->lock);
	}

	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;

	return ret;

} /* end key_payload_reserve() */

EXPORT_SYMBOL(key_payload_reserve);

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 * - key_construction_mutex serialises instantiation against other
 *   instantiators; -EBUSY is returned if the key is already instantiated
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring,
				      struct key *authkey)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				ret = __key_link(keyring, key);

			/* disable the authorisation key */
			if (authkey)
				key_revoke(authkey);
		}
	}

	mutex_unlock(&key_construction_mutex);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;

} /* end __key_instantiate_and_link() */

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - as __key_instantiate_and_link(), but takes the keyring semaphore itself
 */
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring,
			     struct key *authkey)
{
	int ret;

	if (keyring)
		down_write(&keyring->sem);

	ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey);

	if (keyring)
		up_write(&keyring->sem);

	return ret;

} /* end key_instantiate_and_link() */

EXPORT_SYMBOL(key_instantiate_and_link);

/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 * - the key is marked negative and given an expiry of now + timeout seconds,
 *   after which the garbage collector may reap it
 */
int key_negate_and_link(struct key *key,
			unsigned timeout,
			struct key *keyring,
			struct key *authkey)
{
	struct timespec now;
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring)
		down_write(&keyring->sem);

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;
		key_schedule_gc(key->expiry + key_gc_delay);

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring)
			ret = __key_link(keyring, key);

		/* disable the authorisation key */
		if (authkey)
			key_revoke(authkey);
	}

	mutex_unlock(&key_construction_mutex);

	if (keyring)
		up_write(&keyring->sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);

/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 * - scheduled by key_put() when a key's usage count reaches zero; destroys
 *   every zero-usage key it can find in the serial tree
 */
static void key_cleanup(struct work_struct *work)
{
	struct rb_node *_n;
	struct key *key;

go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;

found_dead_key:
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_check(key);

	security_key_free(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	/* poison the magic so a use-after-free trips key_check() */
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;

} /* end key_cleanup() */

/*****************************************************************************/
/*
 * dispose of a reference to a key
 * - when all the references are gone, we schedule the cleanup task to come and
 *   pull it out of the tree in definite process context
 * - NULL keys are tolerated
 */
void key_put(struct key *key)
{
	if (key) {
		key_check(key);

		if (atomic_dec_and_test(&key->usage))
			schedule_work(&key_cleanup_task);
	}

} /* end key_put() */

EXPORT_SYMBOL(key_put);

/*****************************************************************************/
/*
 * find a key by its serial number
 * - returns the key with its usage count incremented, or ERR_PTR(-ENOKEY)
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

found:
	/* pretend it doesn't exist if it is awaiting deletion */
	if (atomic_read(&key->usage) == 0)
		goto not_found;

	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually change the key
	 */
	atomic_inc(&key->usage);

error:
	spin_unlock(&key_serial_lock);
	return key;

} /* end key_lookup() */

/*****************************************************************************/
/*
 * find and lock the specified key type against removal
 * - we return with the sem readlocked
 * - on failure, ERR_PTR(-ENOKEY) is returned and the sem is dropped; the
 *   caller must balance a successful return with key_type_put()
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			goto found_kernel_type;
	}

	up_read(&key_types_sem);
	ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
	return ktype;

} /* end key_type_lookup() */

/*****************************************************************************/
/*
 * unlock a key type
 * - releases the read lock taken by key_type_lookup()
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);

} /* end key_type_put() */

/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 * - on success the same (still pinned) key_ref is returned; on failure the
 *   key is put and an ERR_PTR is returned instead
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, payload, plen);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key_ref;

error:
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;

} /* end __key_update() */

/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 * - the new key takes the caller's fsuid/fsgid; if perm is KEY_PERM_UNDEF a
 *   default permission mask is derived from the key type's capabilities
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       key_perm_t perm,
			       unsigned long flags)
{
	const struct cred *cred = current_cred();
	struct key_type *ktype;
	struct key *keyring, *key = NULL;
	key_ref_t key_ref;
	int ret;

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	key_ref = ERR_PTR(-EINVAL);
	if (!ktype->match || !ktype->instantiate)
		goto error_2;

	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_2;

	down_write(&keyring->sem);

	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (ktype->update) {
		key_ref = __keyring_search_one(keyring_ref, ktype, description,
					       0);
		if (!IS_ERR(key_ref))
			goto found_matching_key;
	}

	/* if the client doesn't provide, decide on the permissions we want */
	if (perm == KEY_PERM_UNDEF) {
		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
		perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

		if (ktype->read)
			perm |= KEY_POS_READ | KEY_USR_READ;

		if (ktype == &key_type_keyring || ktype->update)
			perm |= KEY_USR_WRITE;
	}

	/* allocate a new key */
	key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred,
			perm, flags);
	if (IS_ERR(key)) {
		key_ref = ERR_CAST(key);
		goto error_3;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

 error_3:
	up_write(&keyring->sem);
 error_2:
	key_type_put(ktype);
 error:
	return key_ref;

 found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	up_write(&keyring->sem);
	key_type_put(ktype);

	key_ref = __key_update(key_ref, payload, plen);
	goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);

/*****************************************************************************/
/*
 * update a key
 * - requires write permission on the key; returns 0 or a negative errno
 *   (-EOPNOTSUPP if the key type has no update operation)
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	/* attempt to update it if supported */
	ret = -EOPNOTSUPP;
	if (key->type->update) {
		down_write(&key->sem);

		ret = key->type->update(key, payload, plen);
		if (ret == 0)
			/* updating a negative key instantiates it */
			clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

		up_write(&key->sem);
	}

 error:
	return ret;

} /* end key_update() */

EXPORT_SYMBOL(key_update);

/*****************************************************************************/
/*
 * revoke a key
 * - the type's revoke operation is only called the first time round
 */
void key_revoke(struct key *key)
{
	struct timespec now;
	time_t time;

	key_check(key);

	/* make sure no one's trying to change or use the key when we mark it
	 * - we tell lockdep that we might nest because we might be revoking an
	 *   authorisation key whilst holding the sem on a key we've just
	 *   instantiated
	 */
	down_write_nested(&key->sem, 1);
	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
	    key->type->revoke)
		key->type->revoke(key);

	/* set the death time to no more than the expiry time */
	now = current_kernel_time();
	time = now.tv_sec;
	if (key->revoked_at == 0 || key->revoked_at > time) {
		key->revoked_at = time;
		key_schedule_gc(key->revoked_at + key_gc_delay);
	}

	up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);

/*****************************************************************************/
/*
 * register a type of key
 * - returns 0 on success or -EEXIST if a type of the same name is already
 *   registered
 */
int register_key_type(struct key_type *ktype)
{
	struct key_type *p;
	int ret;

	ret = -EEXIST;
	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(p, &key_types_list, link) {
		if (strcmp(p->name, ktype->name) == 0)
			goto out;
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);
	ret = 0;

out:
	up_write(&key_types_sem);
	return ret;

} /* end register_key_type() */

EXPORT_SYMBOL(register_key_type);

/*****************************************************************************/
/*
 * unregister a type of key
 * - all keys of this type are re-typed to the "dead" type and their payloads
 *   destroyed; the garbage collector is then kicked to reap them
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;

	down_write(&key_types_sem);

	/* withdraw the key type */
	list_del_init(&ktype->link);

	/* mark all the keys of this type dead */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			key->type = &key_type_dead;
			set_bit(KEY_FLAG_DEAD, &key->flags);
		}
	}

	spin_unlock(&key_serial_lock);

	/* make sure everyone revalidates their keys */
	synchronize_rcu();

	/* we should now be able to destroy the payloads of all the keys of
	 * this type with impunity */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			if (ktype->destroy)
				ktype->destroy(key);
			memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
		}
	}

	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);

	key_schedule_gc(0);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);

/*****************************************************************************/
/*
 * initialise the key management stuff
 * - called once at boot; sets up the key slab, the built-in key types and
 *   the root user's quota record
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);

} /* end key_init() */