xref: /openbmc/linux/security/keys/key.c (revision 1da177e4)
1 /* key.c: basic authentication token and access key management
2  *
3  * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
4  * Written by David Howells (dhowells@redhat.com)
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * as published by the Free Software Foundation; either version
9  * 2 of the License, or (at your option) any later version.
10  */
11 
12 #include <linux/module.h>
13 #include <linux/init.h>
14 #include <linux/sched.h>
15 #include <linux/slab.h>
16 #include <linux/workqueue.h>
17 #include <linux/err.h>
18 #include "internal.h"
19 
20 static kmem_cache_t	*key_jar;
21 static key_serial_t	key_serial_next = 3;
22 struct rb_root		key_serial_tree; /* tree of keys indexed by serial */
23 DEFINE_SPINLOCK(key_serial_lock);
24 
25 struct rb_root	key_user_tree; /* tree of quota records indexed by UID */
26 DEFINE_SPINLOCK(key_user_lock);
27 
28 static LIST_HEAD(key_types_list);
29 static DECLARE_RWSEM(key_types_sem);
30 
31 static void key_cleanup(void *data);
32 static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL);
33 
34 /* we serialise key instantiation and link */
35 DECLARE_RWSEM(key_construction_sem);
36 
37 /* any key whose type gets unregistered will be re-typed to this */
38 struct key_type key_type_dead = {
39 	.name		= "dead",
40 };
41 
42 #ifdef KEY_DEBUGGING
43 void __key_check(const struct key *key)
44 {
45 	printk("__key_check: key %p {%08x} should be {%08x}\n",
46 	       key, key->magic, KEY_DEBUG_MAGIC);
47 	BUG();
48 }
49 #endif
50 
51 /*****************************************************************************/
52 /*
53  * get the key quota record for a user, allocating a new record if one doesn't
54  * already exist
55  */
56 struct key_user *key_user_lookup(uid_t uid)
57 {
58 	struct key_user *candidate = NULL, *user;
59 	struct rb_node *parent = NULL;
60 	struct rb_node **p;
61 
62  try_again:
63 	p = &key_user_tree.rb_node;
64 	spin_lock(&key_user_lock);
65 
66 	/* search the tree for a user record with a matching UID */
67 	while (*p) {
68 		parent = *p;
69 		user = rb_entry(parent, struct key_user, node);
70 
71 		if (uid < user->uid)
72 			p = &(*p)->rb_left;
73 		else if (uid > user->uid)
74 			p = &(*p)->rb_right;
75 		else
76 			goto found;
77 	}
78 
79 	/* if we get here, we failed to find a match in the tree */
80 	if (!candidate) {
81 		/* allocate a candidate user record if we don't already have
82 		 * one */
83 		spin_unlock(&key_user_lock);
84 
85 		user = NULL;
86 		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
87 		if (unlikely(!candidate))
88 			goto out;
89 
90 		/* the allocation may have slept, so we need to repeat the
91 		 * search in case someone else added the record while we were
92 		 * asleep */
93 		goto try_again;
94 	}
95 
96 	/* if we get here, then the user record still hadn't appeared on the
97 	 * second pass - so we use the candidate record */
98 	atomic_set(&candidate->usage, 1);
99 	atomic_set(&candidate->nkeys, 0);
100 	atomic_set(&candidate->nikeys, 0);
101 	candidate->uid = uid;
102 	candidate->qnkeys = 0;
103 	candidate->qnbytes = 0;
104 	spin_lock_init(&candidate->lock);
105 	INIT_LIST_HEAD(&candidate->consq);
106 
107 	rb_link_node(&candidate->node, parent, p);
108 	rb_insert_color(&candidate->node, &key_user_tree);
109 	spin_unlock(&key_user_lock);
110 	user = candidate;
111 	goto out;
112 
113 	/* okay - we found a user record for this UID */
114  found:
115 	atomic_inc(&user->usage);
116 	spin_unlock(&key_user_lock);
117 	if (candidate)
118 		kfree(candidate);
119  out:
120 	return user;
121 
122 } /* end key_user_lookup() */
123 
124 /*****************************************************************************/
125 /*
126  * dispose of a user structure
127  */
128 void key_user_put(struct key_user *user)
129 {
130 	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
131 		rb_erase(&user->node, &key_user_tree);
132 		spin_unlock(&key_user_lock);
133 
134 		kfree(user);
135 	}
136 
137 } /* end key_user_put() */
138 
139 /*****************************************************************************/
140 /*
141  * insert a key with a fixed serial number
142  */
143 static void __init __key_insert_serial(struct key *key)
144 {
145 	struct rb_node *parent, **p;
146 	struct key *xkey;
147 
148 	parent = NULL;
149 	p = &key_serial_tree.rb_node;
150 
151 	while (*p) {
152 		parent = *p;
153 		xkey = rb_entry(parent, struct key, serial_node);
154 
155 		if (key->serial < xkey->serial)
156 			p = &(*p)->rb_left;
157 		else if (key->serial > xkey->serial)
158 			p = &(*p)->rb_right;
159 		else
160 			BUG();
161 	}
162 
163 	/* we've found a suitable hole - arrange for this key to occupy it */
164 	rb_link_node(&key->serial_node, parent, p);
165 	rb_insert_color(&key->serial_node, &key_serial_tree);
166 
167 } /* end __key_insert_serial() */
168 
169 /*****************************************************************************/
170 /*
171  * assign a key the next unique serial number
172  * - we work through all the serial numbers between 2 and 2^31-1 in turn and
173  *   then wrap
174  */
175 static inline void key_alloc_serial(struct key *key)
176 {
177 	struct rb_node *parent, **p;
178 	struct key *xkey;
179 
180 	spin_lock(&key_serial_lock);
181 
182 	/* propose a likely serial number and look for a hole for it in the
183 	 * serial number tree */
184 	key->serial = key_serial_next;
185 	if (key->serial < 3)
186 		key->serial = 3;
187 	key_serial_next = key->serial + 1;
188 
189 	parent = NULL;
190 	p = &key_serial_tree.rb_node;
191 
192 	while (*p) {
193 		parent = *p;
194 		xkey = rb_entry(parent, struct key, serial_node);
195 
196 		if (key->serial < xkey->serial)
197 			p = &(*p)->rb_left;
198 		else if (key->serial > xkey->serial)
199 			p = &(*p)->rb_right;
200 		else
201 			goto serial_exists;
202 	}
203 	goto insert_here;
204 
205 	/* we found a key with the proposed serial number - walk the tree from
206 	 * that point looking for the next unused serial number */
207  serial_exists:
208 	for (;;) {
209 		key->serial = key_serial_next;
210 		if (key->serial < 2)
211 			key->serial = 2;
212 		key_serial_next = key->serial + 1;
213 
214 		if (!parent->rb_parent)
215 			p = &key_serial_tree.rb_node;
216 		else if (parent->rb_parent->rb_left == parent)
217 			p = &parent->rb_parent->rb_left;
218 		else
219 			p = &parent->rb_parent->rb_right;
220 
221 		parent = rb_next(parent);
222 		if (!parent)
223 			break;
224 
225 		xkey = rb_entry(parent, struct key, serial_node);
226 		if (key->serial < xkey->serial)
227 			goto insert_here;
228 	}
229 
230 	/* we've found a suitable hole - arrange for this key to occupy it */
231  insert_here:
232 	rb_link_node(&key->serial_node, parent, p);
233 	rb_insert_color(&key->serial_node, &key_serial_tree);
234 
235 	spin_unlock(&key_serial_lock);
236 
237 } /* end key_alloc_serial() */
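
/*
 * Worked example: if the proposed serial collides - say 7 is proposed but
 * 7, 8 and 9 are already in the tree - the serial_exists loop walks on
 * through the tree in serial order, advancing the proposed serial as it
 * goes, until it reaches a serial that is not in use (10 here) and links
 * the key in at that point.
 */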
238 
239 /*****************************************************************************/
240 /*
241  * allocate a key of the specified type
242  * - update the user's quota to reflect the existence of the key
243  * - called from a key-type operation with key_types_sem read-locked by either
244  *   key_create_or_update() or by key_duplicate(); this prevents unregistration
245  *   of the key type
246  * - upon return the key is as yet uninstantiated; the caller needs to either
247  *   instantiate the key or discard it before returning
248  */
249 struct key *key_alloc(struct key_type *type, const char *desc,
250 		      uid_t uid, gid_t gid, key_perm_t perm,
251 		      int not_in_quota)
252 {
253 	struct key_user *user = NULL;
254 	struct key *key;
255 	size_t desclen, quotalen;
256 
257 	key = ERR_PTR(-EINVAL);
258 	if (!desc || !*desc)
259 		goto error;
260 
261 	desclen = strlen(desc) + 1;
262 	quotalen = desclen + type->def_datalen;
263 
264 	/* get hold of the key tracking for this user */
265 	user = key_user_lookup(uid);
266 	if (!user)
267 		goto no_memory_1;
268 
269 	/* check that the user's quota permits allocation of another key and
270 	 * its description */
271 	if (!not_in_quota) {
272 		spin_lock(&user->lock);
273 		if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
274 		    user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES
275 		    )
276 			goto no_quota;
277 
278 		user->qnkeys++;
279 		user->qnbytes += quotalen;
280 		spin_unlock(&user->lock);
281 	}
282 
283 	/* allocate and initialise the key and its description */
284 	key = kmem_cache_alloc(key_jar, SLAB_KERNEL);
285 	if (!key)
286 		goto no_memory_2;
287 
288 	if (desc) {
289 		key->description = kmalloc(desclen, GFP_KERNEL);
290 		if (!key->description)
291 			goto no_memory_3;
292 
293 		memcpy(key->description, desc, desclen);
294 	}
295 
296 	atomic_set(&key->usage, 1);
297 	rwlock_init(&key->lock);
298 	init_rwsem(&key->sem);
299 	key->type = type;
300 	key->user = user;
301 	key->quotalen = quotalen;
302 	key->datalen = type->def_datalen;
303 	key->uid = uid;
304 	key->gid = gid;
305 	key->perm = perm;
306 	key->flags = 0;
307 	key->expiry = 0;
308 	key->payload.data = NULL;
309 
310 	if (!not_in_quota)
311 		key->flags |= KEY_FLAG_IN_QUOTA;
312 
313 	memset(&key->type_data, 0, sizeof(key->type_data));
314 
315 #ifdef KEY_DEBUGGING
316 	key->magic = KEY_DEBUG_MAGIC;
317 #endif
318 
319 	/* publish the key by giving it a serial number */
320 	atomic_inc(&user->nkeys);
321 	key_alloc_serial(key);
322 
323  error:
324 	return key;
325 
326  no_memory_3:
327 	kmem_cache_free(key_jar, key);
328  no_memory_2:
329 	if (!not_in_quota) {
330 		spin_lock(&user->lock);
331 		user->qnkeys--;
332 		user->qnbytes -= quotalen;
333 		spin_unlock(&user->lock);
334 	}
335 	key_user_put(user);
336  no_memory_1:
337 	key = ERR_PTR(-ENOMEM);
338 	goto error;
339 
340  no_quota:
341 	spin_unlock(&user->lock);
342 	key_user_put(user);
343 	key = ERR_PTR(-EDQUOT);
344 	goto error;
345 
346 } /* end key_alloc() */
347 
348 EXPORT_SYMBOL(key_alloc);
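
/*
 * Usage sketch (illustrative; "my_key_type", the description and the
 * permission mask are assumptions, not defined in this file).  The key
 * returned by key_alloc() is uninstantiated and must be instantiated or
 * released before the caller returns:
 *
 *	key = key_alloc(&my_key_type, "my:description",
 *			current->fsuid, current->fsgid,
 *			KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_READ, 0);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 *
 *	ret = key_instantiate_and_link(key, payload, plen, NULL);
 *	if (ret < 0)
 *		key_put(key);
 */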
349 
350 /*****************************************************************************/
351 /*
352  * reserve an amount of quota for the key's payload
353  */
354 int key_payload_reserve(struct key *key, size_t datalen)
355 {
356 	int delta = (int) datalen - key->datalen;
357 	int ret = 0;
358 
359 	key_check(key);
360 
361 	/* contemplate the quota adjustment */
362 	if (delta != 0 && key->flags & KEY_FLAG_IN_QUOTA) {
363 		spin_lock(&key->user->lock);
364 
365 		if (delta > 0 &&
366 		    key->user->qnbytes + delta > KEYQUOTA_MAX_BYTES
367 		    ) {
368 			ret = -EDQUOT;
369 		}
370 		else {
371 			key->user->qnbytes += delta;
372 			key->quotalen += delta;
373 		}
374 		spin_unlock(&key->user->lock);
375 	}
376 
377 	/* change the recorded data length if that didn't generate an error */
378 	if (ret == 0)
379 		key->datalen = datalen;
380 
381 	return ret;
382 
383 } /* end key_payload_reserve() */
384 
385 EXPORT_SYMBOL(key_payload_reserve);
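
/*
 * Usage sketch (illustrative): a key type's update() operation might
 * reserve quota for the new payload size before committing to it, leaving
 * the old payload in place if the user's byte quota would be exceeded:
 *
 *	ret = key_payload_reserve(key, datalen);
 *	if (ret < 0)
 *		return ret;
 *	... swap in the new payload ...
 */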
386 
387 /*****************************************************************************/
388 /*
389  * instantiate a key and link it into the target keyring atomically
390  * - called with the target keyring's semaphore writelocked
391  */
392 static int __key_instantiate_and_link(struct key *key,
393 				      const void *data,
394 				      size_t datalen,
395 				      struct key *keyring)
396 {
397 	int ret, awaken;
398 
399 	key_check(key);
400 	key_check(keyring);
401 
402 	awaken = 0;
403 	ret = -EBUSY;
404 
405 	down_write(&key_construction_sem);
406 
407 	/* can't instantiate twice */
408 	if (!(key->flags & KEY_FLAG_INSTANTIATED)) {
409 		/* instantiate the key */
410 		ret = key->type->instantiate(key, data, datalen);
411 
412 		if (ret == 0) {
413 			/* mark the key as being instantiated */
414 			write_lock(&key->lock);
415 
416 			atomic_inc(&key->user->nikeys);
417 			key->flags |= KEY_FLAG_INSTANTIATED;
418 
419 			if (key->flags & KEY_FLAG_USER_CONSTRUCT) {
420 				key->flags &= ~KEY_FLAG_USER_CONSTRUCT;
421 				awaken = 1;
422 			}
423 
424 			write_unlock(&key->lock);
425 
426 			/* and link it into the destination keyring */
427 			if (keyring)
428 				ret = __key_link(keyring, key);
429 		}
430 	}
431 
432 	up_write(&key_construction_sem);
433 
434 	/* wake up anyone waiting for a key to be constructed */
435 	if (awaken)
436 		wake_up_all(&request_key_conswq);
437 
438 	return ret;
439 
440 } /* end __key_instantiate_and_link() */
441 
442 /*****************************************************************************/
443 /*
444  * instantiate a key and link it into the target keyring atomically
445  */
446 int key_instantiate_and_link(struct key *key,
447 			     const void *data,
448 			     size_t datalen,
449 			     struct key *keyring)
450 {
451 	int ret;
452 
453 	if (keyring)
454 		down_write(&keyring->sem);
455 
456 	ret = __key_instantiate_and_link(key, data, datalen, keyring);
457 
458 	if (keyring)
459 		up_write(&keyring->sem);
460 
461 	return ret;
462 } /* end key_instantiate_and_link() */
463 
464 EXPORT_SYMBOL(key_instantiate_and_link);
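
/*
 * Usage sketch (illustrative; "dest_keyring" is an assumed, already-held
 * keyring reference).  The keyring semaphore is held across both steps,
 * so a concurrent search never finds a half-constructed key in the
 * keyring:
 *
 *	ret = key_instantiate_and_link(key, data, datalen, dest_keyring);
 *	if (ret < 0)
 *		key_put(key);
 */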
465 
466 /*****************************************************************************/
467 /*
468  * negatively instantiate a key and link it into the target keyring atomically
469  */
470 int key_negate_and_link(struct key *key,
471 			unsigned timeout,
472 			struct key *keyring)
473 {
474 	struct timespec now;
475 	int ret, awaken;
476 
477 	key_check(key);
478 	key_check(keyring);
479 
480 	awaken = 0;
481 	ret = -EBUSY;
482 
483 	if (keyring)
484 		down_write(&keyring->sem);
485 
486 	down_write(&key_construction_sem);
487 
488 	/* can't instantiate twice */
489 	if (!(key->flags & KEY_FLAG_INSTANTIATED)) {
490 		/* mark the key as being negatively instantiated */
491 		write_lock(&key->lock);
492 
493 		atomic_inc(&key->user->nikeys);
494 		key->flags |= KEY_FLAG_INSTANTIATED | KEY_FLAG_NEGATIVE;
495 		now = current_kernel_time();
496 		key->expiry = now.tv_sec + timeout;
497 
498 		if (key->flags & KEY_FLAG_USER_CONSTRUCT) {
499 			key->flags &= ~KEY_FLAG_USER_CONSTRUCT;
500 			awaken = 1;
501 		}
502 
503 		write_unlock(&key->lock);
504 		ret = 0;
505 
506 		/* and link it into the destination keyring */
507 		if (keyring)
508 			ret = __key_link(keyring, key);
509 	}
510 
511 	up_write(&key_construction_sem);
512 
513 	if (keyring)
514 		up_write(&keyring->sem);
515 
516 	/* wake up anyone waiting for a key to be constructed */
517 	if (awaken)
518 		wake_up_all(&request_key_conswq);
519 
520 	return ret;
521 
522 } /* end key_negate_and_link() */
523 
524 EXPORT_SYMBOL(key_negate_and_link);
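
/*
 * Usage sketch (illustrative; the 60 second timeout is an assumption): if
 * an attempt to construct a key fails, the requester can negatively
 * instantiate it so that waiters are woken and subsequent searches see
 * the negative key until it expires:
 *
 *	ret = key_negate_and_link(key, 60, dest_keyring);
 */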
525 
526 /*****************************************************************************/
527 /*
528  * do the cleanup in process context so that we don't have to disable
529  * interrupts all over the place
530  */
531 static void key_cleanup(void *data)
532 {
533 	struct rb_node *_n;
534 	struct key *key;
535 
536  go_again:
537 	/* look for a dead key in the tree */
538 	spin_lock(&key_serial_lock);
539 
540 	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
541 		key = rb_entry(_n, struct key, serial_node);
542 
543 		if (atomic_read(&key->usage) == 0)
544 			goto found_dead_key;
545 	}
546 
547 	spin_unlock(&key_serial_lock);
548 	return;
549 
550  found_dead_key:
551 	/* we found a dead key - once we've removed it from the tree, we can
552 	 * drop the lock */
553 	rb_erase(&key->serial_node, &key_serial_tree);
554 	spin_unlock(&key_serial_lock);
555 
556 	/* deal with the user's key tracking and quota */
557 	if (key->flags & KEY_FLAG_IN_QUOTA) {
558 		spin_lock(&key->user->lock);
559 		key->user->qnkeys--;
560 		key->user->qnbytes -= key->quotalen;
561 		spin_unlock(&key->user->lock);
562 	}
563 
564 	atomic_dec(&key->user->nkeys);
565 	if (key->flags & KEY_FLAG_INSTANTIATED)
566 		atomic_dec(&key->user->nikeys);
567 
568 	key_user_put(key->user);
569 
570 	/* now throw away the key memory */
571 	if (key->type->destroy)
572 		key->type->destroy(key);
573 
574 	kfree(key->description);
575 
576 #ifdef KEY_DEBUGGING
577 	key->magic = KEY_DEBUG_MAGIC_X;
578 #endif
579 	kmem_cache_free(key_jar, key);
580 
581 	/* there may, of course, be more than one key to destroy */
582 	goto go_again;
583 
584 } /* end key_cleanup() */
585 
586 /*****************************************************************************/
587 /*
588  * dispose of a reference to a key
589  * - when all the references are gone, we schedule the cleanup task to come and
590  *   pull it out of the tree in definite process context
591  */
592 void key_put(struct key *key)
593 {
594 	if (key) {
595 		key_check(key);
596 
597 		if (atomic_dec_and_test(&key->usage))
598 			schedule_work(&key_cleanup_task);
599 	}
600 
601 } /* end key_put() */
602 
603 EXPORT_SYMBOL(key_put);
604 
605 /*****************************************************************************/
606 /*
607  * find a key by its serial number
608  */
609 struct key *key_lookup(key_serial_t id)
610 {
611 	struct rb_node *n;
612 	struct key *key;
613 
614 	spin_lock(&key_serial_lock);
615 
616 	/* search the tree for the specified key */
617 	n = key_serial_tree.rb_node;
618 	while (n) {
619 		key = rb_entry(n, struct key, serial_node);
620 
621 		if (id < key->serial)
622 			n = n->rb_left;
623 		else if (id > key->serial)
624 			n = n->rb_right;
625 		else
626 			goto found;
627 	}
628 
629  not_found:
630 	key = ERR_PTR(-ENOKEY);
631 	goto error;
632 
633  found:
634 	/* pretend it doesn't exist if it's dead */
635 	if (atomic_read(&key->usage) == 0 ||
636 	    (key->flags & KEY_FLAG_DEAD) ||
637 	    key->type == &key_type_dead)
638 		goto not_found;
639 
640 	/* this races with key_put(), but that doesn't matter since key_put()
641 	 * doesn't actually change the key
642 	 */
643 	atomic_inc(&key->usage);
644 
645  error:
646 	spin_unlock(&key_serial_lock);
647 	return key;
648 
649 } /* end key_lookup() */
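
/*
 * Usage sketch: key_lookup() takes a reference on the key it returns, so
 * every successful lookup must be balanced by a key_put():
 *
 *	key = key_lookup(id);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 *	... use the key ...
 *	key_put(key);
 */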
650 
651 /*****************************************************************************/
652 /*
653  * find and lock the specified key type against removal
654  * - we return with the sem readlocked
655  */
656 struct key_type *key_type_lookup(const char *type)
657 {
658 	struct key_type *ktype;
659 
660 	down_read(&key_types_sem);
661 
662 	/* look up the key type to see if it's one of the registered kernel
663 	 * types */
664 	list_for_each_entry(ktype, &key_types_list, link) {
665 		if (strcmp(ktype->name, type) == 0)
666 			goto found_kernel_type;
667 	}
668 
669 	up_read(&key_types_sem);
670 	ktype = ERR_PTR(-ENOKEY);
671 
672  found_kernel_type:
673 	return ktype;
674 
675 } /* end key_type_lookup() */
676 
677 /*****************************************************************************/
678 /*
679  * unlock a key type
680  */
681 void key_type_put(struct key_type *ktype)
682 {
683 	up_read(&key_types_sem);
684 
685 } /* end key_type_put() */
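
/*
 * Usage sketch: key_type_lookup() returns with key_types_sem read-locked,
 * so a successful lookup must always be paired with key_type_put() to
 * drop the lock again:
 *
 *	ktype = key_type_lookup("user");
 *	if (IS_ERR(ktype))
 *		return PTR_ERR(ktype);
 *	... use ktype ...
 *	key_type_put(ktype);
 */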
686 
687 /*****************************************************************************/
688 /*
689  * attempt to update an existing key
690  * - the key has an incremented refcount
691  * - we need to put the key if we get an error
692  */
693 static inline struct key *__key_update(struct key *key, const void *payload,
694 				       size_t plen)
695 {
696 	int ret;
697 
698 	/* need write permission on the key to update it */
699 	ret = -EACCES;
700 	if (!key_permission(key, KEY_WRITE))
701 		goto error;
702 
703 	ret = -EEXIST;
704 	if (!key->type->update)
705 		goto error;
706 
707 	down_write(&key->sem);
708 
709 	ret = key->type->update(key, payload, plen);
710 
711 	if (ret == 0) {
712 		/* updating a negative key instantiates it */
713 		write_lock(&key->lock);
714 		key->flags &= ~KEY_FLAG_NEGATIVE;
715 		write_unlock(&key->lock);
716 	}
717 
718 	up_write(&key->sem);
719 
720 	if (ret < 0)
721 		goto error;
722  out:
723 	return key;
724 
725  error:
726 	key_put(key);
727 	key = ERR_PTR(ret);
728 	goto out;
729 
730 } /* end __key_update() */
731 
732 /*****************************************************************************/
733 /*
734  * search the specified keyring for a key of the same description; if one is
735  * found, update it, otherwise add a new one
736  */
737 struct key *key_create_or_update(struct key *keyring,
738 				 const char *type,
739 				 const char *description,
740 				 const void *payload,
741 				 size_t plen,
742 				 int not_in_quota)
743 {
744 	struct key_type *ktype;
745 	struct key *key = NULL;
746 	key_perm_t perm;
747 	int ret;
748 
749 	key_check(keyring);
750 
751 	/* look up the key type to see if it's one of the registered kernel
752 	 * types */
753 	ktype = key_type_lookup(type);
754 	if (IS_ERR(ktype)) {
755 		key = ERR_PTR(-ENODEV);
756 		goto error;
757 	}
758 
759 	ret = -EINVAL;
760 	if (!ktype->match || !ktype->instantiate)
761 		goto error_2;
762 
763 	/* search for an existing key of the same type and description in the
764 	 * destination keyring
765 	 */
766 	down_write(&keyring->sem);
767 
768 	key = __keyring_search_one(keyring, ktype, description, 0);
769 	if (!IS_ERR(key))
770 		goto found_matching_key;
771 
772 	/* if we're going to allocate a new key, we're going to have to modify
773 	 * the keyring */
774 	ret = -EACCES;
775 	if (!key_permission(keyring, KEY_WRITE))
776 		goto error_3;
777 
778 	/* decide on the permissions we want */
779 	perm = KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK;
780 
781 	if (ktype->read)
782 		perm |= KEY_USR_READ;
783 
784 	if (ktype == &key_type_keyring || ktype->update)
785 		perm |= KEY_USR_WRITE;
786 
787 	/* allocate a new key */
788 	key = key_alloc(ktype, description, current->fsuid, current->fsgid,
789 			perm, not_in_quota);
790 	if (IS_ERR(key)) {
791 		ret = PTR_ERR(key);
792 		goto error_3;
793 	}
794 
795 	/* instantiate it and link it into the target keyring */
796 	ret = __key_instantiate_and_link(key, payload, plen, keyring);
797 	if (ret < 0) {
798 		key_put(key);
799 		key = ERR_PTR(ret);
800 	}
801 
802  error_3:
803 	up_write(&keyring->sem);
804  error_2:
805 	key_type_put(ktype);
806  error:
807 	return key;
808 
809  found_matching_key:
810 	/* we found a matching key, so we're going to try to update it
811 	 * - we can drop the locks first as we have the key pinned
812 	 */
813 	up_write(&keyring->sem);
814 	key_type_put(ktype);
815 
816 	key = __key_update(key, payload, plen);
817 	goto error;
818 
819 } /* end key_create_or_update() */
820 
821 EXPORT_SYMBOL(key_create_or_update);
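
/*
 * Usage sketch (illustrative; the description and payload are assumed,
 * much as the add_key() system call would supply them).  If a matching
 * key already exists in the keyring it is updated in place, otherwise a
 * new key is allocated, instantiated and linked; either way the result
 * is returned pinned and must be put:
 *
 *	key = key_create_or_update(keyring, "user", "example:desc",
 *				   payload, plen, 0);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 *	key_put(key);
 */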
822 
823 /*****************************************************************************/
824 /*
825  * update a key
826  */
827 int key_update(struct key *key, const void *payload, size_t plen)
828 {
829 	int ret;
830 
831 	key_check(key);
832 
833 	/* the key must be writable */
834 	ret = -EACCES;
835 	if (!key_permission(key, KEY_WRITE))
836 		goto error;
837 
838 	/* attempt to update it if supported */
839 	ret = -EOPNOTSUPP;
840 	if (key->type->update) {
841 		down_write(&key->sem);
842 		ret = key->type->update(key, payload, plen);
843 
844 		if (ret == 0) {
845 			/* updating a negative key instantiates it */
846 			write_lock(&key->lock);
847 			key->flags &= ~KEY_FLAG_NEGATIVE;
848 			write_unlock(&key->lock);
849 		}
850 
851 		up_write(&key->sem);
852 	}
853 
854  error:
855 	return ret;
856 
857 } /* end key_update() */
858 
859 EXPORT_SYMBOL(key_update);
860 
861 /*****************************************************************************/
862 /*
863  * duplicate a key, potentially with a revised description
864  * - must be supported by the key type (keyrings, for instance, can be duplicated)
865  */
866 struct key *key_duplicate(struct key *source, const char *desc)
867 {
868 	struct key *key;
869 	int ret;
870 
871 	key_check(source);
872 
873 	if (!desc)
874 		desc = source->description;
875 
876 	down_read(&key_types_sem);
877 
878 	ret = -EINVAL;
879 	if (!source->type->duplicate)
880 		goto error;
881 
882 	/* allocate and instantiate a key */
883 	key = key_alloc(source->type, desc, current->fsuid, current->fsgid,
884 			source->perm, 0);
885 	if (IS_ERR(key))
886 		goto error_k;
887 
888 	down_read(&source->sem);
889 	ret = key->type->duplicate(key, source);
890 	up_read(&source->sem);
891 	if (ret < 0)
892 		goto error2;
893 
894 	atomic_inc(&key->user->nikeys);
895 
896 	write_lock(&key->lock);
897 	key->flags |= KEY_FLAG_INSTANTIATED;
898 	write_unlock(&key->lock);
899 
900  error_k:
901 	up_read(&key_types_sem);
902  out:
903 	return key;
904 
905  error2:
906 	key_put(key);
907  error:
908 	up_read(&key_types_sem);
909 	key = ERR_PTR(ret);
910 	goto out;
911 
912 } /* end key_duplicate() */
913 
914 /*****************************************************************************/
915 /*
916  * revoke a key
917  */
918 void key_revoke(struct key *key)
919 {
920 	key_check(key);
921 
922 	/* make sure no one's trying to change or use the key when we mark
923 	 * it */
924 	down_write(&key->sem);
925 	write_lock(&key->lock);
926 	key->flags |= KEY_FLAG_REVOKED;
927 	write_unlock(&key->lock);
928 	up_write(&key->sem);
929 
930 } /* end key_revoke() */
931 
932 EXPORT_SYMBOL(key_revoke);
933 
934 /*****************************************************************************/
935 /*
936  * register a type of key
937  */
938 int register_key_type(struct key_type *ktype)
939 {
940 	struct key_type *p;
941 	int ret;
942 
943 	ret = -EEXIST;
944 	down_write(&key_types_sem);
945 
946 	/* disallow key types with the same name */
947 	list_for_each_entry(p, &key_types_list, link) {
948 		if (strcmp(p->name, ktype->name) == 0)
949 			goto out;
950 	}
951 
952 	/* store the type */
953 	list_add(&ktype->link, &key_types_list);
954 	ret = 0;
955 
956  out:
957 	up_write(&key_types_sem);
958 	return ret;
959 
960 } /* end register_key_type() */
961 
962 EXPORT_SYMBOL(register_key_type);
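
/*
 * Usage sketch (illustrative; the "example" type and its operations are
 * assumptions, not defined in this file).  register_key_type() refuses
 * duplicate names with -EEXIST:
 *
 *	static struct key_type key_type_example = {
 *		.name		= "example",
 *		.def_datalen	= 0,
 *		.instantiate	= example_instantiate,
 *		.match		= example_match,
 *		.destroy	= example_destroy,
 *	};
 *
 *	ret = register_key_type(&key_type_example);
 *	...
 *	unregister_key_type(&key_type_example);
 */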
963 
964 /*****************************************************************************/
965 /*
966  * unregister a type of key
967  */
968 void unregister_key_type(struct key_type *ktype)
969 {
970 	struct rb_node *_n;
971 	struct key *key;
972 
973 	down_write(&key_types_sem);
974 
975 	/* withdraw the key type */
976 	list_del_init(&ktype->link);
977 
978 	/* need to withdraw all keys of this type */
979 	spin_lock(&key_serial_lock);
980 
981 	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
982 		key = rb_entry(_n, struct key, serial_node);
983 
984 		if (key->type != ktype)
985 			continue;
986 
987 		write_lock(&key->lock);
988 		key->type = &key_type_dead;
989 		write_unlock(&key->lock);
990 
991 		/* there shouldn't be anyone looking at the description or
992 		 * payload now */
993 		if (ktype->destroy)
994 			ktype->destroy(key);
995 		memset(&key->payload, 0xbd, sizeof(key->payload));
996 	}
997 
998 	spin_unlock(&key_serial_lock);
999 	up_write(&key_types_sem);
1000 
1001 } /* end unregister_key_type() */
1002 
1003 EXPORT_SYMBOL(unregister_key_type);
1004 
1005 /*****************************************************************************/
1006 /*
1007  * initialise the key management stuff
1008  */
1009 void __init key_init(void)
1010 {
1011 	/* allocate a slab in which we can store keys */
1012 	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
1013 			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
1014 
1015 	/* add the special key types */
1016 	list_add_tail(&key_type_keyring.link, &key_types_list);
1017 	list_add_tail(&key_type_dead.link, &key_types_list);
1018 	list_add_tail(&key_type_user.link, &key_types_list);
1019 
1020 	/* record the root user tracking */
1021 	rb_link_node(&root_key_user.node,
1022 		     NULL,
1023 		     &key_user_tree.rb_node);
1024 
1025 	rb_insert_color(&root_key_user.node,
1026 			&key_user_tree);
1027 
1028 	/* record root's standard keyrings */
1029 	key_check(&root_user_keyring);
1030 	key_check(&root_session_keyring);
1031 
1032 	__key_insert_serial(&root_user_keyring);
1033 	__key_insert_serial(&root_session_keyring);
1034 
1035 	keyring_publish_name(&root_user_keyring);
1036 	keyring_publish_name(&root_session_keyring);
1037 
1038 	/* link the two root keyrings together */
1039 	key_link(&root_session_keyring, &root_user_keyring);
1040 } /* end key_init() */
1041