xref: /openbmc/linux/security/keys/key.c (revision 8fa5723aa7e053d498336b48448b292fc2e0458b)
/* Basic authentication token and access key management
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"

static struct kmem_cache	*key_jar;
struct rb_root		key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root	key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

unsigned int key_quota_root_maxkeys = 200;	/* root's key count quota */
unsigned int key_quota_root_maxbytes = 20000;	/* root's key space quota */
unsigned int key_quota_maxkeys = 200;		/* general key count quota */
unsigned int key_quota_maxbytes = 20000;	/* general key space quota */

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

static void key_cleanup(struct work_struct *work);
static DECLARE_WORK(key_cleanup_task, key_cleanup);

/* we serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);

/* any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
	.name		= "dead",
};

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif

/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 */
struct key_user *key_user_lookup(uid_t uid)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

 try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	mutex_init(&candidate->cons_lock);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
 found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	kfree(candidate);
 out:
	return user;

} /* end key_user_lookup() */

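/*
 * Usage note (illustrative sketch, not a caller taken from this file): the
 * record returned by key_user_lookup() carries a reference that must be
 * balanced with key_user_put() once the quota accounting is done, e.g.:
 *
 *	struct key_user *user = key_user_lookup(uid);
 *	if (!user)
 *		return -ENOMEM;
 *	... adjust user->qnkeys and user->qnbytes under user->lock ...
 *	key_user_put(user);
 */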
/*****************************************************************************/
/*
 * dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);

		kfree(user);
	}

} /* end key_user_put() */

/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - these are assigned randomly to avoid security issues through covert
 *   channel problems
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);

	spin_lock(&key_serial_lock);

attempt_insertion:
	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);
	return;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 3) {
			key->serial = 3;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto attempt_insertion;
	}

} /* end key_alloc_serial() */

/*****************************************************************************/
/*
 * allocate a key of the specified type
 * - update the user's quota to reflect the existence of the key
 * - called from a key-type operation with key_types_sem read-locked by
 *   key_create_or_update()
 *   - this prevents unregistration of the key type
 * - upon return the key is as yet uninstantiated; the caller needs to either
 *   instantiate the key or discard it before returning
 */
struct key *key_alloc(struct key_type *type, const char *desc,
		      uid_t uid, gid_t gid, struct task_struct *ctx,
		      key_perm_t perm, unsigned long flags)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;
	int ret;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	desclen = strlen(desc) + 1;
	quotalen = desclen + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid);
	if (!user)
		goto no_memory_1;

	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		unsigned maxkeys = (uid == 0) ?
			key_quota_root_maxkeys : key_quota_maxkeys;
		unsigned maxbytes = (uid == 0) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&user->lock);
		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
			if (user->qnkeys + 1 >= maxkeys ||
			    user->qnbytes + quotalen >= maxbytes ||
			    user->qnbytes + quotalen < user->qnbytes)
				goto no_quota;
		}

		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}

	/* allocate and initialise the key and its description */
	key = kmem_cache_alloc(key_jar, GFP_KERNEL);
	if (!key)
		goto no_memory_2;

	if (desc) {
		key->description = kmemdup(desc, desclen, GFP_KERNEL);
		if (!key->description)
			goto no_memory_3;
	}

	atomic_set(&key->usage, 1);
	init_rwsem(&key->sem);
	key->type = type;
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	key->flags = 0;
	key->expiry = 0;
	key->payload.data = NULL;
	key->security = NULL;

	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
		key->flags |= 1 << KEY_FLAG_IN_QUOTA;

	memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif

	/* let the security module know about the key */
	ret = security_key_alloc(key, ctx, flags);
	if (ret < 0)
		goto security_error;

	/* publish the key by giving it a serial number */
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

error:
	return key;

security_error:
	kfree(key->description);
	kmem_cache_free(key_jar, key);
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
	key = ERR_PTR(ret);
	goto error;

no_memory_3:
	kmem_cache_free(key_jar, key);
no_memory_2:
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);

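/*
 * Hedged example of calling key_alloc() (hypothetical key type and
 * description; not part of this file's build). The key returned is not yet
 * instantiated, so the caller must instantiate or discard it:
 *
 *	struct key *key;
 *
 *	key = key_alloc(&example_key_type, "example:desc",
 *			current->fsuid, current->fsgid, current,
 *			KEY_POS_ALL | KEY_USR_VIEW, KEY_ALLOC_IN_QUOTA);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 */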
/*****************************************************************************/
/*
 * reserve an amount of quota for the key's payload
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
	int delta = (int) datalen - key->datalen;
	int ret = 0;

	key_check(key);

	/* contemplate the quota adjustment */
	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		unsigned maxbytes = (key->user->uid == 0) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&key->user->lock);

		if (delta > 0 &&
		    (key->user->qnbytes + delta >= maxbytes ||
		     key->user->qnbytes + delta < key->user->qnbytes)) {
			ret = -EDQUOT;
		}
		else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}
		spin_unlock(&key->user->lock);
	}

	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;

	return ret;

} /* end key_payload_reserve() */

EXPORT_SYMBOL(key_payload_reserve);

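/*
 * Sketch of how a key type's ->update() op might use key_payload_reserve()
 * (the type and helper names here are hypothetical): reserve quota for the
 * new payload size before swapping the data in, bailing out on -EDQUOT:
 *
 *	static int example_update(struct key *key, const void *data,
 *				  size_t datalen)
 *	{
 *		int ret = key_payload_reserve(key, datalen);
 *		if (ret < 0)
 *			return ret;
 *		... replace key->payload.data, freeing the old copy ...
 *		return 0;
 *	}
 */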
/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring,
				      struct key *instkey)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				ret = __key_link(keyring, key);

			/* disable the authorisation key */
			if (instkey)
				key_revoke(instkey);
		}
	}

	mutex_unlock(&key_construction_mutex);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;

} /* end __key_instantiate_and_link() */

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 */
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring,
			     struct key *instkey)
{
	int ret;

	if (keyring)
		down_write(&keyring->sem);

	ret = __key_instantiate_and_link(key, data, datalen, keyring, instkey);

	if (keyring)
		up_write(&keyring->sem);

	return ret;

} /* end key_instantiate_and_link() */

EXPORT_SYMBOL(key_instantiate_and_link);

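/*
 * Illustrative follow-on to the key_alloc() sketch above (assumed caller;
 * "payload", "plen" and "my_keyring" are hypothetical): instantiate the new
 * key and link it into a keyring in one atomic step, discarding it on error:
 *
 *	ret = key_instantiate_and_link(key, payload, plen, my_keyring, NULL);
 *	if (ret < 0) {
 *		key_put(key);
 *		return ret;
 *	}
 */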
/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 */
int key_negate_and_link(struct key *key,
			unsigned timeout,
			struct key *keyring,
			struct key *instkey)
{
	struct timespec now;
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring)
		down_write(&keyring->sem);

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring)
			ret = __key_link(keyring, key);

		/* disable the authorisation key */
		if (instkey)
			key_revoke(instkey);
	}

	mutex_unlock(&key_construction_mutex);

	if (keyring)
		up_write(&keyring->sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);

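/*
 * Hedged example (hypothetical upcall failure path; "dest_keyring" and
 * "authkey" are assumed caller-provided): if construction of a key fails,
 * it can be negatively instantiated for a while - here 60 seconds - so that
 * repeated searches fail quickly with ENOKEY rather than re-triggering the
 * upcall:
 *
 *	if (construction_failed)
 *		key_negate_and_link(key, 60, dest_keyring, authkey);
 */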
/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 */
static void key_cleanup(struct work_struct *work)
{
	struct rb_node *_n;
	struct key *key;

 go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;

 found_dead_key:
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_check(key);

	security_key_free(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;

} /* end key_cleanup() */

/*****************************************************************************/
/*
 * dispose of a reference to a key
 * - when all the references are gone, we schedule the cleanup task to come and
 *   pull it out of the tree in definite process context
 */
void key_put(struct key *key)
{
	if (key) {
		key_check(key);

		if (atomic_dec_and_test(&key->usage))
			schedule_work(&key_cleanup_task);
	}

} /* end key_put() */

EXPORT_SYMBOL(key_put);

/*****************************************************************************/
/*
 * find a key by its serial number
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

 not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

 found:
	/* pretend it doesn't exist if it's dead */
	if (atomic_read(&key->usage) == 0 ||
	    test_bit(KEY_FLAG_DEAD, &key->flags) ||
	    key->type == &key_type_dead)
		goto not_found;

	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually change the key
	 */
	atomic_inc(&key->usage);

 error:
	spin_unlock(&key_serial_lock);
	return key;

} /* end key_lookup() */

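/*
 * Usage sketch (assumed caller): key_lookup() takes a reference on success,
 * which must be dropped with key_put() when finished, otherwise the key can
 * never be garbage-collected by key_cleanup():
 *
 *	struct key *key = key_lookup(id);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 *	... use the key ...
 *	key_put(key);
 */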
/*****************************************************************************/
/*
 * find and lock the specified key type against removal
 * - we return with the sem readlocked
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			goto found_kernel_type;
	}

	up_read(&key_types_sem);
	ktype = ERR_PTR(-ENOKEY);

 found_kernel_type:
	return ktype;

} /* end key_type_lookup() */

/*****************************************************************************/
/*
 * unlock a key type
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);

} /* end key_type_put() */

/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, payload, plen);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key_ref;

error:
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;

} /* end __key_update() */

/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       key_perm_t perm,
			       unsigned long flags)
{
	struct key_type *ktype;
	struct key *keyring, *key = NULL;
	key_ref_t key_ref;
	int ret;

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	key_ref = ERR_PTR(-EINVAL);
	if (!ktype->match || !ktype->instantiate)
		goto error_2;

	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_2;

	down_write(&keyring->sem);

	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (ktype->update) {
		key_ref = __keyring_search_one(keyring_ref, ktype, description,
					       0);
		if (!IS_ERR(key_ref))
			goto found_matching_key;
	}

	/* if the client doesn't provide, decide on the permissions we want */
	if (perm == KEY_PERM_UNDEF) {
		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
		perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

		if (ktype->read)
			perm |= KEY_POS_READ | KEY_USR_READ;

		if (ktype == &key_type_keyring || ktype->update)
			perm |= KEY_USR_WRITE;
	}

	/* allocate a new key */
	key = key_alloc(ktype, description, current->fsuid, current->fsgid,
			current, perm, flags);
	if (IS_ERR(key)) {
		key_ref = ERR_CAST(key);
		goto error_3;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

 error_3:
	up_write(&keyring->sem);
 error_2:
	key_type_put(ktype);
 error:
	return key_ref;

 found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	up_write(&keyring->sem);
	key_type_put(ktype);

	key_ref = __key_update(key_ref, payload, plen);
	goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);

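/*
 * Minimal caller sketch (hypothetical; "user" is the standard user-defined
 * key type registered elsewhere): add or refresh a key on a keyring,
 * letting the default permission logic above pick the mask:
 *
 *	key_ref_t kref;
 *
 *	kref = key_create_or_update(keyring_ref, "user", "mykey",
 *				    data, datalen, KEY_PERM_UNDEF,
 *				    KEY_ALLOC_IN_QUOTA);
 *	if (IS_ERR(kref))
 *		return PTR_ERR(kref);
 *	key_ref_put(kref);
 */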
/*****************************************************************************/
/*
 * update a key
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	/* attempt to update it if supported */
	ret = -EOPNOTSUPP;
	if (key->type->update) {
		down_write(&key->sem);

		ret = key->type->update(key, payload, plen);
		if (ret == 0)
			/* updating a negative key instantiates it */
			clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

		up_write(&key->sem);
	}

 error:
	return ret;

} /* end key_update() */

EXPORT_SYMBOL(key_update);

/*****************************************************************************/
/*
 * revoke a key
 */
void key_revoke(struct key *key)
{
	key_check(key);

	/* make sure no one's trying to change or use the key when we mark it
	 * - we tell lockdep that we might nest because we might be revoking an
	 *   authorisation key whilst holding the sem on a key we've just
	 *   instantiated
	 */
	down_write_nested(&key->sem, 1);
	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
	    key->type->revoke)
		key->type->revoke(key);

	up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);

/*****************************************************************************/
/*
 * register a type of key
 */
int register_key_type(struct key_type *ktype)
{
	struct key_type *p;
	int ret;

	ret = -EEXIST;
	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(p, &key_types_list, link) {
		if (strcmp(p->name, ktype->name) == 0)
			goto out;
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);
	ret = 0;

 out:
	up_write(&key_types_sem);
	return ret;

} /* end register_key_type() */

EXPORT_SYMBOL(register_key_type);

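/*
 * Sketch of registering a custom key type from a module (all names are
 * hypothetical and only ->instantiate and ->destroy are shown; a real type
 * would normally also supply ->match, ->describe and ->read):
 *
 *	static struct key_type key_type_example = {
 *		.name		= "example",
 *		.instantiate	= example_instantiate,
 *		.destroy	= example_destroy,
 *	};
 *
 * Module init calls register_key_type(&key_type_example) and checks the
 * result; module exit undoes it with unregister_key_type().
 */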
/*****************************************************************************/
/*
 * unregister a type of key
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;

	down_write(&key_types_sem);

	/* withdraw the key type */
	list_del_init(&ktype->link);

	/* mark all the keys of this type dead */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype)
			key->type = &key_type_dead;
	}

	spin_unlock(&key_serial_lock);

	/* make sure everyone revalidates their keys */
	synchronize_rcu();

	/* we should now be able to destroy the payloads of all the keys of
	 * this type with impunity */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			if (ktype->destroy)
				ktype->destroy(key);
			memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
		}
	}

	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);

/*****************************************************************************/
/*
 * initialise the key management stuff
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);

} /* end key_init() */
1007