/*
 * Implementation of the kernel access vector cache (AVC).
 *
 * Authors:	Stephen Smalley, <sds@epoch.ncsc.mil>
 *		James Morris <jmorris@redhat.com>
 *
 * Update:	KaiGai, Kohei <kaigai@ak.jp.nec.com>
 *		Replaced the avc_lock spinlock by RCU.
 *
 * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 *	This program is free software; you can redistribute it and/or modify
 *	it under the terms of the GNU General Public License version 2,
 *	as published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/percpu.h>
#include <net/sock.h>
#include <linux/un.h>
#include <net/af_unix.h>
#include <linux/ip.h>
#include <linux/audit.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include "avc.h"
#include "avc_ss.h"

static const struct av_perm_to_string
{
	u16 tclass;
	u32 value;
	const char *name;
} av_perm_to_string[] = {
#define S_(c, v, s) { c, v, s },
#include "av_perm_to_string.h"
#undef S_
};

#ifdef CONFIG_AUDIT
static const char *class_to_string[] = {
#define S_(s) s,
#include "class_to_string.h"
#undef S_
};
#endif

#define TB_(s) static const char * s [] = {
#define TE_(s) };
#define S_(s) s,
#include "common_perm_to_string.h"
#undef TB_
#undef TE_
#undef S_

static const struct av_inherit
{
	u16 tclass;
	const char **common_pts;
	u32 common_base;
} av_inherit[] = {
#define S_(c, i, b) { c, common_##i##_perm_to_string, b },
#include "av_inherit.h"
#undef S_
};

#define AVC_CACHE_SLOTS		512
#define AVC_DEF_CACHE_THRESHOLD	512
#define AVC_CACHE_RECLAIM	16

#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
#define avc_cache_stats_incr(field)				\
do {								\
	per_cpu(avc_cache_stats, get_cpu()).field++;		\
	put_cpu();						\
} while (0)
#else
#define avc_cache_stats_incr(field)	do {} while (0)
#endif

struct avc_entry {
	u32 ssid;
	u32 tsid;
	u16 tclass;
	struct av_decision avd;
	atomic_t used;			/* used recently */
};

struct avc_node {
	struct avc_entry ae;
	struct list_head list;
	struct rcu_head rhead;
};

struct avc_cache {
	struct list_head slots[AVC_CACHE_SLOTS];
	spinlock_t slots_lock[AVC_CACHE_SLOTS];	/* lock for writes */
	atomic_t lru_hint;		/* LRU hint for reclaim scan */
	atomic_t active_nodes;
	u32 latest_notif;		/* latest revocation notification */
};

struct avc_callback_node {
	int (*callback) (u32 event, u32 ssid, u32 tsid,
			 u16 tclass, u32 perms,
			 u32 *out_retained);
	u32 events;
	u32 ssid;
	u32 tsid;
	u16 tclass;
	u32 perms;
	struct avc_callback_node *next;
};

/* Exported via selinuxfs */
unsigned int avc_cache_threshold = AVC_DEF_CACHE_THRESHOLD;

#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
DEFINE_PER_CPU(struct avc_cache_stats, avc_cache_stats) = { 0 };
#endif

static struct avc_cache avc_cache;
static struct avc_callback_node *avc_callbacks;
static kmem_cache_t *avc_node_cachep;

static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
{
	return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1);
}
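
/*
 * Illustrative note (not in the original source): avc_hash() folds the
 * (ssid, tsid, tclass) triple into one of the AVC_CACHE_SLOTS (512)
 * buckets.  With hypothetical values ssid=1, tsid=2, tclass=6:
 *
 *	1 ^ (2 << 2) ^ (6 << 4) = 1 ^ 8 ^ 96 = 105
 *	105 & (512 - 1)         = 105
 *
 * so that entry would live on avc_cache.slots[105].
 */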

/**
 * avc_dump_av - Display an access vector in human-readable form.
 * @tclass: target security class
 * @av: access vector
 */
static void avc_dump_av(struct audit_buffer *ab, u16 tclass, u32 av)
{
	const char **common_pts = NULL;
	u32 common_base = 0;
	int i, i2, perm;

	if (av == 0) {
		audit_log_format(ab, " null");
		return;
	}

	for (i = 0; i < ARRAY_SIZE(av_inherit); i++) {
		if (av_inherit[i].tclass == tclass) {
			common_pts = av_inherit[i].common_pts;
			common_base = av_inherit[i].common_base;
			break;
		}
	}

	audit_log_format(ab, " {");
	i = 0;
	perm = 1;
	while (perm < common_base) {
		if (perm & av) {
			audit_log_format(ab, " %s", common_pts[i]);
			av &= ~perm;
		}
		i++;
		perm <<= 1;
	}

	while (i < sizeof(av) * 8) {
		if (perm & av) {
			for (i2 = 0; i2 < ARRAY_SIZE(av_perm_to_string); i2++) {
				if ((av_perm_to_string[i2].tclass == tclass) &&
				    (av_perm_to_string[i2].value == perm))
					break;
			}
			if (i2 < ARRAY_SIZE(av_perm_to_string)) {
				audit_log_format(ab, " %s",
						 av_perm_to_string[i2].name);
				av &= ~perm;
			}
		}
		i++;
		perm <<= 1;
	}

	if (av)
		audit_log_format(ab, " 0x%x", av);

	audit_log_format(ab, " }");
}

/**
 * avc_dump_query - Display a SID pair and a class in human-readable form.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 */
static void avc_dump_query(struct audit_buffer *ab, u32 ssid, u32 tsid, u16 tclass)
{
	int rc;
	char *scontext;
	u32 scontext_len;

	rc = security_sid_to_context(ssid, &scontext, &scontext_len);
	if (rc)
		audit_log_format(ab, "ssid=%d", ssid);
	else {
		audit_log_format(ab, "scontext=%s", scontext);
		kfree(scontext);
	}

	rc = security_sid_to_context(tsid, &scontext, &scontext_len);
	if (rc)
		audit_log_format(ab, " tsid=%d", tsid);
	else {
		audit_log_format(ab, " tcontext=%s", scontext);
		kfree(scontext);
	}
	audit_log_format(ab, " tclass=%s", class_to_string[tclass]);
}
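
/*
 * Illustrative note (not in the original source): together, avc_dump_av()
 * and avc_dump_query() produce the permission and context portions of an
 * AVC audit record.  With hypothetical contexts the output could read:
 *
 *	{ read write } for ... scontext=user_u:user_r:user_t
 *	tcontext=system_u:object_r:etc_t tclass=file
 *
 * where the permission and context strings depend on the loaded policy.
 */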

/**
 * avc_init - Initialize the AVC.
 *
 * Initialize the access vector cache.
 */
void __init avc_init(void)
{
	int i;

	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
		INIT_LIST_HEAD(&avc_cache.slots[i]);
		spin_lock_init(&avc_cache.slots_lock[i]);
	}
	atomic_set(&avc_cache.active_nodes, 0);
	atomic_set(&avc_cache.lru_hint, 0);

	avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
					    0, SLAB_PANIC, NULL, NULL);

	audit_log(current->audit_context, "AVC INITIALIZED\n");
}

int avc_get_hash_stats(char *page)
{
	int i, chain_len, max_chain_len, slots_used;
	struct avc_node *node;

	rcu_read_lock();

	slots_used = 0;
	max_chain_len = 0;
	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
		if (!list_empty(&avc_cache.slots[i])) {
			slots_used++;
			chain_len = 0;
			list_for_each_entry_rcu(node, &avc_cache.slots[i], list)
				chain_len++;
			if (chain_len > max_chain_len)
				max_chain_len = chain_len;
		}
	}

	rcu_read_unlock();

	return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
			 "longest chain: %d\n",
			 atomic_read(&avc_cache.active_nodes),
			 slots_used, AVC_CACHE_SLOTS, max_chain_len);
}

static void avc_node_free(struct rcu_head *rhead)
{
	struct avc_node *node = container_of(rhead, struct avc_node, rhead);
	kmem_cache_free(avc_node_cachep, node);
	avc_cache_stats_incr(frees);
}

static void avc_node_delete(struct avc_node *node)
{
	list_del_rcu(&node->list);
	call_rcu(&node->rhead, avc_node_free);
	atomic_dec(&avc_cache.active_nodes);
}

static void avc_node_kill(struct avc_node *node)
{
	kmem_cache_free(avc_node_cachep, node);
	avc_cache_stats_incr(frees);
	atomic_dec(&avc_cache.active_nodes);
}

static void avc_node_replace(struct avc_node *new, struct avc_node *old)
{
	list_replace_rcu(&old->list, &new->list);
	call_rcu(&old->rhead, avc_node_free);
	atomic_dec(&avc_cache.active_nodes);
}

static inline int avc_reclaim_node(void)
{
	struct avc_node *node;
	int hvalue, try, ecx;
	unsigned long flags;

	for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
		hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);

		if (!spin_trylock_irqsave(&avc_cache.slots_lock[hvalue], flags))
			continue;

		list_for_each_entry(node, &avc_cache.slots[hvalue], list) {
			if (atomic_dec_and_test(&node->ae.used)) {
				/* Recently Unused */
				avc_node_delete(node);
				avc_cache_stats_incr(reclaims);
				ecx++;
				if (ecx >= AVC_CACHE_RECLAIM) {
					spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
					goto out;
				}
			}
		}
		spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flags);
	}
out:
	return ecx;
}

static struct avc_node *avc_alloc_node(void)
{
	struct avc_node *node;

	node = kmem_cache_alloc(avc_node_cachep, SLAB_ATOMIC);
	if (!node)
		goto out;

	memset(node, 0, sizeof(*node));
	INIT_RCU_HEAD(&node->rhead);
	INIT_LIST_HEAD(&node->list);
	atomic_set(&node->ae.used, 1);
	avc_cache_stats_incr(allocations);

	if (atomic_inc_return(&avc_cache.active_nodes) > avc_cache_threshold)
		avc_reclaim_node();

out:
	return node;
}
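
/*
 * Illustrative note (not in the original source): avc_alloc_node() keeps
 * the cache bounded.  Once active_nodes exceeds avc_cache_threshold
 * (AVC_DEF_CACHE_THRESHOLD, 512 by default), avc_reclaim_node() walks the
 * hash slots starting from lru_hint and frees entries whose "used" flag has
 * not been refreshed since the last scan, stopping after AVC_CACHE_RECLAIM
 * (16) reclaims or after all AVC_CACHE_SLOTS slots have been tried.
 */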

static void avc_node_populate(struct avc_node *node, u32 ssid, u32 tsid, u16 tclass, struct avc_entry *ae)
{
	node->ae.ssid = ssid;
	node->ae.tsid = tsid;
	node->ae.tclass = tclass;
	memcpy(&node->ae.avd, &ae->avd, sizeof(node->ae.avd));
}

static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
{
	struct avc_node *node, *ret = NULL;
	int hvalue;

	hvalue = avc_hash(ssid, tsid, tclass);
	list_for_each_entry_rcu(node, &avc_cache.slots[hvalue], list) {
		if (ssid == node->ae.ssid &&
		    tclass == node->ae.tclass &&
		    tsid == node->ae.tsid) {
			ret = node;
			break;
		}
	}

	if (ret == NULL) {
		/* cache miss */
		goto out;
	}

	/* cache hit */
	if (atomic_read(&ret->ae.used) != 1)
		atomic_set(&ret->ae.used, 1);
out:
	return ret;
}

/**
 * avc_lookup - Look up an AVC entry.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @requested: requested permissions, interpreted based on @tclass
 *
 * Look up an AVC entry that is valid for the
 * @requested permissions between the SID pair
 * (@ssid, @tsid), interpreting the permissions
 * based on @tclass.  If a valid AVC entry exists,
 * then this function returns the avc_node.
 * Otherwise, this function returns NULL.
 */
static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass, u32 requested)
{
	struct avc_node *node;

	avc_cache_stats_incr(lookups);
	node = avc_search_node(ssid, tsid, tclass);

	if (node && ((node->ae.avd.decided & requested) == requested)) {
		avc_cache_stats_incr(hits);
		goto out;
	}

	node = NULL;
	avc_cache_stats_incr(misses);
out:
	return node;
}

static int avc_latest_notif_update(int seqno, int is_insert)
{
	int ret = 0;
	static DEFINE_SPINLOCK(notif_lock);
	unsigned long flag;

	spin_lock_irqsave(&notif_lock, flag);
	if (is_insert) {
		if (seqno < avc_cache.latest_notif) {
			printk(KERN_WARNING "avc: seqno %d < latest_notif %d\n",
			       seqno, avc_cache.latest_notif);
			ret = -EAGAIN;
		}
	} else {
		if (seqno > avc_cache.latest_notif)
			avc_cache.latest_notif = seqno;
	}
	spin_unlock_irqrestore(&notif_lock, flag);

	return ret;
}
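
/*
 * Illustrative sketch (not in the original source): the usual way these
 * helpers are combined, condensed from avc_has_perm_noaudit() below.  On a
 * cache miss the decision is recomputed by the security server and then
 * inserted so later lookups hit the cache:
 *
 *	rcu_read_lock();
 *	node = avc_lookup(ssid, tsid, tclass, requested);
 *	if (!node) {
 *		rcu_read_unlock();
 *		rc = security_compute_av(ssid, tsid, tclass, requested,
 *					 &entry.avd);
 *		if (!rc) {
 *			rcu_read_lock();
 *			node = avc_insert(ssid, tsid, tclass, &entry);
 *		}
 *	}
 */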

/**
 * avc_insert - Insert an AVC entry.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @ae: AVC entry
 *
 * Insert an AVC entry for the SID pair
 * (@ssid, @tsid) and class @tclass.
 * The access vectors and the sequence number are
 * normally provided by the security server in
 * response to a security_compute_av() call.  If the
 * sequence number @ae->avd.seqno is not less than the latest
 * revocation notification, then the function copies
 * the access vectors into a cache entry and returns
 * the inserted avc_node.  Otherwise, this function returns NULL.
 */
static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct avc_entry *ae)
{
	struct avc_node *pos, *node = NULL;
	int hvalue;
	unsigned long flag;

	if (avc_latest_notif_update(ae->avd.seqno, 1))
		goto out;

	node = avc_alloc_node();
	if (node) {
		hvalue = avc_hash(ssid, tsid, tclass);
		avc_node_populate(node, ssid, tsid, tclass, ae);

		spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flag);
		list_for_each_entry(pos, &avc_cache.slots[hvalue], list) {
			if (pos->ae.ssid == ssid &&
			    pos->ae.tsid == tsid &&
			    pos->ae.tclass == tclass) {
				avc_node_replace(node, pos);
				goto found;
			}
		}
		list_add_rcu(&node->list, &avc_cache.slots[hvalue]);
found:
		spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flag);
	}
out:
	return node;
}

static inline void avc_print_ipv6_addr(struct audit_buffer *ab,
				       struct in6_addr *addr, u16 port,
				       char *name1, char *name2)
{
	if (!ipv6_addr_any(addr))
		audit_log_format(ab, " %s=%04x:%04x:%04x:%04x:%04x:"
				 "%04x:%04x:%04x", name1, NIP6(*addr));
	if (port)
		audit_log_format(ab, " %s=%d", name2, ntohs(port));
}

static inline void avc_print_ipv4_addr(struct audit_buffer *ab, u32 addr,
				       u16 port, char *name1, char *name2)
{
	if (addr)
		audit_log_format(ab, " %s=%d.%d.%d.%d", name1, NIPQUAD(addr));
	if (port)
		audit_log_format(ab, " %s=%d", name2, ntohs(port));
}
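
/*
 * Illustrative note (not in the original source): for a hypothetical IPv4
 * socket bound to 192.168.0.1 port 22, avc_print_ipv4_addr() would append
 * " laddr=192.168.0.1 lport=22" to the audit record; zero addresses and
 * ports are simply omitted.
 */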
"denied" : "granted"); 557 avc_dump_av(ab, tclass,audited); 558 audit_log_format(ab, " for "); 559 if (a && a->tsk) 560 tsk = a->tsk; 561 if (tsk && tsk->pid) { 562 struct mm_struct *mm; 563 struct vm_area_struct *vma; 564 audit_log_format(ab, " pid=%d", tsk->pid); 565 if (tsk == current) 566 mm = current->mm; 567 else 568 mm = get_task_mm(tsk); 569 if (mm) { 570 if (down_read_trylock(&mm->mmap_sem)) { 571 vma = mm->mmap; 572 while (vma) { 573 if ((vma->vm_flags & VM_EXECUTABLE) && 574 vma->vm_file) { 575 audit_log_d_path(ab, "exe=", 576 vma->vm_file->f_dentry, 577 vma->vm_file->f_vfsmnt); 578 break; 579 } 580 vma = vma->vm_next; 581 } 582 up_read(&mm->mmap_sem); 583 } else { 584 audit_log_format(ab, " comm=%s", tsk->comm); 585 } 586 if (tsk != current) 587 mmput(mm); 588 } else { 589 audit_log_format(ab, " comm=%s", tsk->comm); 590 } 591 } 592 if (a) { 593 switch (a->type) { 594 case AVC_AUDIT_DATA_IPC: 595 audit_log_format(ab, " key=%d", a->u.ipc_id); 596 break; 597 case AVC_AUDIT_DATA_CAP: 598 audit_log_format(ab, " capability=%d", a->u.cap); 599 break; 600 case AVC_AUDIT_DATA_FS: 601 if (a->u.fs.dentry) { 602 struct dentry *dentry = a->u.fs.dentry; 603 if (a->u.fs.mnt) { 604 audit_log_d_path(ab, "path=", dentry, 605 a->u.fs.mnt); 606 } else { 607 audit_log_format(ab, " name=%s", 608 dentry->d_name.name); 609 } 610 inode = dentry->d_inode; 611 } else if (a->u.fs.inode) { 612 struct dentry *dentry; 613 inode = a->u.fs.inode; 614 dentry = d_find_alias(inode); 615 if (dentry) { 616 audit_log_format(ab, " name=%s", 617 dentry->d_name.name); 618 dput(dentry); 619 } 620 } 621 if (inode) 622 audit_log_format(ab, " dev=%s ino=%ld", 623 inode->i_sb->s_id, 624 inode->i_ino); 625 break; 626 case AVC_AUDIT_DATA_NET: 627 if (a->u.net.sk) { 628 struct sock *sk = a->u.net.sk; 629 struct unix_sock *u; 630 int len = 0; 631 char *p = NULL; 632 633 switch (sk->sk_family) { 634 case AF_INET: { 635 struct inet_sock *inet = inet_sk(sk); 636 637 avc_print_ipv4_addr(ab, inet->rcv_saddr, 638 inet->sport, 639 "laddr", "lport"); 640 avc_print_ipv4_addr(ab, inet->daddr, 641 inet->dport, 642 "faddr", "fport"); 643 break; 644 } 645 case AF_INET6: { 646 struct inet_sock *inet = inet_sk(sk); 647 struct ipv6_pinfo *inet6 = inet6_sk(sk); 648 649 avc_print_ipv6_addr(ab, &inet6->rcv_saddr, 650 inet->sport, 651 "laddr", "lport"); 652 avc_print_ipv6_addr(ab, &inet6->daddr, 653 inet->dport, 654 "faddr", "fport"); 655 break; 656 } 657 case AF_UNIX: 658 u = unix_sk(sk); 659 if (u->dentry) { 660 audit_log_d_path(ab, "path=", 661 u->dentry, u->mnt); 662 break; 663 } 664 if (!u->addr) 665 break; 666 len = u->addr->len-sizeof(short); 667 p = &u->addr->name->sun_path[0]; 668 if (*p) 669 audit_log_format(ab, 670 "path=%*.*s", len, 671 len, p); 672 else 673 audit_log_format(ab, 674 "path=@%*.*s", len-1, 675 len-1, p+1); 676 break; 677 } 678 } 679 680 switch (a->u.net.family) { 681 case AF_INET: 682 avc_print_ipv4_addr(ab, a->u.net.v4info.saddr, 683 a->u.net.sport, 684 "saddr", "src"); 685 avc_print_ipv4_addr(ab, a->u.net.v4info.daddr, 686 a->u.net.dport, 687 "daddr", "dest"); 688 break; 689 case AF_INET6: 690 avc_print_ipv6_addr(ab, &a->u.net.v6info.saddr, 691 a->u.net.sport, 692 "saddr", "src"); 693 avc_print_ipv6_addr(ab, &a->u.net.v6info.daddr, 694 a->u.net.dport, 695 "daddr", "dest"); 696 break; 697 } 698 if (a->u.net.netif) 699 audit_log_format(ab, " netif=%s", 700 a->u.net.netif); 701 break; 702 } 703 } 704 audit_log_format(ab, " "); 705 avc_dump_query(ab, ssid, tsid, tclass); 706 audit_log_end(ab); 707 } 708 709 /** 710 * 

/**
 * avc_add_callback - Register a callback for security events.
 * @callback: callback function
 * @events: security events
 * @ssid: source security identifier or %SECSID_WILD
 * @tsid: target security identifier or %SECSID_WILD
 * @tclass: target security class
 * @perms: permissions
 *
 * Register a callback function for events in the set @events
 * related to the SID pair (@ssid, @tsid) and
 * the permissions @perms, interpreting
 * @perms based on @tclass.  Returns %0 on success or
 * -%ENOMEM if insufficient memory exists to add the callback.
 */
int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid,
				     u16 tclass, u32 perms,
				     u32 *out_retained),
		     u32 events, u32 ssid, u32 tsid,
		     u16 tclass, u32 perms)
{
	struct avc_callback_node *c;
	int rc = 0;

	c = kmalloc(sizeof(*c), GFP_ATOMIC);
	if (!c) {
		rc = -ENOMEM;
		goto out;
	}

	c->callback = callback;
	c->events = events;
	c->ssid = ssid;
	c->tsid = tsid;
	c->perms = perms;
	c->next = avc_callbacks;
	avc_callbacks = c;
out:
	return rc;
}

static inline int avc_sidcmp(u32 x, u32 y)
{
	return (x == y || x == SECSID_WILD || y == SECSID_WILD);
}

/**
 * avc_update_node - Update an AVC entry.
 * @event: updating event
 * @perms: permission mask bits
 * @ssid,@tsid,@tclass: identifier of an AVC entry
 *
 * If a valid AVC entry doesn't exist, this function returns -ENOENT.
 * If the node allocation fails, this function returns -ENOMEM.
 * Otherwise, this function updates the AVC entry.  The original AVC entry
 * is released later by RCU.
 */
static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass)
{
	int hvalue, rc = 0;
	unsigned long flag;
	struct avc_node *pos, *node, *orig = NULL;

	node = avc_alloc_node();
	if (!node) {
		rc = -ENOMEM;
		goto out;
	}

	/* Lock the target slot */
	hvalue = avc_hash(ssid, tsid, tclass);
	spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flag);

	list_for_each_entry(pos, &avc_cache.slots[hvalue], list) {
		if (ssid == pos->ae.ssid &&
		    tsid == pos->ae.tsid &&
		    tclass == pos->ae.tclass) {
			orig = pos;
			break;
		}
	}

	if (!orig) {
		rc = -ENOENT;
		avc_node_kill(node);
		goto out_unlock;
	}

	/*
	 * Copy and replace original node.
	 */

	avc_node_populate(node, ssid, tsid, tclass, &orig->ae);

	switch (event) {
	case AVC_CALLBACK_GRANT:
		node->ae.avd.allowed |= perms;
		break;
	case AVC_CALLBACK_TRY_REVOKE:
	case AVC_CALLBACK_REVOKE:
		node->ae.avd.allowed &= ~perms;
		break;
	case AVC_CALLBACK_AUDITALLOW_ENABLE:
		node->ae.avd.auditallow |= perms;
		break;
	case AVC_CALLBACK_AUDITALLOW_DISABLE:
		node->ae.avd.auditallow &= ~perms;
		break;
	case AVC_CALLBACK_AUDITDENY_ENABLE:
		node->ae.avd.auditdeny |= perms;
		break;
	case AVC_CALLBACK_AUDITDENY_DISABLE:
		node->ae.avd.auditdeny &= ~perms;
		break;
	}
	avc_node_replace(node, orig);
out_unlock:
	spin_unlock_irqrestore(&avc_cache.slots_lock[hvalue], flag);
out:
	return rc;
}
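
/*
 * Illustrative sketch (not in the original source): how a client elsewhere
 * in the SELinux code might register for the AVC_CALLBACK_RESET event that
 * avc_ss_reset() below delivers.  The example_* names are hypothetical;
 * SECSID_NULL and SECCLASS_NULL are assumed to come from the SELinux
 * headers pulled in via "avc.h".
 */
#if 0
static int example_avc_reset_callback(u32 event, u32 ssid, u32 tsid,
				      u16 tclass, u32 perms,
				      u32 *out_retained)
{
	/* invalidate any module-private cached decisions here */
	return 0;
}

static int example_register_reset(void)
{
	return avc_add_callback(example_avc_reset_callback, AVC_CALLBACK_RESET,
				SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
}
#endif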

/**
 * avc_ss_reset - Flush the cache and revalidate migrated permissions.
 * @seqno: policy sequence number
 */
int avc_ss_reset(u32 seqno)
{
	struct avc_callback_node *c;
	int i, rc = 0;
	unsigned long flag;
	struct avc_node *node;

	for (i = 0; i < AVC_CACHE_SLOTS; i++) {
		spin_lock_irqsave(&avc_cache.slots_lock[i], flag);
		list_for_each_entry(node, &avc_cache.slots[i], list)
			avc_node_delete(node);
		spin_unlock_irqrestore(&avc_cache.slots_lock[i], flag);
	}

	for (c = avc_callbacks; c; c = c->next) {
		if (c->events & AVC_CALLBACK_RESET) {
			rc = c->callback(AVC_CALLBACK_RESET,
					 0, 0, 0, 0, NULL);
			if (rc)
				goto out;
		}
	}

	avc_latest_notif_update(seqno, 0);
out:
	return rc;
}

/**
 * avc_has_perm_noaudit - Check permissions but perform no auditing.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @requested: requested permissions, interpreted based on @tclass
 * @avd: access vector decisions
 *
 * Check the AVC to determine whether the @requested permissions are granted
 * for the SID pair (@ssid, @tsid), interpreting the permissions
 * based on @tclass, and call the security server on a cache miss to obtain
 * a new decision and add it to the cache.  Return a copy of the decisions
 * in @avd.  Return %0 if all @requested permissions are granted,
 * -%EACCES if any permissions are denied, or another -errno upon
 * other errors.  This function is typically called by avc_has_perm(),
 * but may also be called directly to separate permission checking from
 * auditing, e.g. in cases where a lock must be held for the check but
 * should be released for the auditing.
 */
int avc_has_perm_noaudit(u32 ssid, u32 tsid,
			 u16 tclass, u32 requested,
			 struct av_decision *avd)
{
	struct avc_node *node;
	struct avc_entry entry, *p_ae;
	int rc = 0;
	u32 denied;

	rcu_read_lock();

	node = avc_lookup(ssid, tsid, tclass, requested);
	if (!node) {
		rcu_read_unlock();
		rc = security_compute_av(ssid, tsid, tclass, requested, &entry.avd);
		if (rc)
			goto out;
		rcu_read_lock();
		node = avc_insert(ssid, tsid, tclass, &entry);
	}

	p_ae = node ? &node->ae : &entry;

	if (avd)
		memcpy(avd, &p_ae->avd, sizeof(*avd));

	denied = requested & ~(p_ae->avd.allowed);

	if (!requested || denied) {
		if (selinux_enforcing)
			rc = -EACCES;
		else
			if (node)
				avc_update_node(AVC_CALLBACK_GRANT, requested,
						ssid, tsid, tclass);
	}

	rcu_read_unlock();
out:
	return rc;
}
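
/*
 * Illustrative note (not in the original source): when a check fails and
 * selinux_enforcing is clear (permissive mode), avc_has_perm_noaudit()
 * still returns 0 but first grants the requested permissions in the cached
 * node via AVC_CALLBACK_GRANT, so subsequent identical requests hit the
 * cache as allowed.
 */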

/**
 * avc_has_perm - Check permissions and perform any appropriate auditing.
 * @ssid: source security identifier
 * @tsid: target security identifier
 * @tclass: target security class
 * @requested: requested permissions, interpreted based on @tclass
 * @auditdata: auxiliary audit data
 *
 * Check the AVC to determine whether the @requested permissions are granted
 * for the SID pair (@ssid, @tsid), interpreting the permissions
 * based on @tclass, and call the security server on a cache miss to obtain
 * a new decision and add it to the cache.  Audit the granting or denial of
 * permissions in accordance with the policy.  Return %0 if all @requested
 * permissions are granted, -%EACCES if any permissions are denied, or
 * another -errno upon other errors.
 */
int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
		 u32 requested, struct avc_audit_data *auditdata)
{
	struct av_decision avd;
	int rc;

	rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, &avd);
	avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
	return rc;
}
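
/*
 * Illustrative sketch (not in the original source): a hook-style caller of
 * avc_has_perm().  The SIDs are placeholders; SECCLASS_FILE, FILE__READ and
 * AVC_AUDIT_DATA_INIT() are assumed to be provided by the SELinux headers
 * included via "avc.h".
 */
#if 0
static int example_may_read_file(u32 task_sid, u32 file_sid,
				 struct dentry *dentry)
{
	struct avc_audit_data ad;

	AVC_AUDIT_DATA_INIT(&ad, FS);
	ad.u.fs.dentry = dentry;

	/* Returns 0 if { read } is allowed, -EACCES on denial. */
	return avc_has_perm(task_sid, file_sid, SECCLASS_FILE,
			    FILE__READ, &ad);
}
#endif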