/*
 * Implementation of the extensible bitmap type.
 *
 * Author : Stephen Smalley, <sds@tycho.nsa.gov>
 */
/*
 * Updated: Hewlett-Packard <paul@paul-moore.com>
 *
 *	Added support to import/export the NetLabel category bitmap
 *
 * (c) Copyright Hewlett-Packard Development Company, L.P., 2006
 */
/*
 * Updated: KaiGai Kohei <kaigai@ak.jp.nec.com>
 *	Applied standard bit operations to improve bitmap scanning.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <net/netlabel.h>
#include "ebitmap.h"
#include "policydb.h"

#define BITS_PER_U64	(sizeof(u64) * 8)

static struct kmem_cache *ebitmap_node_cachep;

int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2)
{
	struct ebitmap_node *n1, *n2;

	if (e1->highbit != e2->highbit)
		return 0;

	n1 = e1->node;
	n2 = e2->node;
	while (n1 && n2 &&
	       (n1->startbit == n2->startbit) &&
	       !memcmp(n1->maps, n2->maps, EBITMAP_SIZE / 8)) {
		n1 = n1->next;
		n2 = n2->next;
	}

	if (n1 || n2)
		return 0;

	return 1;
}

int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src)
{
	struct ebitmap_node *n, *new, *prev;

	ebitmap_init(dst);
	n = src->node;
	prev = NULL;
	while (n) {
		new = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
		if (!new) {
			ebitmap_destroy(dst);
			return -ENOMEM;
		}
		new->startbit = n->startbit;
		memcpy(new->maps, n->maps, EBITMAP_SIZE / 8);
		new->next = NULL;
		if (prev)
			prev->next = new;
		else
			dst->node = new;
		prev = new;
		n = n->next;
	}

	dst->highbit = src->highbit;
	return 0;
}

#ifdef CONFIG_NETLABEL
/**
 * ebitmap_netlbl_export - Export an ebitmap into a NetLabel category bitmap
 * @ebmap: the ebitmap to export
 * @catmap: the NetLabel category bitmap
 *
 * Description:
 * Export a SELinux extensible bitmap into a NetLabel category bitmap.
 * Returns zero on success, negative values on error.
 *
 */
int ebitmap_netlbl_export(struct ebitmap *ebmap,
			  struct netlbl_lsm_catmap **catmap)
{
	struct ebitmap_node *e_iter = ebmap->node;
	unsigned long e_map;
	u32 offset;
	unsigned int iter;
	int rc;

	if (e_iter == NULL) {
		*catmap = NULL;
		return 0;
	}

	if (*catmap != NULL)
		netlbl_catmap_free(*catmap);
	*catmap = NULL;

	while (e_iter) {
		offset = e_iter->startbit;
		for (iter = 0; iter < EBITMAP_UNIT_NUMS; iter++) {
			e_map = e_iter->maps[iter];
			if (e_map != 0) {
				rc = netlbl_catmap_setlong(catmap,
							   offset,
							   e_map,
							   GFP_ATOMIC);
				if (rc != 0)
					goto netlbl_export_failure;
			}
			offset += EBITMAP_UNIT_SIZE;
		}
		e_iter = e_iter->next;
	}

	return 0;

netlbl_export_failure:
	netlbl_catmap_free(*catmap);
	return -ENOMEM;
}
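/*
 * Usage sketch (illustrative comment only, not compiled): a caller that
 * owns both structures could round-trip categories through NetLabel
 * roughly as below; "src" and "dst" are hypothetical locals.
 *
 *	struct ebitmap src, dst;
 *	struct netlbl_lsm_catmap *catmap = NULL;
 *	int rc;
 *
 *	ebitmap_init(&dst);
 *	rc = ebitmap_netlbl_export(&src, &catmap);
 *	if (!rc && catmap)
 *		rc = ebitmap_netlbl_import(&dst, catmap);
 *	netlbl_catmap_free(catmap);
 *	ebitmap_destroy(&dst);
 */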
/**
 * ebitmap_netlbl_import - Import a NetLabel category bitmap into an ebitmap
 * @ebmap: the ebitmap to import
 * @catmap: the NetLabel category bitmap
 *
 * Description:
 * Import a NetLabel category bitmap into a SELinux extensible bitmap.
 * Returns zero on success, negative values on error.
 *
 */
int ebitmap_netlbl_import(struct ebitmap *ebmap,
			  struct netlbl_lsm_catmap *catmap)
{
	int rc;
	struct ebitmap_node *e_iter = NULL;
	struct ebitmap_node *e_prev = NULL;
	u32 offset = 0, idx;
	unsigned long bitmap;

	for (;;) {
		rc = netlbl_catmap_getlong(catmap, &offset, &bitmap);
		if (rc < 0)
			goto netlbl_import_failure;
		if (offset == (u32)-1)
			return 0;

		/* don't waste ebitmap space if the netlabel bitmap is empty */
		if (bitmap == 0) {
			offset += EBITMAP_UNIT_SIZE;
			continue;
		}

		if (e_iter == NULL ||
		    offset >= e_iter->startbit + EBITMAP_SIZE) {
			e_prev = e_iter;
			e_iter = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
			if (e_iter == NULL)
				goto netlbl_import_failure;
			e_iter->startbit = offset - (offset % EBITMAP_SIZE);
			if (e_prev == NULL)
				ebmap->node = e_iter;
			else
				e_prev->next = e_iter;
			ebmap->highbit = e_iter->startbit + EBITMAP_SIZE;
		}

		/* offset will always be aligned to an unsigned long */
		idx = EBITMAP_NODE_INDEX(e_iter, offset);
		e_iter->maps[idx] = bitmap;

		/* next */
		offset += EBITMAP_UNIT_SIZE;
	}

	/* NOTE: we should never reach this return */
	return 0;

netlbl_import_failure:
	ebitmap_destroy(ebmap);
	return -ENOMEM;
}
#endif /* CONFIG_NETLABEL */

/*
 * Check to see if all the bits set in e2 are also set in e1. Optionally,
 * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
 * last_e2bit.
 */
int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit)
{
	struct ebitmap_node *n1, *n2;
	int i;

	if (e1->highbit < e2->highbit)
		return 0;

	n1 = e1->node;
	n2 = e2->node;

	while (n1 && n2 && (n1->startbit <= n2->startbit)) {
		if (n1->startbit < n2->startbit) {
			n1 = n1->next;
			continue;
		}
		for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; )
			i--;	/* Skip trailing NULL map entries */
		if (last_e2bit && (i >= 0)) {
			u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
					 __fls(n2->maps[i]);
			if (lastsetbit > last_e2bit)
				return 0;
		}

		while (i >= 0) {
			if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
				return 0;
			i--;
		}

		n1 = n1->next;
		n2 = n2->next;
	}

	if (n2)
		return 0;

	return 1;
}

int ebitmap_get_bit(struct ebitmap *e, unsigned long bit)
{
	struct ebitmap_node *n;

	if (e->highbit < bit)
		return 0;

	n = e->node;
	while (n && (n->startbit <= bit)) {
		if ((n->startbit + EBITMAP_SIZE) > bit)
			return ebitmap_node_get_bit(n, bit);
		n = n->next;
	}

	return 0;
}
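/*
 * Usage sketch (illustrative comment only, not compiled): the typical
 * dominance-style query is "is every bit of 'requested' also set in
 * 'allowed'?", with "allowed" and "requested" as hypothetical bitmaps:
 *
 *	if (!ebitmap_contains(&allowed, &requested, 0))
 *		return -EACCES;
 *
 * Passing a non-zero last_e2bit additionally rejects the check when the
 * highest bit set in "requested" exceeds that value.
 */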
int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value)
{
	struct ebitmap_node *n, *prev, *new;

	prev = NULL;
	n = e->node;
	while (n && n->startbit <= bit) {
		if ((n->startbit + EBITMAP_SIZE) > bit) {
			if (value) {
				ebitmap_node_set_bit(n, bit);
			} else {
				unsigned int s;

				ebitmap_node_clr_bit(n, bit);

				s = find_first_bit(n->maps, EBITMAP_SIZE);
				if (s < EBITMAP_SIZE)
					return 0;

				/* drop this node from the bitmap */
				if (!n->next) {
					/*
					 * this was the highest map
					 * within the bitmap
					 */
					if (prev)
						e->highbit = prev->startbit
							     + EBITMAP_SIZE;
					else
						e->highbit = 0;
				}
				if (prev)
					prev->next = n->next;
				else
					e->node = n->next;
				kmem_cache_free(ebitmap_node_cachep, n);
			}
			return 0;
		}
		prev = n;
		n = n->next;
	}

	if (!value)
		return 0;

	new = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC);
	if (!new)
		return -ENOMEM;

	new->startbit = bit - (bit % EBITMAP_SIZE);
	ebitmap_node_set_bit(new, bit);

	if (!n)
		/* this node will be the highest map within the bitmap */
		e->highbit = new->startbit + EBITMAP_SIZE;

	if (prev) {
		new->next = prev->next;
		prev->next = new;
	} else {
		new->next = e->node;
		e->node = new;
	}

	return 0;
}

void ebitmap_destroy(struct ebitmap *e)
{
	struct ebitmap_node *n, *temp;

	if (!e)
		return;

	n = e->node;
	while (n) {
		temp = n;
		n = n->next;
		kmem_cache_free(ebitmap_node_cachep, temp);
	}

	e->highbit = 0;
	e->node = NULL;
	return;
}

int ebitmap_read(struct ebitmap *e, void *fp)
{
	struct ebitmap_node *n = NULL;
	u32 mapunit, count, startbit, index;
	u64 map;
	__le32 buf[3];
	int rc, i;

	ebitmap_init(e);

	rc = next_entry(buf, fp, sizeof buf);
	if (rc < 0)
		goto out;

	mapunit = le32_to_cpu(buf[0]);
	e->highbit = le32_to_cpu(buf[1]);
	count = le32_to_cpu(buf[2]);

	if (mapunit != BITS_PER_U64) {
		printk(KERN_ERR "SELinux: ebitmap: map size %u does not "
		       "match my size %zd (high bit was %d)\n",
		       mapunit, BITS_PER_U64, e->highbit);
		goto bad;
	}

	/* round up e->highbit */
	e->highbit += EBITMAP_SIZE - 1;
	e->highbit -= (e->highbit % EBITMAP_SIZE);

	if (!e->highbit) {
		e->node = NULL;
		goto ok;
	}

	if (e->highbit && !count)
		goto bad;

	for (i = 0; i < count; i++) {
		rc = next_entry(&startbit, fp, sizeof(u32));
		if (rc < 0) {
			printk(KERN_ERR "SELinux: ebitmap: truncated map\n");
			goto bad;
		}
		startbit = le32_to_cpu(startbit);

		if (startbit & (mapunit - 1)) {
			printk(KERN_ERR "SELinux: ebitmap start bit (%d) is "
			       "not a multiple of the map unit size (%u)\n",
			       startbit, mapunit);
			goto bad;
		}
		if (startbit > e->highbit - mapunit) {
			printk(KERN_ERR "SELinux: ebitmap start bit (%d) is "
			       "beyond the end of the bitmap (%u)\n",
			       startbit, (e->highbit - mapunit));
			goto bad;
		}

		if (!n || startbit >= n->startbit + EBITMAP_SIZE) {
			struct ebitmap_node *tmp;
			tmp = kmem_cache_zalloc(ebitmap_node_cachep, GFP_KERNEL);
			if (!tmp) {
				printk(KERN_ERR
				       "SELinux: ebitmap: out of memory\n");
				rc = -ENOMEM;
				goto bad;
			}
			/* round down */
			tmp->startbit = startbit - (startbit % EBITMAP_SIZE);
			if (n)
				n->next = tmp;
			else
				e->node = tmp;
			n = tmp;
		} else if (startbit <= n->startbit) {
			printk(KERN_ERR "SELinux: ebitmap: start bit %d"
			       " comes after start bit %d\n",
			       startbit, n->startbit);
			goto bad;
		}

		rc = next_entry(&map, fp, sizeof(u64));
		if (rc < 0) {
			printk(KERN_ERR "SELinux: ebitmap: truncated map\n");
			goto bad;
		}
		map = le64_to_cpu(map);

		index = (startbit - n->startbit) / EBITMAP_UNIT_SIZE;
		while (map) {
			n->maps[index++] = map & (-1UL);
			map = EBITMAP_SHIFT_UNIT_SIZE(map);
		}
	}
ok:
	rc = 0;
out:
	return rc;
bad:
	if (!rc)
		rc = -EINVAL;
	ebitmap_destroy(e);
	goto out;
}
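/*
 * Summary of the serialized layout shared by ebitmap_read() above and
 * ebitmap_write() below (all fields little-endian); the pseudo-struct is
 * descriptive only and does not appear in the policy headers:
 *
 *	u32 mapunit;	- must equal BITS_PER_U64 (64)
 *	u32 highbit;	- rounded-up upper bound on the highest set bit
 *	u32 count;	- number of startbit/map pairs that follow
 *	repeated count times:
 *		u32 startbit;	- 64-bit-aligned offset of this unit
 *		u64 map;	- the 64 bits starting at startbit
 */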
int ebitmap_write(struct ebitmap *e, void *fp)
{
	struct ebitmap_node *n;
	u32 count;
	__le32 buf[3];
	u64 map;
	int bit, last_bit, last_startbit, rc;

	buf[0] = cpu_to_le32(BITS_PER_U64);

	count = 0;
	last_bit = 0;
	last_startbit = -1;
	ebitmap_for_each_positive_bit(e, n, bit) {
		if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
			count++;
			last_startbit = rounddown(bit, BITS_PER_U64);
		}
		last_bit = roundup(bit + 1, BITS_PER_U64);
	}
	buf[1] = cpu_to_le32(last_bit);
	buf[2] = cpu_to_le32(count);

	rc = put_entry(buf, sizeof(u32), 3, fp);
	if (rc)
		return rc;

	map = 0;
	last_startbit = INT_MIN;
	ebitmap_for_each_positive_bit(e, n, bit) {
		if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
			__le64 buf64[1];

			/* this is the very first bit */
			if (!map) {
				last_startbit = rounddown(bit, BITS_PER_U64);
				map = (u64)1 << (bit - last_startbit);
				continue;
			}

			/* write the last node */
			buf[0] = cpu_to_le32(last_startbit);
			rc = put_entry(buf, sizeof(u32), 1, fp);
			if (rc)
				return rc;

			buf64[0] = cpu_to_le64(map);
			rc = put_entry(buf64, sizeof(u64), 1, fp);
			if (rc)
				return rc;

			/* set up for the next node */
			map = 0;
			last_startbit = rounddown(bit, BITS_PER_U64);
		}
		map |= (u64)1 << (bit - last_startbit);
	}
	/* write the last node */
	if (map) {
		__le64 buf64[1];

		/* write the last node */
		buf[0] = cpu_to_le32(last_startbit);
		rc = put_entry(buf, sizeof(u32), 1, fp);
		if (rc)
			return rc;

		buf64[0] = cpu_to_le64(map);
		rc = put_entry(buf64, sizeof(u64), 1, fp);
		if (rc)
			return rc;
	}
	return 0;
}

void ebitmap_cache_init(void)
{
	ebitmap_node_cachep = kmem_cache_create("ebitmap_node",
						sizeof(struct ebitmap_node),
						0, SLAB_PANIC, NULL);
}

void ebitmap_cache_destroy(void)
{
	kmem_cache_destroy(ebitmap_node_cachep);
}
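/*
 * Usage sketch (illustrative comment only, not compiled): the slab cache
 * must be created before any node is allocated and destroyed only after
 * every ebitmap has been freed; the callers of these helpers live outside
 * this file.  With hypothetical locals e, n and bit:
 *
 *	ebitmap_cache_init();
 *	...
 *	ebitmap_set_bit(&e, 100, 1);	- allocates from ebitmap_node_cachep
 *	ebitmap_for_each_positive_bit(&e, n, bit)
 *		pr_info("bit %d is set\n", bit);
 *	ebitmap_destroy(&e);
 *	...
 *	ebitmap_cache_destroy();
 */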