slab.c: diff between commits 756a025f00091918d9d09ca3229defb160b409c0 and 1170532bb49f9468aedabdc1d5a560e2521a2bcc
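This diff converts the error reporting in mm/slab.c from open-coded printk(KERN_ERR ...) calls to the pr_err() helper and drops the trailing periods from several messages; there is no functional change. For reference, pr_err() is essentially the following wrapper (a sketch of the stock definitions in include/linux/printk.h; slab.c does not override pr_fmt(), so no extra prefix is added):

/* Approximate definitions from include/linux/printk.h */
#ifndef pr_fmt
#define pr_fmt(fmt) fmt	/* optional per-file message prefix, default: none */
#endif

#define pr_err(fmt, ...) \
	printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)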
1 /*
2 * linux/mm/slab.c
3 * Written by Mark Hemment, 1996/97.
4 * (markhe@nextd.demon.co.uk)
5 *
6 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7 *
8 * Major cleanup, different bufctl logic, per-cpu arrays

--- 460 unchanged lines hidden ---

469 }
470
471 #if DEBUG
472 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
473
474 static void __slab_error(const char *function, struct kmem_cache *cachep,
475 char *msg)
476 {
-477 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
+477 pr_err("slab error in %s(): cache `%s': %s\n",
478 function, cachep->name, msg);
479 dump_stack();
480 add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
481 }
482 #endif
483
484 /*
485 * By default on NUMA we use alien caches to stage the freeing of

--- 1062 unchanged lines hidden ---

1548 }
1549
1550 static void dump_line(char *data, int offset, int limit)
1551 {
1552 int i;
1553 unsigned char error = 0;
1554 int bad_count = 0;
1555
-1556 printk(KERN_ERR "%03x: ", offset);
+1556 pr_err("%03x: ", offset);
1557 for (i = 0; i < limit; i++) {
1558 if (data[offset + i] != POISON_FREE) {
1559 error = data[offset + i];
1560 bad_count++;
1561 }
1562 }
1563 print_hex_dump(KERN_CONT, "", 0, 16, 1,
1564 &data[offset], limit, 1);
1565
1566 if (bad_count == 1) {
1567 error ^= POISON_FREE;
1568 if (!(error & (error - 1))) {
-1569 printk(KERN_ERR "Single bit error detected. Probably bad RAM.\n");
+1569 pr_err("Single bit error detected. Probably bad RAM.\n");
1570 #ifdef CONFIG_X86
-1571 printk(KERN_ERR "Run memtest86+ or a similar memory test tool.\n");
+1571 pr_err("Run memtest86+ or a similar memory test tool.\n");
1572 #else
-1573 printk(KERN_ERR "Run a memory test tool.\n");
+1573 pr_err("Run a memory test tool.\n");
1574 #endif
1575 }
1576 }
1577 }
1578 #endif
1579
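The single-bit-error heuristic in dump_line() above relies on a power-of-two test: XOR-ing the one mismatching byte with POISON_FREE leaves only the flipped bits set, and !(x & (x - 1)) is true exactly when at most one bit remains. A stand-alone illustration (not part of slab.c; POISON_FREE is the 0x6b pattern from include/linux/poison.h):

#include <stdio.h>

#define POISON_FREE 0x6b	/* pattern written over freed, poisoned objects */

/* Return 1 when 'seen' differs from the poison byte in exactly one bit. */
static int single_bit_flip(unsigned char seen)
{
	unsigned char diff = seen ^ POISON_FREE;

	return diff && !(diff & (diff - 1));
}

int main(void)
{
	printf("%d\n", single_bit_flip(0x6a));	/* 1: only bit 0 flipped */
	printf("%d\n", single_bit_flip(0x00));	/* 0: several bits differ */
	return 0;
}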
1580 #if DEBUG
1581
1582 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1583 {
1584 int i, size;
1585 char *realobj;
1586
1587 if (cachep->flags & SLAB_RED_ZONE) {
-1588 printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
-1589 *dbg_redzone1(cachep, objp),
-1590 *dbg_redzone2(cachep, objp));
+1588 pr_err("Redzone: 0x%llx/0x%llx\n",
+1589 *dbg_redzone1(cachep, objp),
+1590 *dbg_redzone2(cachep, objp));
1591 }
1592
1593 if (cachep->flags & SLAB_STORE_USER) {
-1594 printk(KERN_ERR "Last user: [<%p>](%pSR)\n",
+1594 pr_err("Last user: [<%p>](%pSR)\n",
1595 *dbg_userword(cachep, objp),
1596 *dbg_userword(cachep, objp));
1597 }
1598 realobj = (char *)objp + obj_offset(cachep);
1599 size = cachep->object_size;
1600 for (i = 0; i < size && lines; i += 16, lines--) {
1601 int limit;
1602 limit = 16;

--- 19 unchanged lines hidden ---

1622 char exp = POISON_FREE;
1623 if (i == size - 1)
1624 exp = POISON_END;
1625 if (realobj[i] != exp) {
1626 int limit;
1627 /* Mismatch ! */
1628 /* Print header */
1629 if (lines == 0) {
-1630 printk(KERN_ERR
-1631 "Slab corruption (%s): %s start=%p, len=%d\n",
-1632 print_tainted(), cachep->name, realobj, size);
+1630 pr_err("Slab corruption (%s): %s start=%p, len=%d\n",
+1631 print_tainted(), cachep->name,
+1632 realobj, size);
1633 print_objinfo(cachep, objp, 0);
1634 }
1635 /* Hexdump the affected line */
1636 i = (i / 16) * 16;
1637 limit = 16;
1638 if (i + limit > size)
1639 limit = size - i;
1640 dump_line(realobj, i, limit);

--- 10 unchanged lines hidden ---

1651 */
1652 struct page *page = virt_to_head_page(objp);
1653 unsigned int objnr;
1654
1655 objnr = obj_to_index(cachep, page, objp);
1656 if (objnr) {
1657 objp = index_to_obj(cachep, page, objnr - 1);
1658 realobj = (char *)objp + obj_offset(cachep);
-1659 printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
-1660 realobj, size);
+1659 pr_err("Prev obj: start=%p, len=%d\n", realobj, size);
1661 print_objinfo(cachep, objp, 2);
1662 }
1663 if (objnr + 1 < cachep->num) {
1664 objp = index_to_obj(cachep, page, objnr + 1);
1665 realobj = (char *)objp + obj_offset(cachep);
-1666 printk(KERN_ERR "Next obj: start=%p, len=%d\n",
-1667 realobj, size);
+1665 pr_err("Next obj: start=%p, len=%d\n", realobj, size);
1668 print_objinfo(cachep, objp, 2);
1669 }
1670 }
1671 }
1672 #endif
1673
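The corruption reports above locate the previous and next objects with obj_to_index() and index_to_obj(). Conceptually both are plain offset arithmetic within the slab page (the in-tree helpers are more elaborate about avoiding the division, but the arithmetic they implement is the same); a simplified sketch with hypothetical names:

#include <stddef.h>

/* Simplified sketch, not the slab.c implementation: objects sit back to
 * back starting at the slab's base address. */
static unsigned int sketch_obj_to_index(const char *base, size_t obj_size,
					const char *obj)
{
	return (obj - base) / obj_size;
}

static char *sketch_index_to_obj(char *base, size_t obj_size, unsigned int idx)
{
	return base + (size_t)idx * obj_size;
}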
1674 #if DEBUG
1675 static void slab_destroy_debugcheck(struct kmem_cache *cachep,

--- 782 unchanged lines hidden ---

2458 {
2459 unsigned int objnr = obj_to_index(cachep, page, objp);
2460 #if DEBUG
2461 unsigned int i;
2462
2463 /* Verify double free bug */
2464 for (i = page->active; i < cachep->num; i++) {
2465 if (get_free_obj(page, i) == objnr) {
-2466 printk(KERN_ERR "slab: double free detected in cache '%s', objp %p\n",
+2464 pr_err("slab: double free detected in cache '%s', objp %p\n",
2467 cachep->name, objp);
2468 BUG();
2469 }
2470 }
2471 #endif
2472 page->active--;
2473 if (!page->freelist)
2474 page->freelist = objp + obj_offset(cachep);

--- 103 unchanged lines hidden ---
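The double-free check above works because the slab's freelist is an array of object indices in which slots [page->active, cachep->num) name the objects that are currently free; if the index being freed already appears in that range, the object is being freed a second time. A stand-alone sketch of the same scan (hypothetical names, not kernel code):

#include <stdio.h>

/* Slots [active, num) of the freelist hold the indices of free objects. */
static int is_double_free(const unsigned char *freelist, unsigned int active,
			  unsigned int num, unsigned int objnr)
{
	unsigned int i;

	for (i = active; i < num; i++)
		if (freelist[i] == objnr)
			return 1;	/* objnr is already marked free */
	return 0;
}

int main(void)
{
	unsigned char freelist[4] = { 0, 1, 2, 3 };

	/* With active == 2, objects 2 and 3 are free; freeing 3 again is a bug. */
	printf("%d %d\n", is_double_free(freelist, 2, 4, 3),
	       is_double_free(freelist, 2, 4, 0));
	return 0;
}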

2578 /*
2579 * Perform extra freeing checks:
2580 * - detect bad pointers.
2581 * - POISON/RED_ZONE checking
2582 */
2583 static void kfree_debugcheck(const void *objp)
2584 {
2585 if (!virt_addr_valid(objp)) {
-2586 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
+2584 pr_err("kfree_debugcheck: out of range ptr %lxh\n",
2587 (unsigned long)objp);
2588 BUG();
2589 }
2590 }
2591
2592 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2593 {
2594 unsigned long long redzone1, redzone2;

--- 7 unchanged lines hidden ---

2602 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2603 return;
2604
2605 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2606 slab_error(cache, "double free detected");
2607 else
2608 slab_error(cache, "memory outside object was overwritten");
2609
-2610 printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
-2611 obj, redzone1, redzone2);
+2608 pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
+2609 obj, redzone1, redzone2);
2612 }
2613
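verify_redzone_free() above tells the two failure modes apart through the guard words kept on both sides of the object: they hold RED_ACTIVE while the object is allocated and are flipped to RED_INACTIVE when it is freed (both constants come from include/linux/poison.h). Seeing RED_INACTIVE in both on free means the object was already freed; any other value means something wrote past an object's bounds. A sketch of just that classification, with hypothetical names:

/* Sketch of the decision only, not the kernel function itself. */
enum redzone_verdict { FREE_OK, DOUBLE_FREE, OUT_OF_BOUNDS };

static enum redzone_verdict classify_redzones(unsigned long long rz1,
					      unsigned long long rz2,
					      unsigned long long red_active,
					      unsigned long long red_inactive)
{
	if (rz1 == red_active && rz2 == red_active)
		return FREE_OK;		/* expected state for a live object */
	if (rz1 == red_inactive && rz2 == red_inactive)
		return DOUBLE_FREE;	/* both guards already flipped by an earlier free */
	return OUT_OF_BOUNDS;		/* a guard word was overwritten */
}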
2614 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2615 unsigned long caller)
2616 {
2617 unsigned int objnr;
2618 struct page *page;
2619

--- 271 unchanged lines hidden ---

2891 }
2892 if (cachep->flags & SLAB_STORE_USER)
2893 *dbg_userword(cachep, objp) = (void *)caller;
2894
2895 if (cachep->flags & SLAB_RED_ZONE) {
2896 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
2897 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
2898 slab_error(cachep, "double free, or memory outside object was overwritten");
-2899 printk(KERN_ERR
-2900 "%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
-2901 objp, *dbg_redzone1(cachep, objp),
-2902 *dbg_redzone2(cachep, objp));
+2897 pr_err("%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
+2898 objp, *dbg_redzone1(cachep, objp),
+2899 *dbg_redzone2(cachep, objp));
2903 }
2904 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
2905 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
2906 }
2907
2908 objp += obj_offset(cachep);
2909 if (cachep->ctor && cachep->flags & SLAB_POISON)
2910 cachep->ctor(objp);
2911 if (ARCH_SLAB_MINALIGN &&
2912 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
-2913 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
+2910 pr_err("0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
2914 objp, (int)ARCH_SLAB_MINALIGN);
2915 }
2916 return objp;
2917 }
2918 #else
2919 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
2920 #endif
2921
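The ARCH_SLAB_MINALIGN test above is the usual mask trick: for a power-of-two alignment A, a pointer is A-byte aligned exactly when its low log2(A) bits are zero, i.e. ((uintptr_t)p & (A - 1)) == 0. A stand-alone illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uintptr_t p = 0x1008;

	printf("%d\n", (p & (8 - 1)) == 0);	/* 1: 0x1008 is 8-byte aligned */
	printf("%d\n", (p & (16 - 1)) == 0);	/* 0: but not 16-byte aligned */
	return 0;
}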

--- 910 unchanged lines hidden ---

3832 */
3833 if (limit > 32)
3834 limit = 32;
3835 #endif
3836 batchcount = (limit + 1) / 2;
3837 skip_setup:
3838 err = do_tune_cpucache(cachep, limit, batchcount, shared, gfp);
3839 if (err)
-3840 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
+3837 pr_err("enable_cpucache failed for %s, error %d\n",
3841 cachep->name, -err);
3842 return err;
3843 }
3844
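enable_cpucache() above sizes the per-CPU transfer batch as half of the array limit, rounded up: batchcount = (limit + 1) / 2 with integer division, so a limit of 32 gives a batch of 16, a limit of 5 gives 3, and a limit of 1 still gives 1.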
3845 /*
3846 * Drain an array if it contains any elements taking the node lock only if
3847 * necessary. Note that the node listlock also protects the array_cache
3848 * if drain_array() is used on the shared array.

--- 139 unchanged lines hidden ---

3988 }
3989 num_slabs += active_slabs;
3990 num_objs = num_slabs * cachep->num;
3991 if (num_objs - active_objs != free_objects && !error)
3992 error = "free_objects accounting error";
3993
3994 name = cachep->name;
3995 if (error)
-3996 printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
+3993 pr_err("slab: cache %s error: %s\n", name, error);
3997
3998 sinfo->active_objs = active_objs;
3999 sinfo->num_objs = num_objs;
4000 sinfo->active_slabs = active_slabs;
4001 sinfo->num_slabs = num_slabs;
4002 sinfo->shared_avail = shared_avail;
4003 sinfo->limit = cachep->limit;
4004 sinfo->batchcount = cachep->batchcount;

--- 298 unchanged lines hidden ---
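The accounting check above is a simple consistency test: every slab of a cache holds cachep->num objects, so num_objs = num_slabs * cachep->num, and whatever is not active must sit on a free list. For example, 10 slabs of 20 objects give num_objs = 200; with active_objs = 150, free_objects must be exactly 50, otherwise the "free_objects accounting error" message above is reported.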