--- vmalloc.c (7bee946358c3cb957d4aa648fc5ab3cad0b232d0)
+++ vmalloc.c (d086817dc0d42f1be8db4138233d33e1dd16a956)
 /*
  * linux/mm/vmalloc.c
  *
  * Copyright (C) 1993 Linus Torvalds
  * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
  * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
  * Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
  * Numa awareness, Christoph Lameter, SGI, June 2005

--- 657 unchanged lines hidden ---

 struct vmap_block {
 	spinlock_t lock;
 	struct vmap_area *va;
 	struct vmap_block_queue *vbq;
 	unsigned long free, dirty;
 	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
 	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
 	union {
-		struct {
-			struct list_head free_list;
-			struct list_head dirty_list;
-		};
+		struct list_head free_list;
 		struct rcu_head rcu_head;
 	};
 };
 
 /* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
 static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
 
 /*

--- 50 unchanged lines hidden ---

 
 	spin_lock_init(&vb->lock);
 	vb->va = va;
 	vb->free = VMAP_BBMAP_BITS;
 	vb->dirty = 0;
 	bitmap_zero(vb->alloc_map, VMAP_BBMAP_BITS);
 	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
 	INIT_LIST_HEAD(&vb->free_list);
-	INIT_LIST_HEAD(&vb->dirty_list);
 
 	vb_idx = addr_to_vb_idx(va->va_start);
 	spin_lock(&vmap_block_tree_lock);
 	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
 	spin_unlock(&vmap_block_tree_lock);
 	BUG_ON(err);
 	radix_tree_preload_end();
 

--- 14 unchanged lines hidden ---

 	kfree(vb);
 }
 
 static void free_vmap_block(struct vmap_block *vb)
 {
 	struct vmap_block *tmp;
 	unsigned long vb_idx;
 
-	spin_lock(&vb->vbq->lock);
-	if (!list_empty(&vb->free_list))
-		list_del(&vb->free_list);
-	if (!list_empty(&vb->dirty_list))
-		list_del(&vb->dirty_list);
-	spin_unlock(&vb->vbq->lock);
+	BUG_ON(!list_empty(&vb->free_list));
 
 	vb_idx = addr_to_vb_idx(vb->va->va_start);
 	spin_lock(&vmap_block_tree_lock);
 	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
 	spin_unlock(&vmap_block_tree_lock);
 	BUG_ON(tmp != vb);
 
 	free_unmap_vmap_area_noflush(vb->va);

--- 68 unchanged lines hidden ---

 	vb_idx = addr_to_vb_idx((unsigned long)addr);
 	rcu_read_lock();
 	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
 	rcu_read_unlock();
 	BUG_ON(!vb);
 
 	spin_lock(&vb->lock);
 	bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order);
-	if (!vb->dirty) {
-		spin_lock(&vb->vbq->lock);
-		list_add(&vb->dirty_list, &vb->vbq->dirty);
-		spin_unlock(&vb->vbq->lock);
-	}
+
 	vb->dirty += 1UL << order;
 	if (vb->dirty == VMAP_BBMAP_BITS) {
 		BUG_ON(vb->free || !list_empty(&vb->free_list));
 		spin_unlock(&vb->lock);
 		free_vmap_block(vb);
 	} else
 		spin_unlock(&vb->lock);
 }

--- 1063 unchanged lines hidden ---
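For quick reference, the fragments below consolidate what the newer (d086817d) side of the comparison leaves in place: struct vmap_block loses its dirty_list, so a block only ever sits on the per-CPU free_list, and free_vmap_block() asserts that the block is already unlinked instead of taking vbq->lock to unlink it. This is a sketch assembled from the visible hunks above, not a verbatim copy of the full file; the tail of free_vmap_block() falls inside a hidden region and is omitted here.

struct vmap_block {
	spinlock_t lock;
	struct vmap_area *va;
	struct vmap_block_queue *vbq;
	unsigned long free, dirty;
	DECLARE_BITMAP(alloc_map, VMAP_BBMAP_BITS);
	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
	union {
		struct list_head free_list;	/* linkage while the block is live */
		struct rcu_head rcu_head;	/* reused once the block is torn down */
	};
};

static void free_vmap_block(struct vmap_block *vb)
{
	struct vmap_block *tmp;
	unsigned long vb_idx;

	/* The visible caller in the last hunk checks, under vb->lock, that a
	 * fully dirtied block has no free space and is off the free list
	 * before calling here, so vbq->lock is no longer taken. */
	BUG_ON(!list_empty(&vb->free_list));

	vb_idx = addr_to_vb_idx(vb->va->va_start);
	spin_lock(&vmap_block_tree_lock);
	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
	spin_unlock(&vmap_block_tree_lock);
	BUG_ON(tmp != vb);

	free_unmap_vmap_area_noflush(vb->va);
	/* remainder of the function lies in the hidden region above */
}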