--- slab.c (b28a02de8c70d41d6b6ba8911e83ed3ccf2e13f8)
+++ slab.c (cd105df4590c89837a1c300843238148cfef9b5f)
 /*
  * linux/mm/slab.c
  * Written by Mark Hemment, 1996/97.
  * (markhe@nextd.demon.co.uk)
  *
  * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
  *
  * Major cleanup, different bufctl logic, per-cpu arrays
[... 840 unchanged lines hidden ...]
 static int __devinit cpuup_callback(struct notifier_block *nfb,
 				  unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
 	kmem_cache_t *cachep;
 	struct kmem_list3 *l3 = NULL;
 	int node = cpu_to_node(cpu);
 	int memsize = sizeof(struct kmem_list3);
-	struct array_cache *nc = NULL;
 
 	switch (action) {
 	case CPU_UP_PREPARE:
 		down(&cache_chain_sem);
 		/* we need to do this right in the beginning since
 		 * alloc_arraycache's are going to use this list.
 		 * kmalloc_node allows us to add the slab to the right
 		 * kmem_list3 and not this cpu's kmem_list3
[... 20 unchanged lines hidden ...]
 				(1 + nr_cpus_node(node)) *
 				cachep->batchcount + cachep->num;
 			spin_unlock_irq(&cachep->nodelists[node]->list_lock);
 		}
 
 		/* Now we can go ahead with allocating the shared array's
 		   & array cache's */
 		list_for_each_entry(cachep, &cache_chain, next) {
+			struct array_cache *nc;
+
 			nc = alloc_arraycache(node, cachep->limit,
 						cachep->batchcount);
 			if (!nc)
 				goto bad;
 			cachep->array[cpu] = nc;
 
 			l3 = cachep->nodelists[node];
 			BUG_ON(!l3);
[... 2782 unchanged lines hidden ...]
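The only substantive change in this hunk is moving the declaration of nc from function scope into the list_for_each_entry() loop where it is actually used, so the pointer's scope matches its single use and no value can leak across iterations. A minimal, hypothetical user-space sketch of the same scope-narrowing idiom follows (struct cache, alloc_entry, and the loop bounds are invented for illustration; this is not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct cache { int limit; };		/* stand-in for kmem_cache_t */

/* stand-in for alloc_arraycache(): may fail and return NULL */
static int *alloc_entry(int limit)
{
	return malloc(sizeof(int) * (size_t)limit);
}

int main(void)
{
	struct cache caches[] = { {4}, {8}, {16} };

	for (size_t i = 0; i < sizeof(caches) / sizeof(caches[0]); i++) {
		/* declared inside the loop, like nc in the new slab.c:
		 * no stale value survives past a single iteration */
		int *nc;

		nc = alloc_entry(caches[i].limit);
		if (!nc)
			return 1;	/* analogous to "goto bad" above */
		printf("allocated room for %d ints\n", caches[i].limit);
		free(nc);
	}
	return 0;
}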