/*
 * Basic general purpose allocator for managing special purpose memory
 * not managed by the regular kmalloc/kfree interface.
 * Uses for this include on-device special memory, uncached memory,
 * etc.
 *
 * This code is based on the buddy allocator found in the sym53c8xx_2
 * driver Copyright (C) 1999-2001 Gerard Roudier <groudier@free.fr>,
 * and adapted for general purpose use.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>

#include <asm/page.h>


struct gen_pool *gen_pool_create(int nr_chunks, int max_chunk_shift,
				 unsigned long (*fp)(struct gen_pool *),
				 unsigned long data)
{
	struct gen_pool *poolp;
	unsigned long tmp;
	int i;

	/*
	 * This is really an arbitrary limit, +10 is enough for
	 * IA64_GRANULE_SHIFT, aka 16MB. If anyone needs a larger limit
	 * this can be increased without problems.
	 */
	if ((max_chunk_shift > (PAGE_SHIFT + 10)) ||
	    ((max_chunk_shift < ALLOC_MIN_SHIFT) && max_chunk_shift))
		return NULL;

	if (!max_chunk_shift)
		max_chunk_shift = PAGE_SHIFT;

	poolp = kmalloc(sizeof(struct gen_pool), GFP_KERNEL);
	if (!poolp)
		return NULL;
	memset(poolp, 0, sizeof(struct gen_pool));
	poolp->h = kmalloc(sizeof(struct gen_pool_link) *
			   (max_chunk_shift - ALLOC_MIN_SHIFT + 1),
			   GFP_KERNEL);
	if (!poolp->h) {
		printk(KERN_WARNING "gen_pool_create() failed to allocate\n");
		kfree(poolp);
		return NULL;
	}
	memset(poolp->h, 0, sizeof(struct gen_pool_link) *
	       (max_chunk_shift - ALLOC_MIN_SHIFT + 1));

	spin_lock_init(&poolp->lock);
	poolp->get_new_chunk = fp;
	poolp->max_chunk_shift = max_chunk_shift;
	poolp->private = data;

	for (i = 0; i < nr_chunks; i++) {
		tmp = poolp->get_new_chunk(poolp);
		printk(KERN_INFO "allocated %lx\n", tmp);
		if (!tmp)
			break;
		gen_pool_free(poolp, tmp, (1 << poolp->max_chunk_shift));
	}

	return poolp;
}
EXPORT_SYMBOL(gen_pool_create);
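
/*
 * Hypothetical usage sketch (not part of this file): how a caller might
 * supply the get_new_chunk callback and create a pool.  The callback is
 * expected to return the base address of a naturally aligned chunk of
 * size (1 << max_chunk_shift), or 0 on failure; gen_pool_create() calls
 * it nr_chunks times and seeds the free lists via gen_pool_free().
 * The names example_get_new_chunk/example_pool_init are illustrative
 * only, and ordinary pages stand in here for the on-device or uncached
 * memory a real user would hand out.
 */
#if 0	/* illustration only, not compiled */
static unsigned long example_get_new_chunk(struct gen_pool *poolp)
{
	/*
	 * One chunk is one naturally aligned block of 2^max_chunk_shift
	 * bytes; back it with ordinary pages for this sketch.
	 */
	return __get_free_pages(GFP_KERNEL,
				poolp->max_chunk_shift - PAGE_SHIFT);
}

static struct gen_pool *example_pool_init(void)
{
	/* Seed the pool with two chunks of PAGE_SIZE each, no private data. */
	return gen_pool_create(2, PAGE_SHIFT, example_get_new_chunk, 0);
}
#endif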

/*
 * Simple power of two buddy-like generic allocator.
 * Provides naturally aligned memory chunks.
 */
unsigned long gen_pool_alloc(struct gen_pool *poolp, int size)
{
	int j, i, s, max_chunk_size;
	unsigned long a, flags;
	struct gen_pool_link *h = poolp->h;

	max_chunk_size = 1 << poolp->max_chunk_shift;

	if (size > max_chunk_size)
		return 0;

	size = max(size, 1 << ALLOC_MIN_SHIFT);

	/*
	 * Round the request up to the smallest power-of-two bucket that
	 * fits it; i is the index of that bucket in the free lists h[].
	 */
	i = 0;
	s = 1 << ALLOC_MIN_SHIFT;
	while (size > s) {
		s <<= 1;
		i++;
	}

	j = i;

	spin_lock_irqsave(&poolp->lock, flags);
	while (!h[j].next) {
		if (s == max_chunk_size) {
			struct gen_pool_link *ptr;
			spin_unlock_irqrestore(&poolp->lock, flags);
			ptr = (struct gen_pool_link *)poolp->get_new_chunk(poolp);
			spin_lock_irqsave(&poolp->lock, flags);
			h[j].next = ptr;
			if (h[j].next)
				h[j].next->next = NULL;
			break;
		}
		j++;
		s <<= 1;
	}
	a = (unsigned long) h[j].next;
	if (a) {
		h[j].next = h[j].next->next;
		/*
		 * This should be split into a separate function doing
		 * the chunk split, in order to support custom handling
		 * of memory not physically accessible by the host.
		 */
		while (j > i) {
			j -= 1;
			s >>= 1;
			h[j].next = (struct gen_pool_link *) (a + s);
			h[j].next->next = NULL;
		}
	}
	spin_unlock_irqrestore(&poolp->lock, flags);
	return a;
}
EXPORT_SYMBOL(gen_pool_alloc);


/*
 * Counter-part of the generic allocator.
 */
void gen_pool_free(struct gen_pool *poolp, unsigned long ptr, int size)
{
	struct gen_pool_link *q;
	struct gen_pool_link *h = poolp->h;
	unsigned long a, b, flags;
	int i, s, max_chunk_size;

	max_chunk_size = 1 << poolp->max_chunk_shift;

	if (size > max_chunk_size)
		return;

	size = max(size, 1 << ALLOC_MIN_SHIFT);

	/* Recompute the bucket index the block was allocated from. */
	i = 0;
	s = 1 << ALLOC_MIN_SHIFT;
	while (size > s) {
		s <<= 1;
		i++;
	}

	a = ptr;

	spin_lock_irqsave(&poolp->lock, flags);
	while (1) {
		if (s == max_chunk_size) {
			((struct gen_pool_link *)a)->next = h[i].next;
			h[i].next = (struct gen_pool_link *)a;
			break;
		}
		b = a ^ s;
		q = &h[i];

		while (q->next && q->next != (struct gen_pool_link *)b)
			q = q->next;

		if (!q->next) {
			((struct gen_pool_link *)a)->next = h[i].next;
			h[i].next = (struct gen_pool_link *)a;
			break;
		}
		q->next = q->next->next;
		a = a & b;
		s <<= 1;
		i++;
	}
	spin_unlock_irqrestore(&poolp->lock, flags);
}
EXPORT_SYMBOL(gen_pool_free);
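
/*
 * Hypothetical usage sketch (not part of this file): allocate a region
 * from an existing pool and hand it back.  The pool pointer and the
 * 256-byte request are illustrative; the size passed to gen_pool_free()
 * must match the size passed to gen_pool_alloc(), and the returned
 * address is naturally aligned to the rounded-up power-of-two size.
 */
#if 0	/* illustration only, not compiled */
static void example_use(struct gen_pool *poolp)
{
	unsigned long addr;

	addr = gen_pool_alloc(poolp, 256);
	if (!addr)
		return;		/* pool exhausted and no new chunk available */

	/* ... use the 256 bytes at addr ... */

	gen_pool_free(poolp, addr, 256);
}
#endif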