--- aoedev.c (fea05a26c3a215796b7a4fa5cbc25278d3e16d30)
+++ aoedev.c (0c966214589b9767fd8771b71328f83bac58cb25)
@@ -1,58 +1,88 @@
 /* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
 /*
  * aoedev.c
  * AoE device utility functions; maintains device list.
  */

 #include <linux/hdreg.h>
 #include <linux/blkdev.h>
 #include <linux/netdevice.h>
 #include <linux/delay.h>
 #include <linux/slab.h>
+#include <linux/bitmap.h>
+#include <linux/kdev_t.h>
 #include "aoe.h"

 static void dummy_timer(ulong);
 static void aoedev_freedev(struct aoedev *);
 static void freetgt(struct aoedev *d, struct aoetgt *t);
 static void skbpoolfree(struct aoedev *d);

 static struct aoedev *devlist;
 static DEFINE_SPINLOCK(devlist_lock);

-/*
- * Users who grab a pointer to the device with aoedev_by_aoeaddr or
- * aoedev_by_sysminor_m automatically get a reference count and must
- * be responsible for performing a aoedev_put. With the addition of
- * async kthread processing I'm no longer confident that we can
- * guarantee consistency in the face of device flushes.
- *
- * For the time being, we only bother to add extra references for
- * frames sitting on the iocq. When the kthreads finish processing
- * these frames, they will aoedev_put the device.
+/* Because some systems will have one, many, or no
+ *   - partitions,
+ *   - slots per shelf,
+ *   - or shelves,
+ * we need some flexibility in the way the minor numbers
+ * are allocated. So they are dynamic.
  */
-struct aoedev *
-aoedev_by_aoeaddr(int maj, int min)
-{
-        struct aoedev *d;
-        ulong flags;
+#define N_DEVS ((1U<<MINORBITS)/AOE_PARTITIONS)
+
+static DEFINE_SPINLOCK(used_minors_lock);
+static DECLARE_BITMAP(used_minors, N_DEVS);
+
+static int
+minor_get(ulong *minor)
+{
+        ulong flags;
+        ulong n;
+        int error = 0;

-        spin_lock_irqsave(&devlist_lock, flags);
-
-        for (d=devlist; d; d=d->next)
-                if (d->aoemajor == maj && d->aoeminor == min) {
-                        d->ref++;
-                        break;
-                }
-
-        spin_unlock_irqrestore(&devlist_lock, flags);
-        return d;
+        spin_lock_irqsave(&used_minors_lock, flags);
+        n = find_first_zero_bit(used_minors, N_DEVS);
+        if (n < N_DEVS)
+                set_bit(n, used_minors);
+        else
+                error = -1;
+        spin_unlock_irqrestore(&used_minors_lock, flags);
+
+        *minor = n * AOE_PARTITIONS;
+        return error;
+}
+
+static void
+minor_free(ulong minor)
+{
+        ulong flags;
+
+        minor /= AOE_PARTITIONS;
+        BUG_ON(minor >= N_DEVS);
+
+        spin_lock_irqsave(&used_minors_lock, flags);
+        BUG_ON(!test_bit(minor, used_minors));
+        clear_bit(minor, used_minors);
+        spin_unlock_irqrestore(&used_minors_lock, flags);
 }

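The hunk above replaces the old static minor-number scheme with a dynamic allocator: used_minors is a bitmap with one bit per device slot, each slot covering AOE_PARTITIONS consecutive minors, and minor_get() hands out the first free slot under used_minors_lock. The userspace model below illustrates the same bookkeeping. It is a sketch only: it assumes the kernel's MINORBITS value of 20 and an AOE_PARTITIONS of 16 (the default in aoe.h), uses a pthread mutex in place of the spinlock, and open-codes the scan that find_first_zero_bit()/set_bit() perform in the kernel.

#include <limits.h>
#include <pthread.h>
#include <stdio.h>

#define MINORBITS       20                      /* kernel value */
#define AOE_PARTITIONS  16                      /* assumed default; see aoe.h */
#define N_DEVS          ((1U << MINORBITS) / AOE_PARTITIONS)
#define BITS_PER_LONG   (sizeof(unsigned long) * CHAR_BIT)

static pthread_mutex_t used_minors_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long used_minors[N_DEVS / BITS_PER_LONG];

/* Userspace analog of minor_get(): find a clear bit, set it, and
 * return the first minor of the corresponding slot. */
static int minor_get(unsigned long *minor)
{
        unsigned long n;
        int error = -1;

        pthread_mutex_lock(&used_minors_lock);
        for (n = 0; n < N_DEVS; n++) {
                unsigned long word = n / BITS_PER_LONG;
                unsigned long mask = 1UL << (n % BITS_PER_LONG);

                if (!(used_minors[word] & mask)) {
                        used_minors[word] |= mask;      /* set_bit() */
                        *minor = n * AOE_PARTITIONS;
                        error = 0;
                        break;
                }
        }
        pthread_mutex_unlock(&used_minors_lock);
        return error;
}

int main(void)
{
        unsigned long a, b;

        if (minor_get(&a) == 0 && minor_get(&b) == 0)
                printf("allocated minors %lu and %lu\n", a, b); /* 0 and 16 */
        return 0;
}

Note that minors are handed out in units of AOE_PARTITIONS, so each device owns a contiguous range of minors for its partitions; minor_free() divides by the same factor to recover the slot index.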
+/*
+ * Users who grab a pointer to the device with aoedev_by_aoeaddr
+ * automatically get a reference count and must be responsible
+ * for performing a aoedev_put. With the addition of async
+ * kthread processing I'm no longer confident that we can
+ * guarantee consistency in the face of device flushes.
+ *
+ * For the time being, we only bother to add extra references for
+ * frames sitting on the iocq. When the kthreads finish processing
+ * these frames, they will aoedev_put the device.
+ */
+
 void
 aoedev_put(struct aoedev *d)
 {
         ulong flags;

         spin_lock_irqsave(&devlist_lock, flags);
         d->ref--;
         spin_unlock_irqrestore(&devlist_lock, flags);
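The re-added comment documents a get/put ownership contract: any aoedev pointer obtained from aoedev_by_aoeaddr() carries a reference that its user must drop with aoedev_put(), and frames queued on the iocq hold extra references until the kthreads are done with them. Here is a minimal userspace sketch of that contract, with illustrative names only (a single static device, a mutex standing in for devlist_lock):

#include <pthread.h>
#include <stdio.h>

struct dev {
        int ref;
};

static pthread_mutex_t devlist_lock = PTHREAD_MUTEX_INITIALIZER;
static struct dev the_dev = { .ref = 1 };       /* one ref held by the list itself */

/* Lookup hands out a counted reference... */
static struct dev *dev_get(void)
{
        pthread_mutex_lock(&devlist_lock);
        the_dev.ref++;
        pthread_mutex_unlock(&devlist_lock);
        return &the_dev;
}

/* ...and every user must drop it again with a matching put. */
static void dev_put(struct dev *d)
{
        pthread_mutex_lock(&devlist_lock);
        d->ref--;
        pthread_mutex_unlock(&devlist_lock);
}

int main(void)
{
        struct dev *d = dev_get();

        printf("ref while held: %d\n", d->ref); /* 2 */
        dev_put(d);
        return 0;
}

Taking devlist_lock for both the list walk and the refcount manipulation is what gives the flush path a consistent view of which devices still have users.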
@@ -154,16 +184,17 @@
         }
         t = d->targets;
         e = t + NTARGETS;
         for (; t < e && *t; t++)
                 freetgt(d, *t);
         if (d->bufpool)
                 mempool_destroy(d->bufpool);
         skbpoolfree(d);
+        minor_free(d->sysminor);
         kfree(d);
 }

 int
 aoedev_flush(const char __user *str, size_t cnt)
 {
         ulong flags;
         struct aoedev *d, **dd;
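This hunk adds the matching release: aoedev_freedev() now returns the device's minor range to the bitmap before freeing the struct, and the BUG_ON(!test_bit(...)) in minor_free() catches a slot being released twice. A small sketch of that pairing, using a byte array and assert() where the kernel uses the bitmap and BUG_ON(); all names here are illustrative:

#include <assert.h>
#include <stdlib.h>

#define AOE_PARTITIONS  16              /* assumed default; see aoe.h */
#define N_DEVS          4096            /* smaller than the kernel's, for brevity */

static unsigned char used[N_DEVS];      /* one byte per slot in place of the bitmap */

static void minor_free(unsigned long minor)
{
        minor /= AOE_PARTITIONS;        /* recover the slot index */
        assert(minor < N_DEVS);         /* BUG_ON(minor >= N_DEVS) */
        assert(used[minor]);            /* BUG_ON(!test_bit(...)): catches double free */
        used[minor] = 0;
}

struct dev {
        unsigned long sysminor;
};

static void dev_destroy(struct dev *d)
{
        minor_free(d->sysminor);        /* release the minor before the struct */
        free(d);
}

int main(void)
{
        struct dev *d = calloc(1, sizeof *d);

        if (!d)
                return 1;
        used[0] = 1;                    /* pretend minor_get() handed out slot 0 */
        d->sysminor = 0;
        dev_destroy(d);
        return 0;
}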
@@ -241,51 +272,52 @@
         struct sk_buff *skb, *tmp;

         skb_queue_walk_safe(&d->skbpool, skb, tmp)
                 skbfree(skb);

         __skb_queue_head_init(&d->skbpool);
 }

-/* find it or malloc it */
+/* find it or allocate it */
 struct aoedev *
-aoedev_by_sysminor_m(ulong sysminor)
+aoedev_by_aoeaddr(ulong maj, int min, int do_alloc)
 {
         struct aoedev *d;
         int i;
         ulong flags;
+        ulong sysminor;

         spin_lock_irqsave(&devlist_lock, flags);

         for (d=devlist; d; d=d->next)
-                if (d->sysminor == sysminor) {
+                if (d->aoemajor == maj && d->aoeminor == min) {
                         d->ref++;
                         break;
                 }
-        if (d)
+        if (d || !do_alloc || minor_get(&sysminor) < 0)
                 goto out;
         d = kcalloc(1, sizeof *d, GFP_ATOMIC);
         if (!d)
                 goto out;
         INIT_WORK(&d->work, aoecmd_sleepwork);
         spin_lock_init(&d->lock);
         skb_queue_head_init(&d->skbpool);
         init_timer(&d->timer);
         d->timer.data = (ulong) d;
         d->timer.function = dummy_timer;
         d->timer.expires = jiffies + HZ;
         add_timer(&d->timer);
         d->bufpool = NULL;      /* defer to aoeblk_gdalloc */
         d->tgt = d->targets;
         d->ref = 1;
         for (i = 0; i < NFACTIVE; i++)
                 INIT_LIST_HEAD(&d->factive[i]);
         d->sysminor = sysminor;
-        d->aoemajor = AOEMAJOR(sysminor);
-        d->aoeminor = AOEMINOR(sysminor);
+        d->aoemajor = maj;
+        d->aoeminor = min;
         d->mintimer = MINTIMER;
         d->next = devlist;
         devlist = d;
 out:
         spin_unlock_irqrestore(&devlist_lock, flags);
         return d;
 }
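The reworked aoedev_by_aoeaddr() keys the lookup on the AoE (major, minor) address rather than the system minor, and folds allocation into the same devlist_lock critical section: if nothing matches and the caller passed a nonzero do_alloc, a fresh minor is reserved via minor_get() and the new device is published on devlist before the lock drops, so two racing callers cannot create duplicates. Allocation uses GFP_ATOMIC because the spinlock is held. A self-contained sketch of this find-or-allocate pattern, with illustrative names rather than the kernel API:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
        int maj, min;
        int ref;
        struct node *next;
};

static struct node *list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Find a (maj, min) node and take a reference, or, when do_alloc is
 * set, allocate and publish one under the same lock so two racing
 * lookups cannot both insert it. */
static struct node *lookup(int maj, int min, int do_alloc)
{
        struct node *n;

        pthread_mutex_lock(&list_lock);
        for (n = list; n; n = n->next)
                if (n->maj == maj && n->min == min) {
                        n->ref++;
                        break;
                }
        if (!n && do_alloc) {
                n = calloc(1, sizeof *n);       /* kcalloc(..., GFP_ATOMIC) analog */
                if (n) {
                        n->maj = maj;
                        n->min = min;
                        n->ref = 1;
                        n->next = list;         /* publish under the lock */
                        list = n;
                }
        }
        pthread_mutex_unlock(&list_lock);
        return n;
}

int main(void)
{
        struct node *a = lookup(7, 0, 1);       /* first contact: allocates */
        struct node *b = lookup(7, 0, 0);       /* later: finds and takes a ref */

        if (a && b)
                printf("same node: %d, ref: %d\n", a == b, b->ref); /* 1, 2 */
        return 0;
}

Callers that should only find existing devices pass do_alloc == 0 and simply get NULL back when no device matches.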