/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */
/*
 * aoedev.c
 * AoE device utility functions; maintains device list.
 */

#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include "aoe.h"

static void dummy_timer(ulong);
static void aoedev_freedev(struct aoedev *);
static void freetgt(struct aoedev *d, struct aoetgt *t);
static void skbpoolfree(struct aoedev *d);

static struct aoedev *devlist;
static DEFINE_SPINLOCK(devlist_lock);

/* find a device by its AoE major/minor address, or return NULL */
struct aoedev *
aoedev_by_aoeaddr(int maj, int min)
{
	struct aoedev *d;
	ulong flags;

	spin_lock_irqsave(&devlist_lock, flags);

	for (d=devlist; d; d=d->next)
		if (d->aoemajor == maj && d->aoeminor == min)
			break;

	spin_unlock_irqrestore(&devlist_lock, flags);
	return d;
}

/* rearm the device timer every second until the device is being killed */
static void
dummy_timer(ulong vp)
{
	struct aoedev *d;

	d = (struct aoedev *)vp;
	if (d->flags & DEVFL_TKILL)
		return;
	d->timer.expires = jiffies + HZ;
	add_timer(&d->timer);
}

/* fail all outstanding and queued I/O with -EIO and mark the device down */
void
aoedev_downdev(struct aoedev *d)
{
	struct aoetgt **t, **te;
	struct frame *f, *e;
	struct buf *buf;
	struct bio *bio;

	t = d->targets;
	te = t + NTARGETS;
	for (; t < te && *t; t++) {
		f = (*t)->frames;
		e = f + (*t)->nframes;
		for (; f < e; f->tag = FREETAG, f->buf = NULL, f++) {
			if (f->tag == FREETAG || f->buf == NULL)
				continue;
			buf = f->buf;
			bio = buf->bio;
			if (--buf->nframesout == 0
			&& buf != d->inprocess) {
				mempool_free(buf, d->bufpool);
				bio_endio(bio, -EIO);
			}
		}
		(*t)->maxout = (*t)->nframes;
		(*t)->nout = 0;
	}
	buf = d->inprocess;
	if (buf) {
		bio = buf->bio;
		mempool_free(buf, d->bufpool);
		bio_endio(bio, -EIO);
	}
	d->inprocess = NULL;
	d->htgt = NULL;

	while (!list_empty(&d->bufq)) {
		buf = container_of(d->bufq.next, struct buf, bufs);
		list_del(d->bufq.next);
		bio = buf->bio;
		mempool_free(buf, d->bufpool);
		bio_endio(bio, -EIO);
	}

	if (d->gd)
		d->gd->capacity = 0;

	d->flags &= ~DEVFL_UP;
}

/* free the gendisk, targets, mempool, skb pool, and the aoedev itself */
static void
aoedev_freedev(struct aoedev *d)
{
	struct aoetgt **t, **e;

	if (d->gd) {
		aoedisk_rm_sysfs(d);
		del_gendisk(d->gd);
		put_disk(d->gd);
	}
	t = d->targets;
	e = t + NTARGETS;
	for (; t < e && *t; t++)
		freetgt(d, *t);
	if (d->bufpool)
		mempool_destroy(d->bufpool);
	skbpoolfree(d);
	kfree(d);
}

/*
 * remove unused devices from the device list and free them; a request
 * beginning with "all" also removes devices that are still up, as long
 * as they are not open or in the middle of gendisk setup
 */
int
aoedev_flush(const char __user *str, size_t cnt)
{
	ulong flags;
	struct aoedev *d, **dd;
	struct aoedev *rmd = NULL;
	char buf[16];
	int all = 0;

	if (cnt >= 3) {
		if (cnt > sizeof buf)
			cnt = sizeof buf;
		if (copy_from_user(buf, str, cnt))
			return -EFAULT;
		all = !strncmp(buf, "all", 3);
	}

	flush_scheduled_work();
	spin_lock_irqsave(&devlist_lock, flags);
	dd = &devlist;
	while ((d = *dd)) {
		spin_lock(&d->lock);
		if ((!all && (d->flags & DEVFL_UP))
		|| (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		|| d->nopen) {
			spin_unlock(&d->lock);
			dd = &d->next;
			continue;
		}
		*dd = d->next;
		aoedev_downdev(d);
		d->flags |= DEVFL_TKILL;
		spin_unlock(&d->lock);
		d->next = rmd;
		rmd = d;
	}
	spin_unlock_irqrestore(&devlist_lock, flags);
	while ((d = rmd)) {
		rmd = d->next;
		del_timer_sync(&d->timer);
		aoedev_freedev(d);	/* must be able to sleep */
	}
	return 0;
}

/* I'm not really sure that this is a realistic problem, but if the
network driver goes gonzo let's just leak memory after complaining. */
static void
skbfree(struct sk_buff *skb)
{
	enum { Sms = 100, Tms = 3*1000};
	int i = Tms / Sms;

	if (skb == NULL)
		return;
	while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0)
		msleep(Sms);
	if (i <= 0) {
		printk(KERN_ERR
			"aoe: %s holds ref: %s\n",
			skb->dev ? skb->dev->name : "netif",
			"cannot free skb -- memory leaked.");
		return;
	}
	skb_shinfo(skb)->nr_frags = skb->data_len = 0;
	skb_trim(skb, 0);
	dev_kfree_skb(skb);
}

/* free every skb in the device's private skb pool */
static void
skbpoolfree(struct aoedev *d)
{
	struct sk_buff *skb;

	while ((skb = d->skbpool_hd)) {
		d->skbpool_hd = skb->next;
		skb->next = NULL;
		skbfree(skb);
	}
	d->skbpool_tl = NULL;
}

/* find it or malloc it */
struct aoedev *
aoedev_by_sysminor_m(ulong sysminor)
{
	struct aoedev *d;
	ulong flags;

	spin_lock_irqsave(&devlist_lock, flags);

	for (d=devlist; d; d=d->next)
		if (d->sysminor == sysminor)
			break;
	if (d)
		goto out;
	d = kcalloc(1, sizeof *d, GFP_ATOMIC);
	if (!d)
		goto out;
	INIT_WORK(&d->work, aoecmd_sleepwork);
	spin_lock_init(&d->lock);
	init_timer(&d->timer);
	d->timer.data = (ulong) d;
	d->timer.function = dummy_timer;
	d->timer.expires = jiffies + HZ;
	add_timer(&d->timer);
	d->bufpool = NULL;	/* defer to aoeblk_gdalloc */
	d->tgt = d->targets;
	INIT_LIST_HEAD(&d->bufq);
	d->sysminor = sysminor;
	d->aoemajor = AOEMAJOR(sysminor);
	d->aoeminor = AOEMINOR(sysminor);
	d->mintimer = MINTIMER;
	d->next = devlist;
	devlist = d;
 out:
	spin_unlock_irqrestore(&devlist_lock, flags);
	return d;
}

/* free a target's frames, their skbs, and the target itself */
static void
freetgt(struct aoedev *d, struct aoetgt *t)
{
	struct frame *f, *e;

	f = t->frames;
	e = f + t->nframes;
	for (; f < e; f++)
		skbfree(f->skb);
	kfree(t->frames);
	kfree(t);
}

/* take down and free every remaining device */
void
aoedev_exit(void)
{
	struct aoedev *d;
	ulong flags;

	flush_scheduled_work();

	while ((d = devlist)) {
		devlist = d->next;

		spin_lock_irqsave(&d->lock, flags);
		aoedev_downdev(d);
		d->flags |= DEVFL_TKILL;
		spin_unlock_irqrestore(&d->lock, flags);

		del_timer_sync(&d->timer);
		aoedev_freedev(d);
	}
}

int __init
aoedev_init(void)
{
	return 0;
}