/* Copyright (c) 2012 Coraid, Inc. See COPYING for GPL terms. */
/*
 * aoeblk.c
 * block device routines
 *
 * Registers the per-device gendisk, its request queue, and the sysfs
 * attributes exported for each AoE device (state, mac, netif,
 * firmware-version, payload).
 */

#include <linux/kernel.h>
#include <linux/hdreg.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/genhd.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <scsi/sg.h>
#include "aoe.h"

/* Serializes open accounting against other users of this mutex. */
static DEFINE_MUTEX(aoeblk_mutex);
/* Slab cache backing each device's struct buf mempool (see aoeblk_gdalloc). */
static struct kmem_cache *buf_pool_cache;

/* GPFS needs a larger value than the default. */
static int aoe_maxsectors;
module_param(aoe_maxsectors, int, 0644);
MODULE_PARM_DESC(aoe_maxsectors,
	"When nonzero, set the maximum number of sectors per I/O request");

/*
 * sysfs "state": "up" or "down", optionally suffixed with ",kickme",
 * or ",closewait" when the device is down but still held open.
 */
static ssize_t aoedisk_show_state(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE,
			"%s%s\n",
			(d->flags & DEVFL_UP) ? "up" : "down",
			(d->flags & DEVFL_KICKME) ? ",kickme" :
			(d->nopen && !(d->flags & DEVFL_UP)) ?
			",closewait" : "");
	/* I'd rather see nopen exported so we can ditch closewait */
}

/* sysfs "mac": MAC address of the first target, or "none" if there is none. */
static ssize_t aoedisk_show_mac(struct device *dev,
				struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;
	struct aoetgt *t = d->targets[0];

	if (t == NULL)
		return snprintf(page, PAGE_SIZE, "none\n");
	return snprintf(page, PAGE_SIZE, "%pm\n", t->addr);
}

/*
 * sysfs "netif": comma-separated names of the local network interfaces
 * associated with any of this device's targets.  Interface pointers are
 * de-duplicated into nds[]; at most ARRAY_SIZE(nds) (8) distinct
 * interfaces are reported, extras are silently dropped.
 */
static ssize_t aoedisk_show_netif(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;
	struct net_device *nds[8], **nd, **nnd, **ne;
	struct aoetgt **t, **te;
	struct aoeif *ifp, *e;
	char *p;

	memset(nds, 0, sizeof nds);
	nd = nds;
	ne = nd + ARRAY_SIZE(nds);
	t = d->targets;
	te = t + d->ntargets;
	/* Collect each target's interfaces, skipping pointers already seen. */
	for (; t < te && *t; t++) {
		ifp = (*t)->ifs;
		e = ifp + NAOEIFS;
		for (; ifp < e && ifp->nd; ifp++) {
			for (nnd = nds; nnd < nd; nnd++)
				if (*nnd == ifp->nd)
					break;
			if (nnd == nd && nd != ne)
				*nd++ = ifp->nd;
		}
	}

	/* Reuse ne as "end of collected entries" and rewind nd to the start. */
	ne = nd;
	nd = nds;
	if (*nd == NULL)
		return snprintf(page, PAGE_SIZE, "none\n");
	for (p = page; nd < ne; nd++)
		p += snprintf(p, PAGE_SIZE - (p-page), "%s%s",
			p == page ? "" : ",", (*nd)->name);
	p += snprintf(p, PAGE_SIZE - (p-page), "\n");
	return p-page;
}
/* firmware version */
static ssize_t aoedisk_show_fwver(struct device *dev,
				  struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE, "0x%04x\n", (unsigned int) d->fw_ver);
}

/* sysfs "payload": the device's maximum per-frame byte count (d->maxbcnt). */
static ssize_t aoedisk_show_payload(struct device *dev,
				    struct device_attribute *attr, char *page)
{
	struct gendisk *disk = dev_to_disk(dev);
	struct aoedev *d = disk->private_data;

	return snprintf(page, PAGE_SIZE, "%lu\n", d->maxbcnt);
}

static DEVICE_ATTR(state, S_IRUGO, aoedisk_show_state, NULL);
static DEVICE_ATTR(mac, S_IRUGO, aoedisk_show_mac, NULL);
static DEVICE_ATTR(netif, S_IRUGO, aoedisk_show_netif, NULL);
/*
 * Open-coded instead of DEVICE_ATTR() because the sysfs name contains a
 * hyphen, which cannot appear in the C identifier the macro would build.
 */
static struct device_attribute dev_attr_firmware_version = {
	.attr = { .name = "firmware-version", .mode = S_IRUGO },
	.show = aoedisk_show_fwver,
};
static DEVICE_ATTR(payload, S_IRUGO, aoedisk_show_payload, NULL);

static struct attribute *aoe_attrs[] = {
	&dev_attr_state.attr,
	&dev_attr_mac.attr,
	&dev_attr_netif.attr,
	&dev_attr_firmware_version.attr,
	&dev_attr_payload.attr,
	NULL,
};

static const struct attribute_group attr_group = {
	.attrs = aoe_attrs,
};

/* Attach the attribute group above to the disk's sysfs directory. */
static int
aoedisk_add_sysfs(struct aoedev *d)
{
	return sysfs_create_group(&disk_to_dev(d->gd)->kobj, &attr_group);
}
/* Counterpart to aoedisk_add_sysfs(); called from outside this file. */
void
aoedisk_rm_sysfs(struct aoedev *d)
{
	sysfs_remove_group(&disk_to_dev(d->gd)->kobj, &attr_group);
}

/*
 * Block-device open.  Counts the open in d->nopen.  The first UP/TKILL
 * test is an unlocked fast-path rejection; the decision that counts is
 * re-made under d->lock, so a device torn down between the two checks
 * is still refused.  Returns 0 or -ENODEV.
 */
static int
aoeblk_open(struct block_device *bdev, fmode_t mode)
{
	struct aoedev *d = bdev->bd_disk->private_data;
	ulong flags;

	/* Defensive check against a stale/corrupt private_data pointer. */
	if (!virt_addr_valid(d)) {
		pr_crit("aoe: invalid device pointer in %s\n",
			__func__);
		WARN_ON(1);
		return -ENODEV;
	}
	if (!(d->flags & DEVFL_UP) || d->flags & DEVFL_TKILL)
		return -ENODEV;

	mutex_lock(&aoeblk_mutex);
	spin_lock_irqsave(&d->lock, flags);
	if (d->flags & DEVFL_UP && !(d->flags & DEVFL_TKILL)) {
		d->nopen++;
		spin_unlock_irqrestore(&d->lock, flags);
		mutex_unlock(&aoeblk_mutex);
		return 0;
	}
	spin_unlock_irqrestore(&d->lock, flags);
	mutex_unlock(&aoeblk_mutex);
	return -ENODEV;
}

/*
 * Block-device release.  On the last close (nopen reaches 0), issues an
 * AoE config query via aoecmd_cfg() — NOTE(review): presumably to
 * refresh/notify device state; confirm against aoecmd.c.
 */
static int
aoeblk_release(struct gendisk *disk, fmode_t mode)
{
	struct aoedev *d = disk->private_data;
	ulong flags;

	spin_lock_irqsave(&d->lock, flags);

	if (--d->nopen == 0) {
		spin_unlock_irqrestore(&d->lock, flags);
		aoecmd_cfg(d->aoemajor, d->aoeminor);
		return 0;
	}
	spin_unlock_irqrestore(&d->lock, flags);

	return 0;
}

/*
 * Legacy request_fn, invoked by the block layer with d->lock held (the
 * queue was created with blk_init_queue(aoeblk_request, &d->lock)).
 * If the device is down, drain the queue, completing every request via
 * aoe_end_request(d, rq, 1) — the 1 appears to mark fatal completion;
 * otherwise hand work to aoecmd_work().
 */
static void
aoeblk_request(struct request_queue *q)
{
	struct aoedev *d;
	struct request *rq;

	d = q->queuedata;
	if ((d->flags & DEVFL_UP) == 0) {
		pr_info_ratelimited("aoe: device %ld.%d is not up\n",
			d->aoemajor, d->aoeminor);
		while ((rq = blk_peek_request(q))) {
			blk_start_request(rq);
			aoe_end_request(d, rq, 1);
		}
		return;
	}
	aoecmd_work(d);
}

/* HDIO_GETGEO support: report the geometry cached in d->geo. */
static int
aoeblk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct aoedev *d = bdev->bd_disk->private_data;

	if ((d->flags & DEVFL_UP) == 0) {
		printk(KERN_ERR "aoe: disk not up\n");
		return -ENODEV;
	}

	geo->cylinders = d->geo.cylinders;
	geo->heads = d->geo.heads;
	geo->sectors = d->geo.sectors;
	return 0;
}

/*
 * ioctl handler.  Only HDIO_GET_IDENTITY is implemented (copies the
 * cached ATA identify data d->ident to userspace); everything else is
 * -ENOTTY.  SG_IO is rejected without logging to avoid noise from udev.
 */
static int
aoeblk_ioctl(struct block_device *bdev, fmode_t mode, uint cmd, ulong arg)
{
	struct aoedev *d;

	if (!arg)
		return -EINVAL;

	d = bdev->bd_disk->private_data;
	if ((d->flags & DEVFL_UP) == 0) {
		pr_err("aoe: disk not up\n");
		return -ENODEV;
	}

	if (cmd == HDIO_GET_IDENTITY) {
		if (!copy_to_user((void __user *) arg, &d->ident,
			sizeof(d->ident)))
			return 0;
		return -EFAULT;
	}

	/* udev calls scsi_id, which uses SG_IO, resulting in noise */
	if (cmd != SG_IO)
		pr_info("aoe: unknown ioctl 0x%x\n", cmd);

	return -ENOTTY;
}

static const struct block_device_operations aoe_bdops = {
	.open = aoeblk_open,
	.release = aoeblk_release,
	.ioctl = aoeblk_ioctl,
	.getgeo = aoeblk_getgeo,
	.owner = THIS_MODULE,
};

/* alloc_disk and add_disk can sleep */
/*
 * Allocate and register the gendisk, buf mempool, and request queue for
 * device d, then mark it DEVFL_UP.  DEVFL_GD_NOW guards against a
 * second concurrent allocation attempt: a caller that finds it already
 * set (or GDALLOC clear, or TKILL set) backs out as "late".  On
 * allocation failure the partial resources are unwound and d->work is
 * rescheduled — NOTE(review): presumably to retry or finish teardown;
 * confirm what d->work runs.
 */
void
aoeblk_gdalloc(void *vp)
{
	struct aoedev *d = vp;
	struct gendisk *gd;
	mempool_t *mp;
	struct request_queue *q;
	enum { KB = 1024, MB = KB * KB, READ_AHEAD = 2 * MB, };
	ulong flags;
	int late = 0;

	spin_lock_irqsave(&d->lock, flags);
	if (d->flags & DEVFL_GDALLOC
	&& !(d->flags & DEVFL_TKILL)
	&& !(d->flags & DEVFL_GD_NOW))
		d->flags |= DEVFL_GD_NOW;
	else
		late = 1;
	spin_unlock_irqrestore(&d->lock, flags);
	if (late)
		return;

	gd = alloc_disk(AOE_PARTITIONS);
	if (gd == NULL) {
		pr_err("aoe: cannot allocate disk structure for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err;
	}

	mp = mempool_create(MIN_BUFS, mempool_alloc_slab, mempool_free_slab,
		buf_pool_cache);
	if (mp == NULL) {
		printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err_disk;
	}
	/* d->lock doubles as the request-queue lock for aoeblk_request(). */
	q = blk_init_queue(aoeblk_request, &d->lock);
	if (q == NULL) {
		pr_err("aoe: cannot allocate block queue for %ld.%d\n",
			d->aoemajor, d->aoeminor);
		goto err_mempool;
	}

	spin_lock_irqsave(&d->lock, flags);
	/* Sanity: we must be the sole allocator and the device not yet up. */
	WARN_ON(!(d->flags & DEVFL_GD_NOW));
	WARN_ON(!(d->flags & DEVFL_GDALLOC));
	WARN_ON(d->flags & DEVFL_TKILL);
	WARN_ON(d->gd);
	WARN_ON(d->flags & DEVFL_UP);
	/* Default limit first; the module parameter below may override it. */
	blk_queue_max_hw_sectors(q, BLK_DEF_MAX_SECTORS);
	q->backing_dev_info.name = "aoe";
	q->backing_dev_info.ra_pages = READ_AHEAD / PAGE_CACHE_SIZE;
	d->bufpool = mp;
	d->blkq = gd->queue = q;
	q->queuedata = d;
	d->gd = gd;
	if (aoe_maxsectors)
		blk_queue_max_hw_sectors(q, aoe_maxsectors);
	gd->major = AOE_MAJOR;
	gd->first_minor = d->sysminor;
	gd->fops = &aoe_bdops;
	gd->private_data = d;
	set_capacity(gd, d->ssize);
	snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
		d->aoemajor, d->aoeminor);

	d->flags &= ~DEVFL_GDALLOC;
	d->flags |= DEVFL_UP;

	spin_unlock_irqrestore(&d->lock, flags);

	add_disk(gd);
	aoedisk_add_sysfs(d);

	spin_lock_irqsave(&d->lock, flags);
	WARN_ON(!(d->flags & DEVFL_GD_NOW));
	d->flags &= ~DEVFL_GD_NOW;
	spin_unlock_irqrestore(&d->lock, flags);
	return;

err_mempool:
	mempool_destroy(mp);
err_disk:
	put_disk(gd);
err:
	spin_lock_irqsave(&d->lock, flags);
	d->flags &= ~DEVFL_GD_NOW;
	schedule_work(&d->work);
	spin_unlock_irqrestore(&d->lock, flags);
}

/* Module teardown for this file: destroy the buf slab cache. */
void
aoeblk_exit(void)
{
	kmem_cache_destroy(buf_pool_cache);
}

/* Module init for this file: create the slab cache used by per-device
 * mempools in aoeblk_gdalloc().  Returns 0 or -ENOMEM.
 */
int __init
aoeblk_init(void)
{
	buf_pool_cache = kmem_cache_create("aoe_bufs",
					   sizeof(struct buf),
					   0, 0, NULL);
	if (buf_pool_cache == NULL)
		return -ENOMEM;

	return 0;
}