/*
 * ccw based virtio transport
 *
 * Copyright IBM Corp. 2012, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
#include <linux/pfn.h>
#include <linux/async.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/kvm_para.h>
#include <linux/notifier.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/virtio-ccw.h>
#include <asm/isc.h>
#include <asm/airq.h>

/*
 * virtio related functions
 */

/* Request/response area for CCW_CMD_READ_VQ_CONF. */
struct vq_config_block {
	__u16 index;	/* queue index to query */
	__u16 num;	/* returned queue size */
} __packed;

#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */

/* Per-device state for a virtio device behind a ccw subchannel. */
struct virtio_ccw_device {
	struct virtio_device vdev;
	__u8 *status;			/* device status byte (31-bit DMA memory) */
	__u8 config[VIRTIO_CCW_CONFIG_SIZE];	/* cached config space */
	struct ccw_device *cdev;
	__u32 curr_io;			/* VIRTIO_CCW_DOING_* flags of in-flight I/O */
	int err;			/* error of current/last channel program */
	unsigned int revision;		/* Transport revision */
	wait_queue_head_t wait_q;	/* woken from the interrupt handler */
	spinlock_t lock;		/* protects the virtqueues list */
	struct list_head virtqueues;
	unsigned long indicators;	/* classic per-queue indicator bits */
	unsigned long indicators2;	/* config-change indicator bits */
	struct vq_config_block *config_block;
	bool is_thinint;		/* using adapter (thin) interrupts? */
	bool going_away;		/* offline/remove in progress */
	bool device_lost;		/* CIO reported the device gone */
	unsigned int config_ready;	/* bytes of config already fetched */
	void *airq_info;		/* adapter interrupt area, if thinint */
};

/* SET_VQ payload, legacy (revision 0) layout. */
struct vq_info_block_legacy {
	__u64 queue;
	__u32 align;
	__u16 index;
	__u16 num;
} __packed;

/* SET_VQ payload, revision 1+ layout. */
struct vq_info_block {
	__u64 desc;
	__u32 res0;
	__u16 index;
	__u16 num;
	__u64 avail;
	__u64 used;
} __packed;

/* Payload for READ_FEAT/WRITE_FEAT; index selects 32-bit feature word. */
struct virtio_feature_desc {
	__u32 features;
	__u8 index;
} __packed;

/* Payload for SET_IND_ADAPTER (thin interrupt registration). */
struct virtio_thinint_area {
	unsigned long summary_indicator;
	unsigned long indicator;
	u64 bit_nr;
	u8 isc;
} __packed;

/* Payload for SET_VIRTIO_REV (transport revision negotiation). */
struct virtio_rev_info {
	__u16 revision;
	__u16 length;
	__u8 data[];
};

/* the highest virtio-ccw revision we support */
#define VIRTIO_CCW_REV_MAX 1

/* Per-virtqueue bookkeeping. */
struct virtio_ccw_vq_info {
	struct virtqueue *vq;
	int num;			/* ring size */
	void *queue;			/* ring memory */
	union {
		struct vq_info_block s;
		struct vq_info_block_legacy l;
	} *info_block;			/* DMA area used for SET_VQ */
	int bit_nr;			/* indicator bit if using adapter irqs */
	struct list_head node;
	long cookie;			/* host notify cookie (diag 0x500) */
};

#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */

#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
#define MAX_AIRQ_AREAS 20

static int virtio_ccw_use_airq = 1;

/* One adapter interrupt indicator area shared by several devices. */
struct airq_info {
	rwlock_t lock;
	u8 summary_indicator;
	struct airq_struct airq;
	struct airq_iv *aiv;
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];

/* CCW commands defined by the virtio-ccw transport. */
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_FEAT 0x12
#define CCW_CMD_WRITE_FEAT 0x11
#define CCW_CMD_READ_CONF 0x22
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
#define CCW_CMD_SET_IND_ADAPTER 0x73
#define CCW_CMD_SET_VIRTIO_REV 0x83

/* intparm activity flags; upper 16 bits identify the pending operation. */
#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define \
VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000

static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
{
	return container_of(vdev, struct virtio_ccw_device, vdev);
}

/* Release the indicator bit a virtqueue occupies in an airq area. */
static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
{
	unsigned long i, flags;

	write_lock_irqsave(&info->lock, flags);
	/* Linear search for the bit whose stored pointer matches vq. */
	for (i = 0; i < airq_iv_end(info->aiv); i++) {
		if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
			airq_iv_free_bit(info->aiv, i);
			airq_iv_set_ptr(info->aiv, i, 0);
			break;
		}
	}
	write_unlock_irqrestore(&info->lock, flags);
}

/*
 * Adapter interrupt handler: scan the indicator vector and dispatch
 * vring interrupts for every bit that is set.
 */
static void virtio_airq_handler(struct airq_struct *airq)
{
	struct airq_info *info = container_of(airq, struct airq_info, airq);
	unsigned long ai;

	inc_irq_stat(IRQIO_VAI);
	read_lock(&info->lock);
	/* Walk through indicators field, summary indicator active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	info->summary_indicator = 0;
	smp_wmb();
	/* Walk through indicators field, summary indicator not active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	read_unlock(&info->lock);
}

/* Allocate and register a fresh adapter interrupt indicator area. */
static struct airq_info *new_airq_info(void)
{
	struct airq_info *info;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;
	rwlock_init(&info->lock);
	info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
	if (!info->aiv) {
		kfree(info);
		return NULL;
	}
	info->airq.handler = virtio_airq_handler;
	info->airq.lsi_ptr = &info->summary_indicator;
	info->airq.lsi_mask = 0xff;
	info->airq.isc = VIRTIO_AIRQ_ISC;
	rc = register_adapter_interrupt(&info->airq);
	if (rc) {
		airq_iv_release(info->aiv);
		kfree(info);
		return NULL;
	}
	return info;
}

static void destroy_airq_info(struct airq_info *info)
{
	if (!info)
		return;

	unregister_adapter_interrupt(&info->airq);
	airq_iv_release(info->aiv);
	kfree(info);
}

/*
 * Find (or create) an airq area with nvqs contiguous free bits, store the
 * virtqueue pointers there, and return the indicator address (0 on failure).
 * *first receives the first bit number, *airq_info the chosen area.
 */
static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
					u64 *first, void **airq_info)
{
	int i, j;
	struct airq_info *info;
	unsigned long indicator_addr = 0;
	unsigned long bit, flags;

	for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
		/* Areas are created lazily on first use. */
		if (!airq_areas[i])
			airq_areas[i] = new_airq_info();
		info = airq_areas[i];
		if (!info)
			return 0;
		write_lock_irqsave(&info->lock, flags);
		bit = airq_iv_alloc(info->aiv, nvqs);
		if (bit == -1UL) {
			/* Not enough vacancies. */
			write_unlock_irqrestore(&info->lock, flags);
			continue;
		}
		*first = bit;
		*airq_info = info;
		indicator_addr = (unsigned long)info->aiv->vector;
		for (j = 0; j < nvqs; j++) {
			airq_iv_set_ptr(info->aiv, bit + j,
					(unsigned long)vqs[j]);
		}
		write_unlock_irqrestore(&info->lock, flags);
	}
	return indicator_addr;
}

/* Drop the airq indicator bits for all virtqueues of a device. */
static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
{
	struct virtio_ccw_vq_info *info;

	list_for_each_entry(info, &vcdev->virtqueues, node)
		drop_airq_indicator(info->vq, vcdev->airq_info);
}

/* Is an I/O with the given activity flag still in flight (and error-free)? */
static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
	unsigned long flags;
	__u32 ret;

	spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
	if (vcdev->err)
		ret = 0;
	else
		ret = vcdev->curr_io & flag;
	spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
	return ret;
}

/*
 * Start a channel program and wait (sleeping) for its completion.
 * Retries while the subchannel is busy; returns the start error or the
 * error reported by the interrupt handler via vcdev->err.
 */
static int ccw_io_helper(struct virtio_ccw_device *vcdev,
			 struct ccw1 *ccw, __u32 intparm)
{
	int ret;
	unsigned long flags;
	int flag = intparm & VIRTIO_CCW_INTPARM_MASK;

	do {
		spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
		ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
		if (!ret) {
			if (!vcdev->curr_io)
				vcdev->err = 0;
			vcdev->curr_io |= flag;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
		cpu_relax();
	} while (ret == -EBUSY);
	wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
	return ret ?
ret : vcdev->err; 322 } 323 324 static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev, 325 struct ccw1 *ccw) 326 { 327 int ret; 328 unsigned long *indicatorp = NULL; 329 struct virtio_thinint_area *thinint_area = NULL; 330 struct airq_info *airq_info = vcdev->airq_info; 331 332 if (vcdev->is_thinint) { 333 thinint_area = kzalloc(sizeof(*thinint_area), 334 GFP_DMA | GFP_KERNEL); 335 if (!thinint_area) 336 return; 337 thinint_area->summary_indicator = 338 (unsigned long) &airq_info->summary_indicator; 339 thinint_area->isc = VIRTIO_AIRQ_ISC; 340 ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER; 341 ccw->count = sizeof(*thinint_area); 342 ccw->cda = (__u32)(unsigned long) thinint_area; 343 } else { 344 indicatorp = kmalloc(sizeof(&vcdev->indicators), 345 GFP_DMA | GFP_KERNEL); 346 if (!indicatorp) 347 return; 348 *indicatorp = 0; 349 ccw->cmd_code = CCW_CMD_SET_IND; 350 ccw->count = sizeof(vcdev->indicators); 351 ccw->cda = (__u32)(unsigned long) indicatorp; 352 } 353 /* Deregister indicators from host. */ 354 vcdev->indicators = 0; 355 ccw->flags = 0; 356 ret = ccw_io_helper(vcdev, ccw, 357 vcdev->is_thinint ? 
358 VIRTIO_CCW_DOING_SET_IND_ADAPTER : 359 VIRTIO_CCW_DOING_SET_IND); 360 if (ret && (ret != -ENODEV)) 361 dev_info(&vcdev->cdev->dev, 362 "Failed to deregister indicators (%d)\n", ret); 363 else if (vcdev->is_thinint) 364 virtio_ccw_drop_indicators(vcdev); 365 kfree(indicatorp); 366 kfree(thinint_area); 367 } 368 369 static inline long do_kvm_notify(struct subchannel_id schid, 370 unsigned long queue_index, 371 long cookie) 372 { 373 register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY; 374 register struct subchannel_id __schid asm("2") = schid; 375 register unsigned long __index asm("3") = queue_index; 376 register long __rc asm("2"); 377 register long __cookie asm("4") = cookie; 378 379 asm volatile ("diag 2,4,0x500\n" 380 : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index), 381 "d"(__cookie) 382 : "memory", "cc"); 383 return __rc; 384 } 385 386 static bool virtio_ccw_kvm_notify(struct virtqueue *vq) 387 { 388 struct virtio_ccw_vq_info *info = vq->priv; 389 struct virtio_ccw_device *vcdev; 390 struct subchannel_id schid; 391 392 vcdev = to_vc_device(info->vq->vdev); 393 ccw_device_get_schid(vcdev->cdev, &schid); 394 info->cookie = do_kvm_notify(schid, vq->index, info->cookie); 395 if (info->cookie < 0) 396 return false; 397 return true; 398 } 399 400 static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev, 401 struct ccw1 *ccw, int index) 402 { 403 vcdev->config_block->index = index; 404 ccw->cmd_code = CCW_CMD_READ_VQ_CONF; 405 ccw->flags = 0; 406 ccw->count = sizeof(struct vq_config_block); 407 ccw->cda = (__u32)(unsigned long)(vcdev->config_block); 408 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF); 409 return vcdev->config_block->num; 410 } 411 412 static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw) 413 { 414 struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev); 415 struct virtio_ccw_vq_info *info = vq->priv; 416 unsigned long flags; 417 unsigned long size; 418 int ret; 419 unsigned int index = 
vq->index;

	/* Remove from our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	/* Release from host. */
	if (vcdev->revision == 0) {
		info->info_block->l.queue = 0;
		info->info_block->l.align = 0;
		info->info_block->l.index = index;
		info->info_block->l.num = 0;
		ccw->count = sizeof(info->info_block->l);
	} else {
		info->info_block->s.desc = 0;
		info->info_block->s.index = index;
		info->info_block->s.num = 0;
		info->info_block->s.avail = 0;
		info->info_block->s.used = 0;
		ccw->count = sizeof(info->info_block->s);
	}
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	ret = ccw_io_helper(vcdev, ccw,
			    VIRTIO_CCW_DOING_SET_VQ | index);
	/*
	 * -ENODEV isn't considered an error: The device is gone anyway.
	 * This may happen on device detach.
	 */
	if (ret && (ret != -ENODEV))
		dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
			 ret, index);

	vring_del_virtqueue(vq);
	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
	free_pages_exact(info->queue, size);
	kfree(info->info_block);
	kfree(info);
}

/* Drop indicators and tear down all virtqueues of a device. */
static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct ccw1 *ccw;
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	virtio_ccw_drop_indicator(vcdev, ccw);

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		virtio_ccw_del_vq(vq, ccw);

	kfree(ccw);
}

/*
 * Create virtqueue i: query its size, allocate the ring, create the
 * vring and register it with the host via SET_VQ (legacy or rev 1 format).
 * Returns the virtqueue or ERR_PTR on failure.
 */
static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
					     int i, vq_callback_t *callback,
					     const char *name,
					     struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int err;
	struct virtqueue *vq = NULL;
	struct virtio_ccw_vq_info *info;
	unsigned long size = 0; /* silence the compiler */
	unsigned long flags;

	/* Allocate queue. */
	info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
	if (!info) {
		dev_warn(&vcdev->cdev->dev, "no info\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->info_block = kzalloc(sizeof(*info->info_block),
				   GFP_DMA | GFP_KERNEL);
	if (!info->info_block) {
		dev_warn(&vcdev->cdev->dev, "no info block\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
	info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (info->queue == NULL) {
		dev_warn(&vcdev->cdev->dev, "no queue\n");
		err = -ENOMEM;
		goto out_err;
	}

	vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
				 true, info->queue, virtio_ccw_kvm_notify,
				 callback, name);
	if (!vq) {
		/* For now, we fail if we can't get the requested size. */
		dev_warn(&vcdev->cdev->dev, "no vq\n");
		err = -ENOMEM;
		goto out_err;
	}

	/* Register it with the host. */
	if (vcdev->revision == 0) {
		info->info_block->l.queue = (__u64)info->queue;
		info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
		info->info_block->l.index = i;
		info->info_block->l.num = info->num;
		ccw->count = sizeof(info->info_block->l);
	} else {
		info->info_block->s.desc = (__u64)info->queue;
		info->info_block->s.index = i;
		info->info_block->s.num = info->num;
		info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
		info->info_block->s.used = (__u64)virtqueue_get_used(vq);
		ccw->count = sizeof(info->info_block->s);
	}
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
	if (err) {
		dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
		goto out_err;
	}

	info->vq = vq;
	vq->priv = info;

	/* Save it to our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_add(&info->node, &vcdev->virtqueues);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	return vq;

out_err:
	if (vq)
		vring_del_virtqueue(vq);
	if (info) {
		if (info->queue)
			free_pages_exact(info->queue, size);
		kfree(info->info_block);
	}
	kfree(info);
	return ERR_PTR(err);
}

/*
 * Register adapter (thin) interrupts for all virtqueues with the host.
 * On -EOPNOTSUPP, adapter interrupts are disabled globally and the caller
 * falls back to classic indicators.
 */
static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
					   struct virtqueue *vqs[], int nvqs,
					   struct ccw1 *ccw)
{
	int ret;
	struct virtio_thinint_area *thinint_area = NULL;
	struct airq_info *info;

	thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
	if (!thinint_area) {
		ret = -ENOMEM;
		goto out;
	}
	/* Try to get an indicator. */
	thinint_area->indicator = get_airq_indicator(vqs, nvqs,
						     &thinint_area->bit_nr,
						     &vcdev->airq_info);
	if (!thinint_area->indicator) {
		ret = -ENOSPC;
		goto out;
	}
	info = vcdev->airq_info;
	thinint_area->summary_indicator =
		(unsigned long) &info->summary_indicator;
	thinint_area->isc = VIRTIO_AIRQ_ISC;
	ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(*thinint_area);
	ccw->cda = (__u32)(unsigned long)thinint_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			/*
			 * The host does not support adapter interrupts
			 * for virtio-ccw, stop trying.
			 */
			virtio_ccw_use_airq = 0;
			pr_info("Adapter interrupts unsupported on host\n");
		} else
			dev_warn(&vcdev->cdev->dev,
				 "enabling adapter interrupts = %d\n", ret);
		virtio_ccw_drop_indicators(vcdev);
	}
out:
	kfree(thinint_area);
	return ret;
}

/*
 * find_vqs callback: create all requested virtqueues and register the
 * queue and config-change indicators with the host.
 */
static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			       struct virtqueue *vqs[],
			       vq_callback_t *callbacks[],
			       const char *names[])
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	unsigned long *indicatorp = NULL;
	int ret, i;
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return -ENOMEM;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
					     ccw);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			vqs[i] = NULL;
			goto out;
		}
	}
	ret = -ENOMEM;
	/* We need a data area under 2G to communicate.
*/ 643 indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL); 644 if (!indicatorp) 645 goto out; 646 *indicatorp = (unsigned long) &vcdev->indicators; 647 if (vcdev->is_thinint) { 648 ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw); 649 if (ret) 650 /* no error, just fall back to legacy interrupts */ 651 vcdev->is_thinint = 0; 652 } 653 if (!vcdev->is_thinint) { 654 /* Register queue indicators with host. */ 655 vcdev->indicators = 0; 656 ccw->cmd_code = CCW_CMD_SET_IND; 657 ccw->flags = 0; 658 ccw->count = sizeof(vcdev->indicators); 659 ccw->cda = (__u32)(unsigned long) indicatorp; 660 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND); 661 if (ret) 662 goto out; 663 } 664 /* Register indicators2 with host for config changes */ 665 *indicatorp = (unsigned long) &vcdev->indicators2; 666 vcdev->indicators2 = 0; 667 ccw->cmd_code = CCW_CMD_SET_CONF_IND; 668 ccw->flags = 0; 669 ccw->count = sizeof(vcdev->indicators2); 670 ccw->cda = (__u32)(unsigned long) indicatorp; 671 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND); 672 if (ret) 673 goto out; 674 675 kfree(indicatorp); 676 kfree(ccw); 677 return 0; 678 out: 679 kfree(indicatorp); 680 kfree(ccw); 681 virtio_ccw_del_vqs(vdev); 682 return ret; 683 } 684 685 static void virtio_ccw_reset(struct virtio_device *vdev) 686 { 687 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 688 struct ccw1 *ccw; 689 690 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); 691 if (!ccw) 692 return; 693 694 /* Zero status bits. */ 695 *vcdev->status = 0; 696 697 /* Send a reset ccw on device. 
*/ 698 ccw->cmd_code = CCW_CMD_VDEV_RESET; 699 ccw->flags = 0; 700 ccw->count = 0; 701 ccw->cda = 0; 702 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET); 703 kfree(ccw); 704 } 705 706 static u64 virtio_ccw_get_features(struct virtio_device *vdev) 707 { 708 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 709 struct virtio_feature_desc *features; 710 int ret; 711 u64 rc; 712 struct ccw1 *ccw; 713 714 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); 715 if (!ccw) 716 return 0; 717 718 features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL); 719 if (!features) { 720 rc = 0; 721 goto out_free; 722 } 723 /* Read the feature bits from the host. */ 724 features->index = 0; 725 ccw->cmd_code = CCW_CMD_READ_FEAT; 726 ccw->flags = 0; 727 ccw->count = sizeof(*features); 728 ccw->cda = (__u32)(unsigned long)features; 729 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT); 730 if (ret) { 731 rc = 0; 732 goto out_free; 733 } 734 735 rc = le32_to_cpu(features->features); 736 737 if (vcdev->revision == 0) 738 goto out_free; 739 740 /* Read second half of the feature bits from the host. 
*/ 741 features->index = 1; 742 ccw->cmd_code = CCW_CMD_READ_FEAT; 743 ccw->flags = 0; 744 ccw->count = sizeof(*features); 745 ccw->cda = (__u32)(unsigned long)features; 746 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT); 747 if (ret == 0) 748 rc |= (u64)le32_to_cpu(features->features) << 32; 749 750 out_free: 751 kfree(features); 752 kfree(ccw); 753 return rc; 754 } 755 756 static int virtio_ccw_finalize_features(struct virtio_device *vdev) 757 { 758 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 759 struct virtio_feature_desc *features; 760 struct ccw1 *ccw; 761 int ret; 762 763 if (vcdev->revision >= 1 && 764 !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) { 765 dev_err(&vdev->dev, "virtio: device uses revision 1 " 766 "but does not have VIRTIO_F_VERSION_1\n"); 767 return -EINVAL; 768 } 769 770 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); 771 if (!ccw) 772 return -ENOMEM; 773 774 features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL); 775 if (!features) { 776 ret = -ENOMEM; 777 goto out_free; 778 } 779 /* Give virtio_ring a chance to accept features. */ 780 vring_transport_features(vdev); 781 782 features->index = 0; 783 features->features = cpu_to_le32((u32)vdev->features); 784 /* Write the first half of the feature bits to the host. */ 785 ccw->cmd_code = CCW_CMD_WRITE_FEAT; 786 ccw->flags = 0; 787 ccw->count = sizeof(*features); 788 ccw->cda = (__u32)(unsigned long)features; 789 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT); 790 if (ret) 791 goto out_free; 792 793 if (vcdev->revision == 0) 794 goto out_free; 795 796 features->index = 1; 797 features->features = cpu_to_le32(vdev->features >> 32); 798 /* Write the second half of the feature bits to the host. 
*/ 799 ccw->cmd_code = CCW_CMD_WRITE_FEAT; 800 ccw->flags = 0; 801 ccw->count = sizeof(*features); 802 ccw->cda = (__u32)(unsigned long)features; 803 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT); 804 805 out_free: 806 kfree(features); 807 kfree(ccw); 808 809 return ret; 810 } 811 812 static void virtio_ccw_get_config(struct virtio_device *vdev, 813 unsigned int offset, void *buf, unsigned len) 814 { 815 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 816 int ret; 817 struct ccw1 *ccw; 818 void *config_area; 819 820 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); 821 if (!ccw) 822 return; 823 824 config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL); 825 if (!config_area) 826 goto out_free; 827 828 /* Read the config area from the host. */ 829 ccw->cmd_code = CCW_CMD_READ_CONF; 830 ccw->flags = 0; 831 ccw->count = offset + len; 832 ccw->cda = (__u32)(unsigned long)config_area; 833 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG); 834 if (ret) 835 goto out_free; 836 837 memcpy(vcdev->config, config_area, offset + len); 838 if (buf) 839 memcpy(buf, &vcdev->config[offset], len); 840 if (vcdev->config_ready < offset + len) 841 vcdev->config_ready = offset + len; 842 843 out_free: 844 kfree(config_area); 845 kfree(ccw); 846 } 847 848 static void virtio_ccw_set_config(struct virtio_device *vdev, 849 unsigned int offset, const void *buf, 850 unsigned len) 851 { 852 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 853 struct ccw1 *ccw; 854 void *config_area; 855 856 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); 857 if (!ccw) 858 return; 859 860 config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL); 861 if (!config_area) 862 goto out_free; 863 864 /* Make sure we don't overwrite fields. */ 865 if (vcdev->config_ready < offset) 866 virtio_ccw_get_config(vdev, 0, NULL, offset); 867 memcpy(&vcdev->config[offset], buf, len); 868 /* Write the config area to the host. 
*/ 869 memcpy(config_area, vcdev->config, sizeof(vcdev->config)); 870 ccw->cmd_code = CCW_CMD_WRITE_CONF; 871 ccw->flags = 0; 872 ccw->count = offset + len; 873 ccw->cda = (__u32)(unsigned long)config_area; 874 ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG); 875 876 out_free: 877 kfree(config_area); 878 kfree(ccw); 879 } 880 881 static u8 virtio_ccw_get_status(struct virtio_device *vdev) 882 { 883 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 884 885 return *vcdev->status; 886 } 887 888 static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status) 889 { 890 struct virtio_ccw_device *vcdev = to_vc_device(vdev); 891 u8 old_status = *vcdev->status; 892 struct ccw1 *ccw; 893 int ret; 894 895 ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL); 896 if (!ccw) 897 return; 898 899 /* Write the status to the host. */ 900 *vcdev->status = status; 901 ccw->cmd_code = CCW_CMD_WRITE_STATUS; 902 ccw->flags = 0; 903 ccw->count = sizeof(status); 904 ccw->cda = (__u32)(unsigned long)vcdev->status; 905 ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS); 906 /* Write failed? We assume status is unchanged. 
*/
	if (ret)
		*vcdev->status = old_status;
	kfree(ccw);
}

static struct virtio_config_ops virtio_ccw_config_ops = {
	.get_features = virtio_ccw_get_features,
	.finalize_features = virtio_ccw_finalize_features,
	.get = virtio_ccw_get_config,
	.set = virtio_ccw_set_config,
	.get_status = virtio_ccw_get_status,
	.set_status = virtio_ccw_set_status,
	.reset = virtio_ccw_reset,
	.find_vqs = virtio_ccw_find_vqs,
	.del_vqs = virtio_ccw_del_vqs,
};


/*
 * ccw bus driver related functions
 */

/* Final release of the virtio device; frees all per-device memory. */
static void virtio_ccw_release_dev(struct device *_d)
{
	struct virtio_device *dev = container_of(_d, struct virtio_device,
						 dev);
	struct virtio_ccw_device *vcdev = to_vc_device(dev);

	kfree(vcdev->status);
	kfree(vcdev->config_block);
	kfree(vcdev);
}

/* Does this irb indicate an I/O error (channel/device status or cc)? */
static int irb_is_error(struct irb *irb)
{
	if (scsw_cstat(&irb->scsw) != 0)
		return 1;
	if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		return 1;
	if (scsw_cc(&irb->scsw) != 0)
		return 1;
	return 0;
}

/* Look up a virtqueue by index; returns NULL if not found. */
static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
					      int index)
{
	struct virtio_ccw_vq_info *info;
	unsigned long flags;
	struct virtqueue *vq;

	vq = NULL;
	spin_lock_irqsave(&vcdev->lock, flags);
	list_for_each_entry(info, &vcdev->virtqueues, node) {
		if (info->vq->index == index) {
			vq = info->vq;
			break;
		}
	}
	spin_unlock_irqrestore(&vcdev->lock, flags);
	return vq;
}

/*
 * Interrupt handler for the ccw device: completes pending channel
 * programs and dispatches queue/config-change notifications based on
 * the indicator bits.
 */
static void virtio_ccw_int_handler(struct ccw_device *cdev,
				   unsigned long intparm,
				   struct irb *irb)
{
	__u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
	int i;
	struct virtqueue *vq;

	if (!vcdev)
		return;
	/* Check if it's a notification from the host. */
	if ((intparm == 0) &&
	    (scsw_stctl(&irb->scsw) ==
	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
		/* OK */
	}
	/*
	 * NOTE(review): the branch above is empty and execution falls
	 * through to the error check even for such unsolicited
	 * notifications - presumably intentional here, but confirm
	 * that irb_is_error() cannot misfire for that case.
	 */
	if (irb_is_error(irb)) {
		/* Command reject? */
		if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
		    (irb->ecw[0] & SNS0_CMD_REJECT))
			vcdev->err = -EOPNOTSUPP;
		else
			/* Map everything else to -EIO. */
			vcdev->err = -EIO;
	}
	if (vcdev->curr_io & activity) {
		switch (activity) {
		case VIRTIO_CCW_DOING_READ_FEAT:
		case VIRTIO_CCW_DOING_WRITE_FEAT:
		case VIRTIO_CCW_DOING_READ_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_STATUS:
		case VIRTIO_CCW_DOING_SET_VQ:
		case VIRTIO_CCW_DOING_SET_IND:
		case VIRTIO_CCW_DOING_SET_CONF_IND:
		case VIRTIO_CCW_DOING_RESET:
		case VIRTIO_CCW_DOING_READ_VQ_CONF:
		case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
		case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
			vcdev->curr_io &= ~activity;
			wake_up(&vcdev->wait_q);
			break;
		default:
			/* don't know what to do... */
			dev_warn(&cdev->dev, "Suspicious activity '%08x'\n",
				 activity);
			WARN_ON(1);
			break;
		}
	}
	for_each_set_bit(i, &vcdev->indicators,
			 sizeof(vcdev->indicators) * BITS_PER_BYTE) {
		/* The bit clear must happen before the vring kick. */
		clear_bit(i, &vcdev->indicators);
		barrier();
		vq = virtio_ccw_vq_by_ind(vcdev, i);
		vring_interrupt(0, vq);
	}
	if (test_bit(0, &vcdev->indicators2)) {
		virtio_config_changed(&vcdev->vdev);
		clear_bit(0, &vcdev->indicators2);
	}
}

/*
 * We usually want to autoonline all devices, but give the admin
 * a way to exempt devices from this.
 */
#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
		     (8*sizeof(long)))
static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];

static char *no_auto = "";

module_param(no_auto, charp, 0444);
MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");

/* Return 1 unless the device was excluded via the no_auto parameter. */
static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
{
	struct ccw_dev_id id;

	ccw_device_get_id(cdev, &id);
	if (test_bit(id.devno, devs_no_auto[id.ssid]))
		return 0;
	return 1;
}

/* Async worker that sets a probed device online. */
static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
}

/* Bus probe: install the interrupt handler and maybe auto-online. */
static int virtio_ccw_probe(struct ccw_device *cdev)
{
	cdev->handler = virtio_ccw_int_handler;

	if (virtio_ccw_check_autoonline(cdev))
		async_schedule(virtio_ccw_auto_online, cdev);
	return 0;
}

/*
 * Atomically claim the drvdata for teardown; returns NULL if another
 * offline/remove is already in progress (going_away).
 */
static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	vcdev = dev_get_drvdata(&cdev->dev);
	if (!vcdev || vcdev->going_away) {
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		return NULL;
	}
	vcdev->going_away = true;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return vcdev;
}

/* Bus remove: unregister the virtio device if it is still online. */
static void virtio_ccw_remove(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (vcdev && cdev->online) {
		if (vcdev->device_lost)
			virtio_break_device(&vcdev->vdev);
		unregister_virtio_device(&vcdev->vdev);
		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
		dev_set_drvdata(&cdev->dev, NULL);
spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	}
	cdev->handler = NULL;
}

/* Bus set_offline: tear down the virtio device. */
static int virtio_ccw_offline(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (!vcdev)
		return 0;
	if (vcdev->device_lost)
		virtio_break_device(&vcdev->vdev);
	unregister_virtio_device(&vcdev->vdev);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return 0;
}

/*
 * Negotiate the transport revision: start at VIRTIO_CCW_REV_MAX and
 * step down on -EOPNOTSUPP; revision 0 rejection means legacy mode.
 */
static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
{
	struct virtio_rev_info *rev;
	struct ccw1 *ccw;
	int ret;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return -ENOMEM;
	rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
	if (!rev) {
		kfree(ccw);
		return -ENOMEM;
	}

	/* Set transport revision */
	ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
	ccw->flags = 0;
	ccw->count = sizeof(*rev);
	ccw->cda = (__u32)(unsigned long)rev;

	vcdev->revision = VIRTIO_CCW_REV_MAX;
	do {
		rev->revision = vcdev->revision;
		/* none of our supported revisions carry payload */
		rev->length = 0;
		ret = ccw_io_helper(vcdev, ccw,
				    VIRTIO_CCW_DOING_SET_VIRTIO_REV);
		if (ret == -EOPNOTSUPP) {
			if (vcdev->revision == 0)
				/*
				 * The host device does not support setting
				 * the revision: let's operate it in legacy
				 * mode.
				 */
				ret = 0;
			else
				vcdev->revision--;
		}
	} while (ret == -EOPNOTSUPP);

	kfree(ccw);
	kfree(rev);
	return ret;
}

/*
 * Bus set_online: allocate per-device state, negotiate the transport
 * revision and register the virtio device with the core.
 */
static int virtio_ccw_online(struct ccw_device *cdev)
{
	int ret;
	struct virtio_ccw_device *vcdev;
	unsigned long flags;

	vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
	if (!vcdev) {
		dev_warn(&cdev->dev, "Could not get memory for virtio\n");
		ret = -ENOMEM;
		goto out_free;
	}
	vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
				      GFP_DMA | GFP_KERNEL);
	if (!vcdev->config_block) {
		ret = -ENOMEM;
		goto out_free;
	}
	vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
	if (!vcdev->status) {
		ret = -ENOMEM;
		goto out_free;
	}

	vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */

	vcdev->vdev.dev.parent = &cdev->dev;
	vcdev->vdev.dev.release = virtio_ccw_release_dev;
	vcdev->vdev.config = &virtio_ccw_config_ops;
	vcdev->cdev = cdev;
	init_waitqueue_head(&vcdev->wait_q);
	INIT_LIST_HEAD(&vcdev->virtqueues);
	spin_lock_init(&vcdev->lock);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, vcdev);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	vcdev->vdev.id.vendor = cdev->id.cu_type;
	vcdev->vdev.id.device = cdev->id.cu_model;

	ret = virtio_ccw_set_transport_rev(vcdev);
	if (ret)
		goto out_free;

	ret = register_virtio_device(&vcdev->vdev);
	if (ret) {
		dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
			 ret);
		goto out_put;
	}
	return 0;
out_put:
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	/* release() will free vcdev and friends. */
	put_device(&vcdev->vdev.dev);
	return ret;
out_free:
	if (vcdev) {
		kfree(vcdev->status);
kfree(vcdev->config_block); 1232 } 1233 kfree(vcdev); 1234 return ret; 1235 } 1236 1237 static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event) 1238 { 1239 int rc; 1240 struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); 1241 1242 /* 1243 * Make sure vcdev is set 1244 * i.e. set_offline/remove callback not already running 1245 */ 1246 if (!vcdev) 1247 return NOTIFY_DONE; 1248 1249 switch (event) { 1250 case CIO_GONE: 1251 vcdev->device_lost = true; 1252 rc = NOTIFY_DONE; 1253 break; 1254 default: 1255 rc = NOTIFY_DONE; 1256 break; 1257 } 1258 return rc; 1259 } 1260 1261 static struct ccw_device_id virtio_ids[] = { 1262 { CCW_DEVICE(0x3832, 0) }, 1263 {}, 1264 }; 1265 MODULE_DEVICE_TABLE(ccw, virtio_ids); 1266 1267 static struct ccw_driver virtio_ccw_driver = { 1268 .driver = { 1269 .owner = THIS_MODULE, 1270 .name = "virtio_ccw", 1271 }, 1272 .ids = virtio_ids, 1273 .probe = virtio_ccw_probe, 1274 .remove = virtio_ccw_remove, 1275 .set_offline = virtio_ccw_offline, 1276 .set_online = virtio_ccw_online, 1277 .notify = virtio_ccw_cio_notify, 1278 .int_class = IRQIO_VIR, 1279 }; 1280 1281 static int __init pure_hex(char **cp, unsigned int *val, int min_digit, 1282 int max_digit, int max_val) 1283 { 1284 int diff; 1285 1286 diff = 0; 1287 *val = 0; 1288 1289 while (diff <= max_digit) { 1290 int value = hex_to_bin(**cp); 1291 1292 if (value < 0) 1293 break; 1294 *val = *val * 16 + value; 1295 (*cp)++; 1296 diff++; 1297 } 1298 1299 if ((diff < min_digit) || (diff > max_digit) || (*val > max_val)) 1300 return 1; 1301 1302 return 0; 1303 } 1304 1305 static int __init parse_busid(char *str, unsigned int *cssid, 1306 unsigned int *ssid, unsigned int *devno) 1307 { 1308 char *str_work; 1309 int rc, ret; 1310 1311 rc = 1; 1312 1313 if (*str == '\0') 1314 goto out; 1315 1316 str_work = str; 1317 ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID); 1318 if (ret || (str_work[0] != '.')) 1319 goto out; 1320 str_work++; 1321 ret = pure_hex(&str_work, ssid, 
1, 1, __MAX_SSID); 1322 if (ret || (str_work[0] != '.')) 1323 goto out; 1324 str_work++; 1325 ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL); 1326 if (ret || (str_work[0] != '\0')) 1327 goto out; 1328 1329 rc = 0; 1330 out: 1331 return rc; 1332 } 1333 1334 static void __init no_auto_parse(void) 1335 { 1336 unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to; 1337 char *parm, *str; 1338 int rc; 1339 1340 str = no_auto; 1341 while ((parm = strsep(&str, ","))) { 1342 rc = parse_busid(strsep(&parm, "-"), &from_cssid, 1343 &from_ssid, &from); 1344 if (rc) 1345 continue; 1346 if (parm != NULL) { 1347 rc = parse_busid(parm, &to_cssid, 1348 &to_ssid, &to); 1349 if ((from_ssid > to_ssid) || 1350 ((from_ssid == to_ssid) && (from > to))) 1351 rc = -EINVAL; 1352 } else { 1353 to_cssid = from_cssid; 1354 to_ssid = from_ssid; 1355 to = from; 1356 } 1357 if (rc) 1358 continue; 1359 while ((from_ssid < to_ssid) || 1360 ((from_ssid == to_ssid) && (from <= to))) { 1361 set_bit(from, devs_no_auto[from_ssid]); 1362 from++; 1363 if (from > __MAX_SUBCHANNEL) { 1364 from_ssid++; 1365 from = 0; 1366 } 1367 } 1368 } 1369 } 1370 1371 static int __init virtio_ccw_init(void) 1372 { 1373 /* parse no_auto string before we do anything further */ 1374 no_auto_parse(); 1375 return ccw_driver_register(&virtio_ccw_driver); 1376 } 1377 module_init(virtio_ccw_init); 1378 1379 static void __exit virtio_ccw_exit(void) 1380 { 1381 int i; 1382 1383 ccw_driver_unregister(&virtio_ccw_driver); 1384 for (i = 0; i < MAX_AIRQ_AREAS; i++) 1385 destroy_airq_info(airq_areas[i]); 1386 } 1387 module_exit(virtio_ccw_exit); 1388