/*
 * ccw based virtio transport
 *
 * Copyright IBM Corp. 2012, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
#include <linux/pfn.h>
#include <linux/async.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/kvm_para.h>
#include <linux/notifier.h>
#include <asm/diag.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/virtio-ccw.h>
#include <asm/isc.h>
#include <asm/airq.h>

/*
 * virtio related functions
 */

struct vq_config_block {
	__u16 index;
	__u16 num;
} __packed;

#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */

struct virtio_ccw_device {
	struct virtio_device vdev;
	__u8 *status;
	__u8 config[VIRTIO_CCW_CONFIG_SIZE];
	struct ccw_device *cdev;
	__u32 curr_io;
	int err;
	unsigned int revision; /* Transport revision */
	wait_queue_head_t wait_q;
	spinlock_t lock;
	struct list_head virtqueues;
	unsigned long indicators;
	unsigned long indicators2;
	struct vq_config_block *config_block;
	bool is_thinint;
	bool going_away;
	bool device_lost;
	unsigned int config_ready;
	void *airq_info;
};

struct vq_info_block_legacy {
	__u64 queue;
	__u32 align;
	__u16 index;
	__u16 num;
} __packed;

struct vq_info_block {
	__u64 desc;
	__u32 res0;
	__u16 index;
	__u16 num;
	__u64 avail;
	__u64 used;
} __packed;

struct virtio_feature_desc {
	__le32 features;
	__u8 index;
} __packed;

struct virtio_thinint_area {
	unsigned long summary_indicator;
	unsigned long indicator;
	u64 bit_nr;
	u8 isc;
} __packed;

struct virtio_rev_info {
	__u16 revision;
	__u16 length;
	__u8 data[];
};

/* the highest virtio-ccw revision we support */
#define VIRTIO_CCW_REV_MAX 1

struct virtio_ccw_vq_info {
	struct virtqueue *vq;
	int num;
	void *queue;
	union {
		struct vq_info_block s;
		struct vq_info_block_legacy l;
	} *info_block;
	int bit_nr;
	struct list_head node;
	long cookie;
};

#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */

#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
#define MAX_AIRQ_AREAS 20

static int virtio_ccw_use_airq = 1;

struct airq_info {
	rwlock_t lock;
	u8 summary_indicator;
	struct airq_struct airq;
	struct airq_iv *aiv;
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];

#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_FEAT 0x12
#define CCW_CMD_WRITE_FEAT 0x11
#define CCW_CMD_READ_CONF 0x22
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
#define CCW_CMD_READ_STATUS 0x72
#define CCW_CMD_SET_IND_ADAPTER 0x73
#define CCW_CMD_SET_VIRTIO_REV 0x83

#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000

static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
{
	return container_of(vdev, struct virtio_ccw_device, vdev);
}

static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
{
	unsigned long i, flags;

	write_lock_irqsave(&info->lock, flags);
	for (i = 0; i < airq_iv_end(info->aiv); i++) {
		if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
			airq_iv_free_bit(info->aiv, i);
			airq_iv_set_ptr(info->aiv, i, 0);
			break;
		}
	}
	write_unlock_irqrestore(&info->lock, flags);
}

static void virtio_airq_handler(struct airq_struct *airq)
{
	struct airq_info *info = container_of(airq, struct airq_info, airq);
	unsigned long ai;

	inc_irq_stat(IRQIO_VAI);
	read_lock(&info->lock);
	/* Walk through indicators field, summary indicator active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	info->summary_indicator = 0;
	smp_wmb();
	/* Walk through indicators field, summary indicator not active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	read_unlock(&info->lock);
}

static struct airq_info *new_airq_info(void)
{
	struct airq_info *info;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;
	rwlock_init(&info->lock);
	info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
	if (!info->aiv) {
		kfree(info);
		return NULL;
	}
	info->airq.handler = virtio_airq_handler;
	info->airq.lsi_ptr = &info->summary_indicator;
	info->airq.lsi_mask = 0xff;
	info->airq.isc = VIRTIO_AIRQ_ISC;
	rc = register_adapter_interrupt(&info->airq);
	if (rc) {
		airq_iv_release(info->aiv);
		kfree(info);
		return NULL;
	}
	return info;
}

static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
					u64 *first, void **airq_info)
{
	int i, j;
	struct airq_info *info;
	unsigned long indicator_addr = 0;
	unsigned long bit, flags;

	for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
		if (!airq_areas[i])
			airq_areas[i] = new_airq_info();
		info = airq_areas[i];
		if (!info)
			return 0;
		write_lock_irqsave(&info->lock, flags);
		bit = airq_iv_alloc(info->aiv, nvqs);
		if (bit == -1UL) {
			/* Not enough vacancies. */
			write_unlock_irqrestore(&info->lock, flags);
			continue;
		}
		*first = bit;
		*airq_info = info;
		indicator_addr = (unsigned long)info->aiv->vector;
		for (j = 0; j < nvqs; j++) {
			airq_iv_set_ptr(info->aiv, bit + j,
					(unsigned long)vqs[j]);
		}
		write_unlock_irqrestore(&info->lock, flags);
	}
	return indicator_addr;
}

static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
{
	struct virtio_ccw_vq_info *info;

	list_for_each_entry(info, &vcdev->virtqueues, node)
		drop_airq_indicator(info->vq, vcdev->airq_info);
}

static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
	unsigned long flags;
	__u32 ret;

	spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
	if (vcdev->err)
		ret = 0;
	else
		ret = vcdev->curr_io & flag;
	spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
	return ret;
}

static int ccw_io_helper(struct virtio_ccw_device *vcdev,
			 struct ccw1 *ccw, __u32 intparm)
{
	int ret;
	unsigned long flags;
	int flag = intparm & VIRTIO_CCW_INTPARM_MASK;

	do {
		spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
		ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
		if (!ret) {
			if (!vcdev->curr_io)
				vcdev->err = 0;
			vcdev->curr_io |= flag;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
		cpu_relax();
	} while (ret == -EBUSY);
	wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
	return ret ? ret : vcdev->err;
}

static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
				      struct ccw1 *ccw)
{
	int ret;
	unsigned long *indicatorp = NULL;
	struct virtio_thinint_area *thinint_area = NULL;
	struct airq_info *airq_info = vcdev->airq_info;

	if (vcdev->is_thinint) {
		thinint_area = kzalloc(sizeof(*thinint_area),
				       GFP_DMA | GFP_KERNEL);
		if (!thinint_area)
			return;
		thinint_area->summary_indicator =
			(unsigned long) &airq_info->summary_indicator;
		thinint_area->isc = VIRTIO_AIRQ_ISC;
		ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
		ccw->count = sizeof(*thinint_area);
		ccw->cda = (__u32)(unsigned long) thinint_area;
	} else {
		/* payload is the address of the indicators */
		indicatorp = kmalloc(sizeof(&vcdev->indicators),
				     GFP_DMA | GFP_KERNEL);
		if (!indicatorp)
			return;
		*indicatorp = 0;
		ccw->cmd_code = CCW_CMD_SET_IND;
		ccw->count = sizeof(&vcdev->indicators);
		ccw->cda = (__u32)(unsigned long) indicatorp;
	}
	/* Deregister indicators from host. */
	vcdev->indicators = 0;
	ccw->flags = 0;
	ret = ccw_io_helper(vcdev, ccw,
			    vcdev->is_thinint ?
			    VIRTIO_CCW_DOING_SET_IND_ADAPTER :
			    VIRTIO_CCW_DOING_SET_IND);
	if (ret && (ret != -ENODEV))
		dev_info(&vcdev->cdev->dev,
			 "Failed to deregister indicators (%d)\n", ret);
	else if (vcdev->is_thinint)
		virtio_ccw_drop_indicators(vcdev);
	kfree(indicatorp);
	kfree(thinint_area);
}

static inline long __do_kvm_notify(struct subchannel_id schid,
				   unsigned long queue_index,
				   long cookie)
{
	register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
	register struct subchannel_id __schid asm("2") = schid;
	register unsigned long __index asm("3") = queue_index;
	register long __rc asm("2");
	register long __cookie asm("4") = cookie;

	asm volatile ("diag 2,4,0x500\n"
		      : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
		      "d"(__cookie)
		      : "memory", "cc");
	return __rc;
}

static inline long do_kvm_notify(struct subchannel_id schid,
				 unsigned long queue_index,
				 long cookie)
{
	diag_stat_inc(DIAG_STAT_X500);
	return __do_kvm_notify(schid, queue_index, cookie);
}

static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
{
	struct virtio_ccw_vq_info *info = vq->priv;
	struct virtio_ccw_device *vcdev;
	struct subchannel_id schid;

	vcdev = to_vc_device(info->vq->vdev);
	ccw_device_get_schid(vcdev->cdev, &schid);
	info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
	if (info->cookie < 0)
		return false;
	return true;
}

static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
				   struct ccw1 *ccw, int index)
{
	int ret;

	vcdev->config_block->index = index;
	ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
	ccw->flags = 0;
	ccw->count = sizeof(struct vq_config_block);
	ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
	if (ret)
		return ret;
	return vcdev->config_block->num;
}

static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
	struct virtio_ccw_vq_info *info = vq->priv;
	unsigned long flags;
	unsigned long size;
	int ret;
	unsigned int index = vq->index;

	/* Remove from our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	/* Release from host. */
	if (vcdev->revision == 0) {
		info->info_block->l.queue = 0;
		info->info_block->l.align = 0;
		info->info_block->l.index = index;
		info->info_block->l.num = 0;
		ccw->count = sizeof(info->info_block->l);
	} else {
		info->info_block->s.desc = 0;
		info->info_block->s.index = index;
		info->info_block->s.num = 0;
		info->info_block->s.avail = 0;
		info->info_block->s.used = 0;
		ccw->count = sizeof(info->info_block->s);
	}
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	ret = ccw_io_helper(vcdev, ccw,
			    VIRTIO_CCW_DOING_SET_VQ | index);
	/*
	 * -ENODEV isn't considered an error: The device is gone anyway.
	 * This may happen on device detach.
	 */
	if (ret && (ret != -ENODEV))
		dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
			 ret, index);

	vring_del_virtqueue(vq);
	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
	free_pages_exact(info->queue, size);
	kfree(info->info_block);
	kfree(info);
}

static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct ccw1 *ccw;
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	virtio_ccw_drop_indicator(vcdev, ccw);

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		virtio_ccw_del_vq(vq, ccw);

	kfree(ccw);
}

static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
					     int i, vq_callback_t *callback,
					     const char *name, bool ctx,
					     struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int err;
	struct virtqueue *vq = NULL;
	struct virtio_ccw_vq_info *info;
	unsigned long size = 0; /* silence the compiler */
	unsigned long flags;

	/* Allocate queue. */
	info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
	if (!info) {
		dev_warn(&vcdev->cdev->dev, "no info\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->info_block = kzalloc(sizeof(*info->info_block),
				   GFP_DMA | GFP_KERNEL);
	if (!info->info_block) {
		dev_warn(&vcdev->cdev->dev, "no info block\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
	if (info->num < 0) {
		err = info->num;
		goto out_err;
	}
	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
	info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (info->queue == NULL) {
		dev_warn(&vcdev->cdev->dev, "no queue\n");
		err = -ENOMEM;
		goto out_err;
	}

	vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
				 true, ctx, info->queue, virtio_ccw_kvm_notify,
				 callback, name);
	if (!vq) {
		/* For now, we fail if we can't get the requested size. */
		dev_warn(&vcdev->cdev->dev, "no vq\n");
		err = -ENOMEM;
		goto out_err;
	}

	/* Register it with the host. */
	if (vcdev->revision == 0) {
		info->info_block->l.queue = (__u64)info->queue;
		info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
		info->info_block->l.index = i;
		info->info_block->l.num = info->num;
		ccw->count = sizeof(info->info_block->l);
	} else {
		info->info_block->s.desc = (__u64)info->queue;
		info->info_block->s.index = i;
		info->info_block->s.num = info->num;
		info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
		info->info_block->s.used = (__u64)virtqueue_get_used(vq);
		ccw->count = sizeof(info->info_block->s);
	}
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
	if (err) {
		dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
		goto out_err;
	}

	info->vq = vq;
	vq->priv = info;

	/* Save it to our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_add(&info->node, &vcdev->virtqueues);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	return vq;

out_err:
	if (vq)
		vring_del_virtqueue(vq);
	if (info) {
		if (info->queue)
			free_pages_exact(info->queue, size);
		kfree(info->info_block);
	}
	kfree(info);
	return ERR_PTR(err);
}

static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
					   struct virtqueue *vqs[], int nvqs,
					   struct ccw1 *ccw)
{
	int ret;
	struct virtio_thinint_area *thinint_area = NULL;
	struct airq_info *info;

	thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
	if (!thinint_area) {
		ret = -ENOMEM;
		goto out;
	}
	/* Try to get an indicator. */
	thinint_area->indicator = get_airq_indicator(vqs, nvqs,
						     &thinint_area->bit_nr,
						     &vcdev->airq_info);
	if (!thinint_area->indicator) {
		ret = -ENOSPC;
		goto out;
	}
	info = vcdev->airq_info;
	thinint_area->summary_indicator =
		(unsigned long) &info->summary_indicator;
	thinint_area->isc = VIRTIO_AIRQ_ISC;
	ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(*thinint_area);
	ccw->cda = (__u32)(unsigned long)thinint_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			/*
			 * The host does not support adapter interrupts
			 * for virtio-ccw, stop trying.
			 */
			virtio_ccw_use_airq = 0;
			pr_info("Adapter interrupts unsupported on host\n");
		} else
			dev_warn(&vcdev->cdev->dev,
				 "enabling adapter interrupts = %d\n", ret);
		virtio_ccw_drop_indicators(vcdev);
	}
out:
	kfree(thinint_area);
	return ret;
}

static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			       struct virtqueue *vqs[],
			       vq_callback_t *callbacks[],
			       const char * const names[],
			       const bool *ctx,
			       struct irq_affinity *desc)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	unsigned long *indicatorp = NULL;
	int ret, i;
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return -ENOMEM;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
					     ctx ? ctx[i] : false, ccw);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			vqs[i] = NULL;
			goto out;
		}
	}
	ret = -ENOMEM;
	/*
	 * We need a data area under 2G to communicate. Our payload is
	 * the address of the indicators.
	 */
	indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
	if (!indicatorp)
		goto out;
	*indicatorp = (unsigned long) &vcdev->indicators;
	if (vcdev->is_thinint) {
		ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
		if (ret)
			/* no error, just fall back to legacy interrupts */
			vcdev->is_thinint = false;
	}
	if (!vcdev->is_thinint) {
		/* Register queue indicators with host. */
		vcdev->indicators = 0;
		ccw->cmd_code = CCW_CMD_SET_IND;
		ccw->flags = 0;
		ccw->count = sizeof(&vcdev->indicators);
		ccw->cda = (__u32)(unsigned long) indicatorp;
		ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
		if (ret)
			goto out;
	}
	/* Register indicators2 with host for config changes */
	*indicatorp = (unsigned long) &vcdev->indicators2;
	vcdev->indicators2 = 0;
	ccw->cmd_code = CCW_CMD_SET_CONF_IND;
	ccw->flags = 0;
	ccw->count = sizeof(&vcdev->indicators2);
	ccw->cda = (__u32)(unsigned long) indicatorp;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
	if (ret)
		goto out;

	kfree(indicatorp);
	kfree(ccw);
	return 0;
out:
	kfree(indicatorp);
	kfree(ccw);
	virtio_ccw_del_vqs(vdev);
	return ret;
}

static void virtio_ccw_reset(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	/* Zero status bits. */
	*vcdev->status = 0;

	/* Send a reset ccw on device. */
	ccw->cmd_code = CCW_CMD_VDEV_RESET;
	ccw->flags = 0;
	ccw->count = 0;
	ccw->cda = 0;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
	kfree(ccw);
}

static u64 virtio_ccw_get_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	int ret;
	u64 rc;
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return 0;

	features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
	if (!features) {
		rc = 0;
		goto out_free;
	}
	/* Read the feature bits from the host. */
	features->index = 0;
	ccw->cmd_code = CCW_CMD_READ_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
	if (ret) {
		rc = 0;
		goto out_free;
	}

	rc = le32_to_cpu(features->features);

	if (vcdev->revision == 0)
		goto out_free;

	/* Read second half of the feature bits from the host. */
	features->index = 1;
	ccw->cmd_code = CCW_CMD_READ_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
	if (ret == 0)
		rc |= (u64)le32_to_cpu(features->features) << 32;

out_free:
	kfree(features);
	kfree(ccw);
	return rc;
}

static int virtio_ccw_finalize_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	struct ccw1 *ccw;
	int ret;

	if (vcdev->revision >= 1 &&
	    !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "virtio: device uses revision 1 "
			"but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return -ENOMEM;

	features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
	if (!features) {
		ret = -ENOMEM;
		goto out_free;
	}
	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	features->index = 0;
	features->features = cpu_to_le32((u32)vdev->features);
	/* Write the first half of the feature bits to the host. */
	ccw->cmd_code = CCW_CMD_WRITE_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
	if (ret)
		goto out_free;

	if (vcdev->revision == 0)
		goto out_free;

	features->index = 1;
	features->features = cpu_to_le32(vdev->features >> 32);
	/* Write the second half of the feature bits to the host. */
	ccw->cmd_code = CCW_CMD_WRITE_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);

out_free:
	kfree(features);
	kfree(ccw);

	return ret;
}

static void virtio_ccw_get_config(struct virtio_device *vdev,
				  unsigned int offset, void *buf, unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int ret;
	struct ccw1 *ccw;
	void *config_area;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
	if (!config_area)
		goto out_free;

	/* Read the config area from the host. */
	ccw->cmd_code = CCW_CMD_READ_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
	if (ret)
		goto out_free;

	memcpy(vcdev->config, config_area, offset + len);
	if (buf)
		memcpy(buf, &vcdev->config[offset], len);
	if (vcdev->config_ready < offset + len)
		vcdev->config_ready = offset + len;

out_free:
	kfree(config_area);
	kfree(ccw);
}

static void virtio_ccw_set_config(struct virtio_device *vdev,
				  unsigned int offset, const void *buf,
				  unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;
	void *config_area;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
	if (!config_area)
		goto out_free;

	/* Make sure we don't overwrite fields. */
	if (vcdev->config_ready < offset)
		virtio_ccw_get_config(vdev, 0, NULL, offset);
	memcpy(&vcdev->config[offset], buf, len);
	/* Write the config area to the host. */
	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
	ccw->cmd_code = CCW_CMD_WRITE_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);

out_free:
	kfree(config_area);
	kfree(ccw);
}

static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	u8 old_status = *vcdev->status;
	struct ccw1 *ccw;

	if (vcdev->revision < 1)
		return *vcdev->status;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return old_status;

	ccw->cmd_code = CCW_CMD_READ_STATUS;
	ccw->flags = 0;
	ccw->count = sizeof(*vcdev->status);
	ccw->cda = (__u32)(unsigned long)vcdev->status;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
	/*
	 * If the channel program failed (should only happen if the device
	 * was hotunplugged, and then we clean up via the machine check
	 * handler anyway), vcdev->status was not overwritten and we just
	 * return the old status, which is fine.
	 */
	kfree(ccw);

	return *vcdev->status;
}

static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	u8 old_status = *vcdev->status;
	struct ccw1 *ccw;
	int ret;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	/* Write the status to the host. */
	*vcdev->status = status;
	ccw->cmd_code = CCW_CMD_WRITE_STATUS;
	ccw->flags = 0;
	ccw->count = sizeof(status);
	ccw->cda = (__u32)(unsigned long)vcdev->status;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
	/* Write failed? We assume status is unchanged. */
	if (ret)
		*vcdev->status = old_status;
	kfree(ccw);
}

static const struct virtio_config_ops virtio_ccw_config_ops = {
	.get_features = virtio_ccw_get_features,
	.finalize_features = virtio_ccw_finalize_features,
	.get = virtio_ccw_get_config,
	.set = virtio_ccw_set_config,
	.get_status = virtio_ccw_get_status,
	.set_status = virtio_ccw_set_status,
	.reset = virtio_ccw_reset,
	.find_vqs = virtio_ccw_find_vqs,
	.del_vqs = virtio_ccw_del_vqs,
};


/*
 * ccw bus driver related functions
 */

static void virtio_ccw_release_dev(struct device *_d)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	struct virtio_ccw_device *vcdev = to_vc_device(dev);

	kfree(vcdev->status);
	kfree(vcdev->config_block);
	kfree(vcdev);
}

static int irb_is_error(struct irb *irb)
{
	if (scsw_cstat(&irb->scsw) != 0)
		return 1;
	if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		return 1;
	if (scsw_cc(&irb->scsw) != 0)
		return 1;
	return 0;
}

static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
					      int index)
{
	struct virtio_ccw_vq_info *info;
	unsigned long flags;
	struct virtqueue *vq;

	vq = NULL;
	spin_lock_irqsave(&vcdev->lock, flags);
	list_for_each_entry(info, &vcdev->virtqueues, node) {
		if (info->vq->index == index) {
			vq = info->vq;
			break;
		}
	}
	spin_unlock_irqrestore(&vcdev->lock, flags);
	return vq;
}

static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
				      __u32 activity)
{
	if (vcdev->curr_io & activity) {
		switch (activity) {
		case VIRTIO_CCW_DOING_READ_FEAT:
		case VIRTIO_CCW_DOING_WRITE_FEAT:
		case VIRTIO_CCW_DOING_READ_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_STATUS:
		case VIRTIO_CCW_DOING_READ_STATUS:
		case VIRTIO_CCW_DOING_SET_VQ:
		case VIRTIO_CCW_DOING_SET_IND:
		case VIRTIO_CCW_DOING_SET_CONF_IND:
		case VIRTIO_CCW_DOING_RESET:
		case VIRTIO_CCW_DOING_READ_VQ_CONF:
		case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
		case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
			vcdev->curr_io &= ~activity;
			wake_up(&vcdev->wait_q);
			break;
		default:
			/* don't know what to do... */
			dev_warn(&vcdev->cdev->dev,
				 "Suspicious activity '%08x'\n", activity);
			WARN_ON(1);
			break;
		}
	}
}

static void virtio_ccw_int_handler(struct ccw_device *cdev,
				   unsigned long intparm,
				   struct irb *irb)
{
	__u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
	int i;
	struct virtqueue *vq;

	if (!vcdev)
		return;
	if (IS_ERR(irb)) {
		vcdev->err = PTR_ERR(irb);
		virtio_ccw_check_activity(vcdev, activity);
		/* Don't poke around indicators, something's wrong. */
		return;
	}
	/* Check if it's a notification from the host. */
	if ((intparm == 0) &&
	    (scsw_stctl(&irb->scsw) ==
	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
		/* OK */
	}
	if (irb_is_error(irb)) {
		/* Command reject? */
		if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
		    (irb->ecw[0] & SNS0_CMD_REJECT))
			vcdev->err = -EOPNOTSUPP;
		else
			/* Map everything else to -EIO. */
			vcdev->err = -EIO;
	}
	virtio_ccw_check_activity(vcdev, activity);
	for_each_set_bit(i, &vcdev->indicators,
			 sizeof(vcdev->indicators) * BITS_PER_BYTE) {
		/* The bit clear must happen before the vring kick. */
		clear_bit(i, &vcdev->indicators);
		barrier();
		vq = virtio_ccw_vq_by_ind(vcdev, i);
		vring_interrupt(0, vq);
	}
	if (test_bit(0, &vcdev->indicators2)) {
		virtio_config_changed(&vcdev->vdev);
		clear_bit(0, &vcdev->indicators2);
	}
}

/*
 * We usually want to autoonline all devices, but give the admin
 * a way to exempt devices from this.
 */
#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
		     (8*sizeof(long)))
static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];

static char *no_auto = "";

module_param(no_auto, charp, 0444);
MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");

static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
{
	struct ccw_dev_id id;

	ccw_device_get_id(cdev, &id);
	if (test_bit(id.devno, devs_no_auto[id.ssid]))
		return 0;
	return 1;
}

static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
}

static int virtio_ccw_probe(struct ccw_device *cdev)
{
	cdev->handler = virtio_ccw_int_handler;

	if (virtio_ccw_check_autoonline(cdev))
		async_schedule(virtio_ccw_auto_online, cdev);
	return 0;
}

static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	vcdev = dev_get_drvdata(&cdev->dev);
	if (!vcdev || vcdev->going_away) {
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		return NULL;
	}
	vcdev->going_away = true;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return vcdev;
}

static void virtio_ccw_remove(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (vcdev && cdev->online) {
		if (vcdev->device_lost)
			virtio_break_device(&vcdev->vdev);
		unregister_virtio_device(&vcdev->vdev);
		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
		dev_set_drvdata(&cdev->dev, NULL);
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	}
	cdev->handler = NULL;
}

static int virtio_ccw_offline(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (!vcdev)
		return 0;
	if (vcdev->device_lost)
		virtio_break_device(&vcdev->vdev);
	unregister_virtio_device(&vcdev->vdev);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return 0;
}

static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
{
	struct virtio_rev_info *rev;
	struct ccw1 *ccw;
	int ret;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return -ENOMEM;
	rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
	if (!rev) {
		kfree(ccw);
		return -ENOMEM;
	}

	/* Set transport revision */
	ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
	ccw->flags = 0;
	ccw->count = sizeof(*rev);
	ccw->cda = (__u32)(unsigned long)rev;

	vcdev->revision = VIRTIO_CCW_REV_MAX;
	do {
		rev->revision = vcdev->revision;
		/* none of our supported revisions carry payload */
		rev->length = 0;
		ret = ccw_io_helper(vcdev, ccw,
				    VIRTIO_CCW_DOING_SET_VIRTIO_REV);
		if (ret == -EOPNOTSUPP) {
			if (vcdev->revision == 0)
				/*
				 * The host device does not support setting
				 * the revision: let's operate it in legacy
				 * mode.
				 */
				ret = 0;
			else
				vcdev->revision--;
		}
	} while (ret == -EOPNOTSUPP);

	kfree(ccw);
	kfree(rev);
	return ret;
}

static int virtio_ccw_online(struct ccw_device *cdev)
{
	int ret;
	struct virtio_ccw_device *vcdev;
	unsigned long flags;

	vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
	if (!vcdev) {
		dev_warn(&cdev->dev, "Could not get memory for virtio\n");
		ret = -ENOMEM;
		goto out_free;
	}
	vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
				      GFP_DMA | GFP_KERNEL);
	if (!vcdev->config_block) {
		ret = -ENOMEM;
		goto out_free;
	}
	vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
	if (!vcdev->status) {
		ret = -ENOMEM;
		goto out_free;
	}

	vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */

	vcdev->vdev.dev.parent = &cdev->dev;
	vcdev->vdev.dev.release = virtio_ccw_release_dev;
	vcdev->vdev.config = &virtio_ccw_config_ops;
	vcdev->cdev = cdev;
	init_waitqueue_head(&vcdev->wait_q);
	INIT_LIST_HEAD(&vcdev->virtqueues);
	spin_lock_init(&vcdev->lock);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, vcdev);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	vcdev->vdev.id.vendor = cdev->id.cu_type;
	vcdev->vdev.id.device = cdev->id.cu_model;

	ret = virtio_ccw_set_transport_rev(vcdev);
	if (ret)
		goto out_free;

	ret = register_virtio_device(&vcdev->vdev);
	if (ret) {
		dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
			 ret);
		goto out_put;
	}
	return 0;
out_put:
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	put_device(&vcdev->vdev.dev);
	return ret;
out_free:
	if (vcdev) {
		kfree(vcdev->status);
		kfree(vcdev->config_block);
	}
	kfree(vcdev);
	return ret;
}

static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
{
	int rc;
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);

	/*
	 * Make sure vcdev is set
	 * i.e. set_offline/remove callback not already running
	 */
	if (!vcdev)
		return NOTIFY_DONE;

	switch (event) {
	case CIO_GONE:
		vcdev->device_lost = true;
		rc = NOTIFY_DONE;
		break;
	default:
		rc = NOTIFY_DONE;
		break;
	}
	return rc;
}

static struct ccw_device_id virtio_ids[] = {
	{ CCW_DEVICE(0x3832, 0) },
	{},
};

static struct ccw_driver virtio_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "virtio_ccw",
	},
	.ids = virtio_ids,
	.probe = virtio_ccw_probe,
	.remove = virtio_ccw_remove,
	.set_offline = virtio_ccw_offline,
	.set_online = virtio_ccw_online,
	.notify = virtio_ccw_cio_notify,
	.int_class = IRQIO_VIR,
};

static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
			   int max_digit, int max_val)
{
	int diff;

	diff = 0;
	*val = 0;

	while (diff <= max_digit) {
		int value = hex_to_bin(**cp);

		if (value < 0)
			break;
		*val = *val * 16 + value;
		(*cp)++;
		diff++;
	}

	if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
		return 1;

	return 0;
}

static int __init parse_busid(char *str, unsigned int *cssid,
			      unsigned int *ssid, unsigned int *devno)
{
	char *str_work;
	int rc, ret;

	rc = 1;

	if (*str == '\0')
		goto out;

	str_work = str;
	ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
	if (ret || (str_work[0] != '.'))
		goto out;
	str_work++;
	ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
	if (ret || (str_work[0] != '.'))
		goto out;
	str_work++;
	ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
	if (ret || (str_work[0] != '\0'))
		goto out;

	rc = 0;
out:
	return rc;
}

static void __init no_auto_parse(void)
{
	unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
	char *parm, *str;
	int rc;

	str = no_auto;
	while ((parm = strsep(&str, ","))) {
		rc = parse_busid(strsep(&parm, "-"), &from_cssid,
				 &from_ssid, &from);
		if (rc)
			continue;
		if (parm != NULL) {
			rc = parse_busid(parm, &to_cssid,
					 &to_ssid, &to);
			if ((from_ssid > to_ssid) ||
			    ((from_ssid == to_ssid) && (from > to)))
				rc = -EINVAL;
		} else {
			to_cssid = from_cssid;
			to_ssid = from_ssid;
			to = from;
		}
		if (rc)
			continue;
		while ((from_ssid < to_ssid) ||
		       ((from_ssid == to_ssid) && (from <= to))) {
			set_bit(from, devs_no_auto[from_ssid]);
			from++;
			if (from > __MAX_SUBCHANNEL) {
				from_ssid++;
				from = 0;
			}
		}
	}
}

static int __init virtio_ccw_init(void)
{
	/* parse no_auto string before we do anything further */
	no_auto_parse();
	return ccw_driver_register(&virtio_ccw_driver);
}
device_initcall(virtio_ccw_init);