/*
 * ccw based virtio transport
 *
 * Copyright IBM Corp. 2012, 2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
#include <linux/pfn.h>
#include <linux/async.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/kvm_para.h>
#include <linux/notifier.h>
#include <asm/diag.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/virtio-ccw.h>
#include <asm/isc.h>
#include <asm/airq.h>

/*
 * virtio related functions
 */

struct vq_config_block {
	__u16 index;
	__u16 num;
} __packed;

#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */

struct virtio_ccw_device {
	struct virtio_device vdev;
	__u8 *status;
	__u8 config[VIRTIO_CCW_CONFIG_SIZE];
	struct ccw_device *cdev;
	__u32 curr_io;
	int err;
	unsigned int revision; /* Transport revision */
	wait_queue_head_t wait_q;
	spinlock_t lock;
	struct list_head virtqueues;
	unsigned long indicators;
	unsigned long indicators2;
	struct vq_config_block *config_block;
	bool is_thinint;
	bool going_away;
	bool device_lost;
	unsigned int config_ready;
	void *airq_info;
};

struct vq_info_block_legacy {
	__u64 queue;
	__u32 align;
	__u16 index;
	__u16 num;
} __packed;

struct vq_info_block {
	__u64 desc;
	__u32 res0;
	__u16 index;
	__u16 num;
	__u64 avail;
	__u64 used;
} __packed;

struct virtio_feature_desc {
	__u32 features;
	__u8 index;
} __packed;

struct virtio_thinint_area {
	unsigned long summary_indicator;
	unsigned long indicator;
	u64 bit_nr;
	u8 isc;
} __packed;

struct virtio_rev_info {
	__u16 revision;
	__u16 length;
	__u8 data[];
};

/* the highest virtio-ccw revision we support */
#define VIRTIO_CCW_REV_MAX 1

struct virtio_ccw_vq_info {
	struct virtqueue *vq;
	int num;
	void *queue;
	union {
		struct vq_info_block s;
		struct vq_info_block_legacy l;
	} *info_block;
	int bit_nr;
	struct list_head node;
	long cookie;
};

#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */

#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
#define MAX_AIRQ_AREAS 20

static int virtio_ccw_use_airq = 1;

struct airq_info {
	rwlock_t lock;
	u8 summary_indicator;
	struct airq_struct airq;
	struct airq_iv *aiv;
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];

#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_FEAT 0x12
#define CCW_CMD_WRITE_FEAT 0x11
#define CCW_CMD_READ_CONF 0x22
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
#define CCW_CMD_READ_STATUS 0x72
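/*
 * The two commands below are used for adapter (thin) interrupt indicators
 * and for setting the transport revision; see
 * virtio_ccw_register_adapter_ind() and virtio_ccw_set_transport_rev().
 */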
#define CCW_CMD_SET_IND_ADAPTER 0x73
#define CCW_CMD_SET_VIRTIO_REV 0x83

#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000

static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
{
	return container_of(vdev, struct virtio_ccw_device, vdev);
}

static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
{
	unsigned long i, flags;

	write_lock_irqsave(&info->lock, flags);
	for (i = 0; i < airq_iv_end(info->aiv); i++) {
		if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
			airq_iv_free_bit(info->aiv, i);
			airq_iv_set_ptr(info->aiv, i, 0);
			break;
		}
	}
	write_unlock_irqrestore(&info->lock, flags);
}

static void virtio_airq_handler(struct airq_struct *airq)
{
	struct airq_info *info = container_of(airq, struct airq_info, airq);
	unsigned long ai;

	inc_irq_stat(IRQIO_VAI);
	read_lock(&info->lock);
	/* Walk through indicators field, summary indicator active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	info->summary_indicator = 0;
	smp_wmb();
	/* Walk through indicators field, summary indicator not active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	read_unlock(&info->lock);
}

static struct airq_info *new_airq_info(void)
{
	struct airq_info *info;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;
	rwlock_init(&info->lock);
	info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR);
	if (!info->aiv) {
		kfree(info);
		return NULL;
	}
	info->airq.handler = virtio_airq_handler;
	info->airq.lsi_ptr = &info->summary_indicator;
	info->airq.lsi_mask = 0xff;
	info->airq.isc = VIRTIO_AIRQ_ISC;
	rc = register_adapter_interrupt(&info->airq);
	if (rc) {
		airq_iv_release(info->aiv);
		kfree(info);
		return NULL;
	}
	return info;
}

static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
					u64 *first, void **airq_info)
{
	int i, j;
	struct airq_info *info;
	unsigned long indicator_addr = 0;
	unsigned long bit, flags;

	for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
		if (!airq_areas[i])
			airq_areas[i] = new_airq_info();
		info = airq_areas[i];
		if (!info)
			return 0;
		write_lock_irqsave(&info->lock, flags);
		bit = airq_iv_alloc(info->aiv, nvqs);
		if (bit == -1UL) {
			/* Not enough vacancies. */
			write_unlock_irqrestore(&info->lock, flags);
			continue;
		}
		*first = bit;
		*airq_info = info;
		indicator_addr = (unsigned long)info->aiv->vector;
		for (j = 0; j < nvqs; j++) {
			airq_iv_set_ptr(info->aiv, bit + j,
					(unsigned long)vqs[j]);
		}
		write_unlock_irqrestore(&info->lock, flags);
	}
	return indicator_addr;
}

static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
{
	struct virtio_ccw_vq_info *info;

	list_for_each_entry(info, &vcdev->virtqueues, node)
		drop_airq_indicator(info->vq, vcdev->airq_info);
}

static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
	unsigned long flags;
	__u32 ret;

	spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
	if (vcdev->err)
		ret = 0;
	else
		ret = vcdev->curr_io & flag;
	spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
	return ret;
}

static int ccw_io_helper(struct virtio_ccw_device *vcdev,
			 struct ccw1 *ccw, __u32 intparm)
{
	int ret;
	unsigned long flags;
	int flag = intparm & VIRTIO_CCW_INTPARM_MASK;

	do {
		spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
		ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
		if (!ret) {
			if (!vcdev->curr_io)
				vcdev->err = 0;
			vcdev->curr_io |= flag;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
		cpu_relax();
	} while (ret == -EBUSY);
	wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
	return ret ? ret : vcdev->err;
}

static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
				      struct ccw1 *ccw)
{
	int ret;
	unsigned long *indicatorp = NULL;
	struct virtio_thinint_area *thinint_area = NULL;
	struct airq_info *airq_info = vcdev->airq_info;

	if (vcdev->is_thinint) {
		thinint_area = kzalloc(sizeof(*thinint_area),
				       GFP_DMA | GFP_KERNEL);
		if (!thinint_area)
			return;
		thinint_area->summary_indicator =
			(unsigned long) &airq_info->summary_indicator;
		thinint_area->isc = VIRTIO_AIRQ_ISC;
		ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
		ccw->count = sizeof(*thinint_area);
		ccw->cda = (__u32)(unsigned long) thinint_area;
	} else {
		/* payload is the address of the indicators */
		indicatorp = kmalloc(sizeof(&vcdev->indicators),
				     GFP_DMA | GFP_KERNEL);
		if (!indicatorp)
			return;
		*indicatorp = 0;
		ccw->cmd_code = CCW_CMD_SET_IND;
		ccw->count = sizeof(&vcdev->indicators);
		ccw->cda = (__u32)(unsigned long) indicatorp;
	}
	/* Deregister indicators from host. */
	vcdev->indicators = 0;
	ccw->flags = 0;
	ret = ccw_io_helper(vcdev, ccw,
			    vcdev->is_thinint ?
			    VIRTIO_CCW_DOING_SET_IND_ADAPTER :
			    VIRTIO_CCW_DOING_SET_IND);
	if (ret && (ret != -ENODEV))
		dev_info(&vcdev->cdev->dev,
			 "Failed to deregister indicators (%d)\n", ret);
	else if (vcdev->is_thinint)
		virtio_ccw_drop_indicators(vcdev);
	kfree(indicatorp);
	kfree(thinint_area);
}

static inline long __do_kvm_notify(struct subchannel_id schid,
				   unsigned long queue_index,
				   long cookie)
{
	register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
	register struct subchannel_id __schid asm("2") = schid;
	register unsigned long __index asm("3") = queue_index;
	register long __rc asm("2");
	register long __cookie asm("4") = cookie;

	asm volatile ("diag 2,4,0x500\n"
		      : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
			"d"(__cookie)
		      : "memory", "cc");
	return __rc;
}

static inline long do_kvm_notify(struct subchannel_id schid,
				 unsigned long queue_index,
				 long cookie)
{
	diag_stat_inc(DIAG_STAT_X500);
	return __do_kvm_notify(schid, queue_index, cookie);
}

static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
{
	struct virtio_ccw_vq_info *info = vq->priv;
	struct virtio_ccw_device *vcdev;
	struct subchannel_id schid;

	vcdev = to_vc_device(info->vq->vdev);
	ccw_device_get_schid(vcdev->cdev, &schid);
	info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
	if (info->cookie < 0)
		return false;
	return true;
}

static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
				   struct ccw1 *ccw, int index)
{
	int ret;

	vcdev->config_block->index = index;
	ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
	ccw->flags = 0;
	ccw->count = sizeof(struct vq_config_block);
	ccw->cda = (__u32)(unsigned long)(vcdev->config_block);
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
	if (ret)
		return ret;
	return vcdev->config_block->num;
}

static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
	struct virtio_ccw_vq_info *info = vq->priv;
	unsigned long flags;
	unsigned long size;
	int ret;
	unsigned int index = vq->index;

	/* Remove from our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	/* Release from host. */
	if (vcdev->revision == 0) {
		info->info_block->l.queue = 0;
		info->info_block->l.align = 0;
		info->info_block->l.index = index;
		info->info_block->l.num = 0;
		ccw->count = sizeof(info->info_block->l);
	} else {
		info->info_block->s.desc = 0;
		info->info_block->s.index = index;
		info->info_block->s.num = 0;
		info->info_block->s.avail = 0;
		info->info_block->s.used = 0;
		ccw->count = sizeof(info->info_block->s);
	}
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	ret = ccw_io_helper(vcdev, ccw,
			    VIRTIO_CCW_DOING_SET_VQ | index);
	/*
	 * -ENODEV isn't considered an error: The device is gone anyway.
	 * This may happen on device detach.
	 */
	if (ret && (ret != -ENODEV))
		dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
			 ret, index);

	vring_del_virtqueue(vq);
	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
	free_pages_exact(info->queue, size);
	kfree(info->info_block);
	kfree(info);
}

static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct ccw1 *ccw;
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	virtio_ccw_drop_indicator(vcdev, ccw);

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		virtio_ccw_del_vq(vq, ccw);

	kfree(ccw);
}

static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
					     int i, vq_callback_t *callback,
					     const char *name,
					     struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int err;
	struct virtqueue *vq = NULL;
	struct virtio_ccw_vq_info *info;
	unsigned long size = 0; /* silence the compiler */
	unsigned long flags;

	/* Allocate queue. */
	info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
	if (!info) {
		dev_warn(&vcdev->cdev->dev, "no info\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->info_block = kzalloc(sizeof(*info->info_block),
				   GFP_DMA | GFP_KERNEL);
	if (!info->info_block) {
		dev_warn(&vcdev->cdev->dev, "no info block\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
	if (info->num < 0) {
		err = info->num;
		goto out_err;
	}
	size = PAGE_ALIGN(vring_size(info->num, KVM_VIRTIO_CCW_RING_ALIGN));
	info->queue = alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
	if (info->queue == NULL) {
		dev_warn(&vcdev->cdev->dev, "no queue\n");
		err = -ENOMEM;
		goto out_err;
	}

	vq = vring_new_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN, vdev,
				 true, info->queue, virtio_ccw_kvm_notify,
				 callback, name);
	if (!vq) {
		/* For now, we fail if we can't get the requested size. */
		dev_warn(&vcdev->cdev->dev, "no vq\n");
		err = -ENOMEM;
		goto out_err;
	}

	/* Register it with the host. */
	if (vcdev->revision == 0) {
		info->info_block->l.queue = (__u64)info->queue;
		info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
		info->info_block->l.index = i;
		info->info_block->l.num = info->num;
		ccw->count = sizeof(info->info_block->l);
	} else {
		info->info_block->s.desc = (__u64)info->queue;
		info->info_block->s.index = i;
		info->info_block->s.num = info->num;
		info->info_block->s.avail = (__u64)virtqueue_get_avail(vq);
		info->info_block->s.used = (__u64)virtqueue_get_used(vq);
		ccw->count = sizeof(info->info_block->s);
	}
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
	if (err) {
		dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
		goto out_err;
	}

	info->vq = vq;
	vq->priv = info;

	/* Save it to our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_add(&info->node, &vcdev->virtqueues);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	return vq;

out_err:
	if (vq)
		vring_del_virtqueue(vq);
	if (info) {
		if (info->queue)
			free_pages_exact(info->queue, size);
		kfree(info->info_block);
	}
	kfree(info);
	return ERR_PTR(err);
}

static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
					   struct virtqueue *vqs[], int nvqs,
					   struct ccw1 *ccw)
{
	int ret;
	struct virtio_thinint_area *thinint_area = NULL;
	struct airq_info *info;

	thinint_area = kzalloc(sizeof(*thinint_area), GFP_DMA | GFP_KERNEL);
	if (!thinint_area) {
		ret = -ENOMEM;
		goto out;
	}
	/* Try to get an indicator. */
	thinint_area->indicator = get_airq_indicator(vqs, nvqs,
						     &thinint_area->bit_nr,
						     &vcdev->airq_info);
	if (!thinint_area->indicator) {
		ret = -ENOSPC;
		goto out;
	}
	info = vcdev->airq_info;
	thinint_area->summary_indicator =
		(unsigned long) &info->summary_indicator;
	thinint_area->isc = VIRTIO_AIRQ_ISC;
	ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(*thinint_area);
	ccw->cda = (__u32)(unsigned long)thinint_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			/*
			 * The host does not support adapter interrupts
			 * for virtio-ccw, stop trying.
			 */
			virtio_ccw_use_airq = 0;
			pr_info("Adapter interrupts unsupported on host\n");
		} else
			dev_warn(&vcdev->cdev->dev,
				 "enabling adapter interrupts = %d\n", ret);
		virtio_ccw_drop_indicators(vcdev);
	}
out:
	kfree(thinint_area);
	return ret;
}

static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			       struct virtqueue *vqs[],
			       vq_callback_t *callbacks[],
			       const char * const names[],
			       struct irq_affinity *desc)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	unsigned long *indicatorp = NULL;
	int ret, i;
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return -ENOMEM;

	for (i = 0; i < nvqs; ++i) {
		vqs[i] = virtio_ccw_setup_vq(vdev, i, callbacks[i], names[i],
					     ccw);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			vqs[i] = NULL;
			goto out;
		}
	}
	ret = -ENOMEM;
	/*
	 * We need a data area under 2G to communicate. Our payload is
	 * the address of the indicators.
	 */
	indicatorp = kmalloc(sizeof(&vcdev->indicators), GFP_DMA | GFP_KERNEL);
	if (!indicatorp)
		goto out;
	*indicatorp = (unsigned long) &vcdev->indicators;
	if (vcdev->is_thinint) {
		ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
		if (ret)
			/* no error, just fall back to legacy interrupts */
			vcdev->is_thinint = false;
	}
	if (!vcdev->is_thinint) {
		/* Register queue indicators with host. */
		vcdev->indicators = 0;
		ccw->cmd_code = CCW_CMD_SET_IND;
		ccw->flags = 0;
		ccw->count = sizeof(&vcdev->indicators);
		ccw->cda = (__u32)(unsigned long) indicatorp;
		ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
		if (ret)
			goto out;
	}
	/* Register indicators2 with host for config changes */
	*indicatorp = (unsigned long) &vcdev->indicators2;
	vcdev->indicators2 = 0;
	ccw->cmd_code = CCW_CMD_SET_CONF_IND;
	ccw->flags = 0;
	ccw->count = sizeof(&vcdev->indicators2);
	ccw->cda = (__u32)(unsigned long) indicatorp;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
	if (ret)
		goto out;

	kfree(indicatorp);
	kfree(ccw);
	return 0;
out:
	kfree(indicatorp);
	kfree(ccw);
	virtio_ccw_del_vqs(vdev);
	return ret;
}

static void virtio_ccw_reset(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	/* Zero status bits. */
	*vcdev->status = 0;

	/* Send a reset ccw on device. */
	ccw->cmd_code = CCW_CMD_VDEV_RESET;
	ccw->flags = 0;
	ccw->count = 0;
	ccw->cda = 0;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
	kfree(ccw);
}

static u64 virtio_ccw_get_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	int ret;
	u64 rc;
	struct ccw1 *ccw;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return 0;

	features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
	if (!features) {
		rc = 0;
		goto out_free;
	}
	/* Read the feature bits from the host. */
	features->index = 0;
	ccw->cmd_code = CCW_CMD_READ_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
	if (ret) {
		rc = 0;
		goto out_free;
	}

	rc = le32_to_cpu(features->features);

	if (vcdev->revision == 0)
		goto out_free;

	/* Read second half of the feature bits from the host. */
	features->index = 1;
	ccw->cmd_code = CCW_CMD_READ_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
	if (ret == 0)
		rc |= (u64)le32_to_cpu(features->features) << 32;

out_free:
	kfree(features);
	kfree(ccw);
	return rc;
}

static int virtio_ccw_finalize_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	struct ccw1 *ccw;
	int ret;

	if (vcdev->revision >= 1 &&
	    !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "virtio: device uses revision 1 "
			"but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return -ENOMEM;

	features = kzalloc(sizeof(*features), GFP_DMA | GFP_KERNEL);
	if (!features) {
		ret = -ENOMEM;
		goto out_free;
	}
	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	features->index = 0;
	features->features = cpu_to_le32((u32)vdev->features);
	/* Write the first half of the feature bits to the host. */
	ccw->cmd_code = CCW_CMD_WRITE_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
	if (ret)
		goto out_free;

	if (vcdev->revision == 0)
		goto out_free;

	features->index = 1;
	features->features = cpu_to_le32(vdev->features >> 32);
	/* Write the second half of the feature bits to the host. */
	ccw->cmd_code = CCW_CMD_WRITE_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);

out_free:
	kfree(features);
	kfree(ccw);

	return ret;
}

static void virtio_ccw_get_config(struct virtio_device *vdev,
				  unsigned int offset, void *buf, unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int ret;
	struct ccw1 *ccw;
	void *config_area;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
	if (!config_area)
		goto out_free;

	/* Read the config area from the host. */
	ccw->cmd_code = CCW_CMD_READ_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
	if (ret)
		goto out_free;

	memcpy(vcdev->config, config_area, offset + len);
	if (buf)
		memcpy(buf, &vcdev->config[offset], len);
	if (vcdev->config_ready < offset + len)
		vcdev->config_ready = offset + len;

out_free:
	kfree(config_area);
	kfree(ccw);
}

static void virtio_ccw_set_config(struct virtio_device *vdev,
				  unsigned int offset, const void *buf,
				  unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;
	void *config_area;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	config_area = kzalloc(VIRTIO_CCW_CONFIG_SIZE, GFP_DMA | GFP_KERNEL);
	if (!config_area)
		goto out_free;

	/* Make sure we don't overwrite fields. */
	if (vcdev->config_ready < offset)
		virtio_ccw_get_config(vdev, 0, NULL, offset);
	memcpy(&vcdev->config[offset], buf, len);
	/* Write the config area to the host. */
	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
	ccw->cmd_code = CCW_CMD_WRITE_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);

out_free:
	kfree(config_area);
	kfree(ccw);
}

static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	u8 old_status = *vcdev->status;
	struct ccw1 *ccw;

	if (vcdev->revision < 1)
		return *vcdev->status;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return old_status;

	ccw->cmd_code = CCW_CMD_READ_STATUS;
	ccw->flags = 0;
	ccw->count = sizeof(*vcdev->status);
	ccw->cda = (__u32)(unsigned long)vcdev->status;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
	/*
	 * If the channel program failed (should only happen if the device
	 * was hotunplugged, and then we clean up via the machine check
	 * handler anyway), vcdev->status was not overwritten and we just
	 * return the old status, which is fine.
	 */
	kfree(ccw);

	return *vcdev->status;
}

static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	u8 old_status = *vcdev->status;
	struct ccw1 *ccw;
	int ret;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return;

	/* Write the status to the host. */
	*vcdev->status = status;
	ccw->cmd_code = CCW_CMD_WRITE_STATUS;
	ccw->flags = 0;
	ccw->count = sizeof(status);
	ccw->cda = (__u32)(unsigned long)vcdev->status;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
	/* Write failed? We assume status is unchanged. */
	if (ret)
		*vcdev->status = old_status;
	kfree(ccw);
}

static const struct virtio_config_ops virtio_ccw_config_ops = {
	.get_features = virtio_ccw_get_features,
	.finalize_features = virtio_ccw_finalize_features,
	.get = virtio_ccw_get_config,
	.set = virtio_ccw_set_config,
	.get_status = virtio_ccw_get_status,
	.set_status = virtio_ccw_set_status,
	.reset = virtio_ccw_reset,
	.find_vqs = virtio_ccw_find_vqs,
	.del_vqs = virtio_ccw_del_vqs,
};


/*
 * ccw bus driver related functions
 */

static void virtio_ccw_release_dev(struct device *_d)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	struct virtio_ccw_device *vcdev = to_vc_device(dev);

	kfree(vcdev->status);
	kfree(vcdev->config_block);
	kfree(vcdev);
}

static int irb_is_error(struct irb *irb)
{
	if (scsw_cstat(&irb->scsw) != 0)
		return 1;
	if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		return 1;
	if (scsw_cc(&irb->scsw) != 0)
		return 1;
	return 0;
}

static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
					      int index)
{
	struct virtio_ccw_vq_info *info;
	unsigned long flags;
	struct virtqueue *vq;

	vq = NULL;
	spin_lock_irqsave(&vcdev->lock, flags);
	list_for_each_entry(info, &vcdev->virtqueues, node) {
		if (info->vq->index == index) {
			vq = info->vq;
			break;
		}
	}
	spin_unlock_irqrestore(&vcdev->lock, flags);
	return vq;
}

static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
				      __u32 activity)
{
	if (vcdev->curr_io & activity) {
		switch (activity) {
		case VIRTIO_CCW_DOING_READ_FEAT:
		case VIRTIO_CCW_DOING_WRITE_FEAT:
		case VIRTIO_CCW_DOING_READ_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_STATUS:
		case VIRTIO_CCW_DOING_READ_STATUS:
		case VIRTIO_CCW_DOING_SET_VQ:
		case VIRTIO_CCW_DOING_SET_IND:
		case VIRTIO_CCW_DOING_SET_CONF_IND:
		case VIRTIO_CCW_DOING_RESET:
		case VIRTIO_CCW_DOING_READ_VQ_CONF:
		case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
		case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
			vcdev->curr_io &= ~activity;
			wake_up(&vcdev->wait_q);
			break;
		default:
			/* don't know what to do... */
			dev_warn(&vcdev->cdev->dev,
				 "Suspicious activity '%08x'\n", activity);
			WARN_ON(1);
			break;
		}
	}
}

static void virtio_ccw_int_handler(struct ccw_device *cdev,
				   unsigned long intparm,
				   struct irb *irb)
{
	__u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
	int i;
	struct virtqueue *vq;

	if (!vcdev)
		return;
	if (IS_ERR(irb)) {
		vcdev->err = PTR_ERR(irb);
		virtio_ccw_check_activity(vcdev, activity);
		/* Don't poke around indicators, something's wrong. */
		return;
	}
	/* Check if it's a notification from the host. */
	if ((intparm == 0) &&
	    (scsw_stctl(&irb->scsw) ==
	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
		/* OK */
	}
	if (irb_is_error(irb)) {
		/* Command reject? */
		if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
		    (irb->ecw[0] & SNS0_CMD_REJECT))
			vcdev->err = -EOPNOTSUPP;
		else
			/* Map everything else to -EIO. */
			vcdev->err = -EIO;
	}
	virtio_ccw_check_activity(vcdev, activity);
	for_each_set_bit(i, &vcdev->indicators,
			 sizeof(vcdev->indicators) * BITS_PER_BYTE) {
		/* The bit clear must happen before the vring kick. */
		clear_bit(i, &vcdev->indicators);
		barrier();
		vq = virtio_ccw_vq_by_ind(vcdev, i);
		vring_interrupt(0, vq);
	}
	if (test_bit(0, &vcdev->indicators2)) {
		virtio_config_changed(&vcdev->vdev);
		clear_bit(0, &vcdev->indicators2);
	}
}

/*
 * We usually want to autoonline all devices, but give the admin
 * a way to exempt devices from this.
 */
#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
		     (8*sizeof(long)))
static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];

static char *no_auto = "";

module_param(no_auto, charp, 0444);
MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");

static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
{
	struct ccw_dev_id id;

	ccw_device_get_id(cdev, &id);
	if (test_bit(id.devno, devs_no_auto[id.ssid]))
		return 0;
	return 1;
}

static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
}

static int virtio_ccw_probe(struct ccw_device *cdev)
{
	cdev->handler = virtio_ccw_int_handler;

	if (virtio_ccw_check_autoonline(cdev))
		async_schedule(virtio_ccw_auto_online, cdev);
	return 0;
}

static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	vcdev = dev_get_drvdata(&cdev->dev);
	if (!vcdev || vcdev->going_away) {
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		return NULL;
	}
	vcdev->going_away = true;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return vcdev;
}

static void virtio_ccw_remove(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (vcdev && cdev->online) {
		if (vcdev->device_lost)
			virtio_break_device(&vcdev->vdev);
		unregister_virtio_device(&vcdev->vdev);
		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
		dev_set_drvdata(&cdev->dev, NULL);
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	}
	cdev->handler = NULL;
}

static int virtio_ccw_offline(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (!vcdev)
		return 0;
	if (vcdev->device_lost)
		virtio_break_device(&vcdev->vdev);
	unregister_virtio_device(&vcdev->vdev);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return 0;
}

static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
{
	struct virtio_rev_info *rev;
	struct ccw1 *ccw;
	int ret;

	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
	if (!ccw)
		return -ENOMEM;
	rev = kzalloc(sizeof(*rev), GFP_DMA | GFP_KERNEL);
	if (!rev) {
		kfree(ccw);
		return -ENOMEM;
	}
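
	/*
	 * Negotiate the transport revision: start at the highest revision
	 * this driver supports (VIRTIO_CCW_REV_MAX) and step down each time
	 * the host rejects the command; if even revision 0 is rejected, the
	 * device is operated in legacy mode.
	 */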

	/* Set transport revision */
	ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
	ccw->flags = 0;
	ccw->count = sizeof(*rev);
	ccw->cda = (__u32)(unsigned long)rev;

	vcdev->revision = VIRTIO_CCW_REV_MAX;
	do {
		rev->revision = vcdev->revision;
		/* none of our supported revisions carry payload */
		rev->length = 0;
		ret = ccw_io_helper(vcdev, ccw,
				    VIRTIO_CCW_DOING_SET_VIRTIO_REV);
		if (ret == -EOPNOTSUPP) {
			if (vcdev->revision == 0)
				/*
				 * The host device does not support setting
				 * the revision: let's operate it in legacy
				 * mode.
				 */
				ret = 0;
			else
				vcdev->revision--;
		}
	} while (ret == -EOPNOTSUPP);

	kfree(ccw);
	kfree(rev);
	return ret;
}

static int virtio_ccw_online(struct ccw_device *cdev)
{
	int ret;
	struct virtio_ccw_device *vcdev;
	unsigned long flags;

	vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
	if (!vcdev) {
		dev_warn(&cdev->dev, "Could not get memory for virtio\n");
		ret = -ENOMEM;
		goto out_free;
	}
	vcdev->config_block = kzalloc(sizeof(*vcdev->config_block),
				      GFP_DMA | GFP_KERNEL);
	if (!vcdev->config_block) {
		ret = -ENOMEM;
		goto out_free;
	}
	vcdev->status = kzalloc(sizeof(*vcdev->status), GFP_DMA | GFP_KERNEL);
	if (!vcdev->status) {
		ret = -ENOMEM;
		goto out_free;
	}

	vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */

	vcdev->vdev.dev.parent = &cdev->dev;
	vcdev->vdev.dev.release = virtio_ccw_release_dev;
	vcdev->vdev.config = &virtio_ccw_config_ops;
	vcdev->cdev = cdev;
	init_waitqueue_head(&vcdev->wait_q);
	INIT_LIST_HEAD(&vcdev->virtqueues);
	spin_lock_init(&vcdev->lock);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, vcdev);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	vcdev->vdev.id.vendor = cdev->id.cu_type;
	vcdev->vdev.id.device = cdev->id.cu_model;

	ret = virtio_ccw_set_transport_rev(vcdev);
	if (ret)
		goto out_free;

	ret = register_virtio_device(&vcdev->vdev);
	if (ret) {
		dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
			 ret);
		goto out_put;
	}
	return 0;
out_put:
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	put_device(&vcdev->vdev.dev);
	return ret;
out_free:
	if (vcdev) {
		kfree(vcdev->status);
		kfree(vcdev->config_block);
	}
	kfree(vcdev);
	return ret;
}

static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
{
	int rc;
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);

	/*
	 * Make sure vcdev is set
	 * i.e. set_offline/remove callback not already running
	 */
	if (!vcdev)
		return NOTIFY_DONE;

	switch (event) {
	case CIO_GONE:
		vcdev->device_lost = true;
		rc = NOTIFY_DONE;
		break;
	default:
		rc = NOTIFY_DONE;
		break;
	}
	return rc;
}

static struct ccw_device_id virtio_ids[] = {
	{ CCW_DEVICE(0x3832, 0) },
	{},
};

static struct ccw_driver virtio_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "virtio_ccw",
	},
	.ids = virtio_ids,
	.probe = virtio_ccw_probe,
	.remove = virtio_ccw_remove,
	.set_offline = virtio_ccw_offline,
	.set_online = virtio_ccw_online,
	.notify = virtio_ccw_cio_notify,
	.int_class = IRQIO_VIR,
};

static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
			   int max_digit, int max_val)
{
	int diff;

	diff = 0;
	*val = 0;

	while (diff <= max_digit) {
		int value = hex_to_bin(**cp);

		if (value < 0)
			break;
		*val = *val * 16 + value;
		(*cp)++;
		diff++;
	}

	if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
		return 1;

	return 0;
}

static int __init parse_busid(char *str, unsigned int *cssid,
			      unsigned int *ssid, unsigned int *devno)
{
	char *str_work;
	int rc, ret;

	rc = 1;

	if (*str == '\0')
		goto out;

	str_work = str;
	ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
	if (ret || (str_work[0] != '.'))
		goto out;
	str_work++;
	ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
	if (ret || (str_work[0] != '.'))
		goto out;
	str_work++;
	ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
	if (ret || (str_work[0] != '\0'))
		goto out;

	rc = 0;
out:
	return rc;
}

static void __init no_auto_parse(void)
{
	unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
	char *parm, *str;
	int rc;

	str = no_auto;
	while ((parm = strsep(&str, ","))) {
		rc = parse_busid(strsep(&parm, "-"), &from_cssid,
				 &from_ssid, &from);
		if (rc)
			continue;
		if (parm != NULL) {
			rc = parse_busid(parm, &to_cssid,
					 &to_ssid, &to);
			if ((from_ssid > to_ssid) ||
			    ((from_ssid == to_ssid) && (from > to)))
				rc = -EINVAL;
		} else {
			to_cssid = from_cssid;
			to_ssid = from_ssid;
			to = from;
		}
		if (rc)
			continue;
		while ((from_ssid < to_ssid) ||
		       ((from_ssid == to_ssid) && (from <= to))) {
			set_bit(from, devs_no_auto[from_ssid]);
			from++;
			if (from > __MAX_SUBCHANNEL) {
				from_ssid++;
				from = 0;
			}
		}
	}
}

static int __init virtio_ccw_init(void)
{
	/* parse no_auto string before we do anything further */
	no_auto_parse();
	return ccw_driver_register(&virtio_ccw_driver);
}
device_initcall(virtio_ccw_init);
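
/*
 * Example (illustrative bus ids, assuming the usual built-in parameter
 * prefix): the no_auto parameter above is typically given on the kernel
 * command line as
 *	virtio_ccw.no_auto=0.0.0815,0.1.1000-0.1.1fff
 * using the cssid.ssid.devno format parsed by parse_busid(), with ','
 * separating entries and '-' denoting a range.
 */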