// SPDX-License-Identifier: GPL-2.0
/*
 * ccw based virtio transport
 *
 * Copyright IBM Corp. 2012, 2014
 *
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
#include <linux/pfn.h>
#include <linux/async.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/kvm_para.h>
#include <linux/notifier.h>
#include <asm/diag.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/virtio-ccw.h>
#include <asm/isc.h>
#include <asm/airq.h>

/*
 * virtio related functions
 */

struct vq_config_block {
	__u16 index;
	__u16 num;
} __packed;

#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */

struct vcdev_dma_area {
	unsigned long indicators;
	unsigned long indicators2;
	struct vq_config_block config_block;
	__u8 status;
};

struct virtio_ccw_device {
	struct virtio_device vdev;
	__u8 config[VIRTIO_CCW_CONFIG_SIZE];
	struct ccw_device *cdev;
	__u32 curr_io;
	int err;
	unsigned int revision; /* Transport revision */
	wait_queue_head_t wait_q;
	spinlock_t lock;
	struct mutex io_lock; /* Serializes I/O requests */
	struct list_head virtqueues;
	bool is_thinint;
	bool going_away;
	bool device_lost;
	unsigned int config_ready;
	void *airq_info;
	struct vcdev_dma_area *dma_area;
};

static inline unsigned long *indicators(struct virtio_ccw_device *vcdev)
{
	return &vcdev->dma_area->indicators;
}

static inline unsigned long *indicators2(struct virtio_ccw_device *vcdev)
{
	return &vcdev->dma_area->indicators2;
}

struct vq_info_block_legacy {
	__u64 queue;
	__u32 align;
	__u16 index;
	__u16 num;
} __packed;

struct vq_info_block {
	__u64 desc;
	__u32 res0;
	__u16 index;
	__u16 num;
	__u64 avail;
	__u64 used;
} __packed;

struct virtio_feature_desc {
	__le32 features;
	__u8 index;
} __packed;

struct virtio_thinint_area {
	unsigned long summary_indicator;
	unsigned long indicator;
	u64 bit_nr;
	u8 isc;
} __packed;

struct virtio_rev_info {
	__u16 revision;
	__u16 length;
	__u8 data[];
};

/* the highest virtio-ccw revision we support */
#define VIRTIO_CCW_REV_MAX 1

struct virtio_ccw_vq_info {
	struct virtqueue *vq;
	int num;
	union {
		struct vq_info_block s;
		struct vq_info_block_legacy l;
	} *info_block;
	int bit_nr;
	struct list_head node;
	long cookie;
};

#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */

#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
#define MAX_AIRQ_AREAS 20

static int virtio_ccw_use_airq = 1;

struct airq_info {
	rwlock_t lock;
	u8 summary_indicator_idx;
	struct airq_struct airq;
	struct airq_iv *aiv;
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
static u8 *summary_indicators;

static inline u8 *get_summary_indicator(struct airq_info *info)
{
	return summary_indicators + info->summary_indicator_idx;
}
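
/*
 * Channel command codes implemented by the virtio-ccw transport.  Each
 * operation is a single CCW whose data address points at a transport
 * specific control block (queue info block, feature descriptor, config
 * space, status byte, ...).  The VIRTIO_CCW_DOING_* values further down
 * are the matching intparm flags used to track which channel program is
 * currently in flight.
 */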

#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_FEAT 0x12
#define CCW_CMD_WRITE_FEAT 0x11
#define CCW_CMD_READ_CONF 0x22
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
#define CCW_CMD_READ_STATUS 0x72
#define CCW_CMD_SET_IND_ADAPTER 0x73
#define CCW_CMD_SET_VIRTIO_REV 0x83

#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000

static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
{
	return container_of(vdev, struct virtio_ccw_device, vdev);
}

static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
{
	unsigned long i, flags;

	write_lock_irqsave(&info->lock, flags);
	for (i = 0; i < airq_iv_end(info->aiv); i++) {
		if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
			airq_iv_free_bit(info->aiv, i);
			airq_iv_set_ptr(info->aiv, i, 0);
			break;
		}
	}
	write_unlock_irqrestore(&info->lock, flags);
}
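
/*
 * Adapter interrupt handler: scan the per-queue indicator bits twice,
 * once while the summary indicator is still set and once after clearing
 * it, so that bits set by the host concurrently with the clear are not
 * lost.
 */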

static void virtio_airq_handler(struct airq_struct *airq, bool floating)
{
	struct airq_info *info = container_of(airq, struct airq_info, airq);
	unsigned long ai;

	inc_irq_stat(IRQIO_VAI);
	read_lock(&info->lock);
	/* Walk through indicators field, summary indicator active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	*(get_summary_indicator(info)) = 0;
	smp_wmb();
	/* Walk through indicators field, summary indicator not active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	read_unlock(&info->lock);
}

static struct airq_info *new_airq_info(int index)
{
	struct airq_info *info;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;
	rwlock_init(&info->lock);
	info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR
				   | AIRQ_IV_CACHELINE);
	if (!info->aiv) {
		kfree(info);
		return NULL;
	}
	info->airq.handler = virtio_airq_handler;
	info->summary_indicator_idx = index;
	info->airq.lsi_ptr = get_summary_indicator(info);
	info->airq.lsi_mask = 0xff;
	info->airq.isc = VIRTIO_AIRQ_ISC;
	rc = register_adapter_interrupt(&info->airq);
	if (rc) {
		airq_iv_release(info->aiv);
		kfree(info);
		return NULL;
	}
	return info;
}

static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
					u64 *first, void **airq_info)
{
	int i, j;
	struct airq_info *info;
	unsigned long indicator_addr = 0;
	unsigned long bit, flags;

	for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
		if (!airq_areas[i])
			airq_areas[i] = new_airq_info(i);
		info = airq_areas[i];
		if (!info)
			return 0;
		write_lock_irqsave(&info->lock, flags);
		bit = airq_iv_alloc(info->aiv, nvqs);
		if (bit == -1UL) {
			/* Not enough vacancies. */
			write_unlock_irqrestore(&info->lock, flags);
			continue;
		}
		*first = bit;
		*airq_info = info;
		indicator_addr = (unsigned long)info->aiv->vector;
		for (j = 0; j < nvqs; j++) {
			airq_iv_set_ptr(info->aiv, bit + j,
					(unsigned long)vqs[j]);
		}
		write_unlock_irqrestore(&info->lock, flags);
	}
	return indicator_addr;
}

static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
{
	struct virtio_ccw_vq_info *info;

	if (!vcdev->airq_info)
		return;
	list_for_each_entry(info, &vcdev->virtqueues, node)
		drop_airq_indicator(info->vq, vcdev->airq_info);
}
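
/*
 * Synchronous channel I/O: ccw_io_helper() starts a channel program under
 * io_lock, marks the operation in curr_io and then sleeps on wait_q until
 * the interrupt handler clears the flag again (or records an error in
 * vcdev->err), so every caller sees a completed request on return.
 */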

static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
	unsigned long flags;
	__u32 ret;

	spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
	if (vcdev->err)
		ret = 0;
	else
		ret = vcdev->curr_io & flag;
	spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
	return ret;
}

static int ccw_io_helper(struct virtio_ccw_device *vcdev,
			 struct ccw1 *ccw, __u32 intparm)
{
	int ret;
	unsigned long flags;
	int flag = intparm & VIRTIO_CCW_INTPARM_MASK;

	mutex_lock(&vcdev->io_lock);
	do {
		spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
		ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
		if (!ret) {
			if (!vcdev->curr_io)
				vcdev->err = 0;
			vcdev->curr_io |= flag;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
		cpu_relax();
	} while (ret == -EBUSY);
	wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
	ret = ret ? ret : vcdev->err;
	mutex_unlock(&vcdev->io_lock);
	return ret;
}

static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
				      struct ccw1 *ccw)
{
	int ret;
	unsigned long *indicatorp = NULL;
	struct virtio_thinint_area *thinint_area = NULL;
	struct airq_info *airq_info = vcdev->airq_info;

	if (vcdev->is_thinint) {
		thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
						     sizeof(*thinint_area));
		if (!thinint_area)
			return;
		thinint_area->summary_indicator =
			(unsigned long) get_summary_indicator(airq_info);
		thinint_area->isc = VIRTIO_AIRQ_ISC;
		ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
		ccw->count = sizeof(*thinint_area);
		ccw->cda = (__u32)(unsigned long) thinint_area;
	} else {
		/* payload is the address of the indicators */
		indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
						   sizeof(indicators(vcdev)));
		if (!indicatorp)
			return;
		*indicatorp = 0;
		ccw->cmd_code = CCW_CMD_SET_IND;
		ccw->count = sizeof(indicators(vcdev));
		ccw->cda = (__u32)(unsigned long) indicatorp;
	}
	/* Deregister indicators from host. */
	*indicators(vcdev) = 0;
	ccw->flags = 0;
	ret = ccw_io_helper(vcdev, ccw,
			    vcdev->is_thinint ?
			    VIRTIO_CCW_DOING_SET_IND_ADAPTER :
			    VIRTIO_CCW_DOING_SET_IND);
	if (ret && (ret != -ENODEV))
		dev_info(&vcdev->cdev->dev,
			 "Failed to deregister indicators (%d)\n", ret);
	else if (vcdev->is_thinint)
		virtio_ccw_drop_indicators(vcdev);
	ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(indicators(vcdev)));
	ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
}
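
/*
 * Queue notifications ("kicks") are forwarded to the host via the KVM
 * hypercall interface: diagnose 0x500 with KVM_S390_VIRTIO_CCW_NOTIFY in
 * register 1, the subchannel id in register 2, the queue index in
 * register 3 and the cookie from the previous notification in register 4;
 * the host returns a new cookie (or a negative error) in register 2.
 */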

static inline long __do_kvm_notify(struct subchannel_id schid,
				   unsigned long queue_index,
				   long cookie)
{
	register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
	register struct subchannel_id __schid asm("2") = schid;
	register unsigned long __index asm("3") = queue_index;
	register long __rc asm("2");
	register long __cookie asm("4") = cookie;

	asm volatile ("diag 2,4,0x500\n"
		      : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
		      "d"(__cookie)
		      : "memory", "cc");
	return __rc;
}

static inline long do_kvm_notify(struct subchannel_id schid,
				 unsigned long queue_index,
				 long cookie)
{
	diag_stat_inc(DIAG_STAT_X500);
	return __do_kvm_notify(schid, queue_index, cookie);
}

static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
{
	struct virtio_ccw_vq_info *info = vq->priv;
	struct virtio_ccw_device *vcdev;
	struct subchannel_id schid;

	vcdev = to_vc_device(info->vq->vdev);
	ccw_device_get_schid(vcdev->cdev, &schid);
	info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
	if (info->cookie < 0)
		return false;
	return true;
}

static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
				   struct ccw1 *ccw, int index)
{
	int ret;

	vcdev->dma_area->config_block.index = index;
	ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
	ccw->flags = 0;
	ccw->count = sizeof(struct vq_config_block);
	ccw->cda = (__u32)(unsigned long)(&vcdev->dma_area->config_block);
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
	if (ret)
		return ret;
	return vcdev->dma_area->config_block.num ?: -ENOENT;
}

static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
	struct virtio_ccw_vq_info *info = vq->priv;
	unsigned long flags;
	int ret;
	unsigned int index = vq->index;

	/* Remove from our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	/* Release from host. */
	if (vcdev->revision == 0) {
		info->info_block->l.queue = 0;
		info->info_block->l.align = 0;
		info->info_block->l.index = index;
		info->info_block->l.num = 0;
		ccw->count = sizeof(info->info_block->l);
	} else {
		info->info_block->s.desc = 0;
		info->info_block->s.index = index;
		info->info_block->s.num = 0;
		info->info_block->s.avail = 0;
		info->info_block->s.used = 0;
		ccw->count = sizeof(info->info_block->s);
	}
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	ret = ccw_io_helper(vcdev, ccw,
			    VIRTIO_CCW_DOING_SET_VQ | index);
	/*
	 * -ENODEV isn't considered an error: The device is gone anyway.
	 * This may happen on device detach.
	 */
	if (ret && (ret != -ENODEV))
		dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
			 ret, index);

	vring_del_virtqueue(vq);
	ccw_device_dma_free(vcdev->cdev, info->info_block,
			    sizeof(*info->info_block));
	kfree(info);
}

static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct ccw1 *ccw;
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	virtio_ccw_drop_indicator(vcdev, ccw);

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		virtio_ccw_del_vq(vq, ccw);

	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
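
/*
 * Set up a single virtqueue: query the maximum queue size with
 * READ_VQ_CONF, create the vring (possibly smaller for revision 1 and
 * later) and register the ring addresses with the host via SET_VQ,
 * using the legacy or the revision 1 queue info block layout depending
 * on the negotiated transport revision.
 */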

static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
					     int i, vq_callback_t *callback,
					     const char *name, bool ctx,
					     struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int err;
	struct virtqueue *vq = NULL;
	struct virtio_ccw_vq_info *info;
	u64 queue;
	unsigned long flags;
	bool may_reduce;

	/* Allocate queue. */
	info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
	if (!info) {
		dev_warn(&vcdev->cdev->dev, "no info\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->info_block = ccw_device_dma_zalloc(vcdev->cdev,
						 sizeof(*info->info_block));
	if (!info->info_block) {
		dev_warn(&vcdev->cdev->dev, "no info block\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
	if (info->num < 0) {
		err = info->num;
		goto out_err;
	}
	may_reduce = vcdev->revision > 0;
	vq = vring_create_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN,
				    vdev, true, may_reduce, ctx,
				    virtio_ccw_kvm_notify, callback, name);

	if (!vq) {
		/* For now, we fail if we can't get the requested size. */
		dev_warn(&vcdev->cdev->dev, "no vq\n");
		err = -ENOMEM;
		goto out_err;
	}
	/* it may have been reduced */
	info->num = virtqueue_get_vring_size(vq);

	/* Register it with the host. */
	queue = virtqueue_get_desc_addr(vq);
	if (vcdev->revision == 0) {
		info->info_block->l.queue = queue;
		info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
		info->info_block->l.index = i;
		info->info_block->l.num = info->num;
		ccw->count = sizeof(info->info_block->l);
	} else {
		info->info_block->s.desc = queue;
		info->info_block->s.index = i;
		info->info_block->s.num = info->num;
		info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq);
		info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq);
		ccw->count = sizeof(info->info_block->s);
	}
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
	if (err) {
		dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
		goto out_err;
	}

	info->vq = vq;
	vq->priv = info;

	/* Save it to our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_add(&info->node, &vcdev->virtqueues);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	return vq;

out_err:
	if (vq)
		vring_del_virtqueue(vq);
	if (info) {
		ccw_device_dma_free(vcdev->cdev, info->info_block,
				    sizeof(*info->info_block));
	}
	kfree(info);
	return ERR_PTR(err);
}
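
/*
 * Allocate adapter interrupt indicator bits for all queues and hand the
 * indicator area, the summary indicator and the ISC to the host with a
 * SET_IND_ADAPTER channel command.  On failure the caller falls back to
 * classic per-device indicators.
 */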

static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
					   struct virtqueue *vqs[], int nvqs,
					   struct ccw1 *ccw)
{
	int ret;
	struct virtio_thinint_area *thinint_area = NULL;
	struct airq_info *info;

	thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
					     sizeof(*thinint_area));
	if (!thinint_area) {
		ret = -ENOMEM;
		goto out;
	}
	/* Try to get an indicator. */
	thinint_area->indicator = get_airq_indicator(vqs, nvqs,
						     &thinint_area->bit_nr,
						     &vcdev->airq_info);
	if (!thinint_area->indicator) {
		ret = -ENOSPC;
		goto out;
	}
	info = vcdev->airq_info;
	thinint_area->summary_indicator =
		(unsigned long) get_summary_indicator(info);
	thinint_area->isc = VIRTIO_AIRQ_ISC;
	ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(*thinint_area);
	ccw->cda = (__u32)(unsigned long)thinint_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			/*
			 * The host does not support adapter interrupts
			 * for virtio-ccw, stop trying.
			 */
			virtio_ccw_use_airq = 0;
			pr_info("Adapter interrupts unsupported on host\n");
		} else
			dev_warn(&vcdev->cdev->dev,
				 "enabling adapter interrupts = %d\n", ret);
		virtio_ccw_drop_indicators(vcdev);
	}
out:
	ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
	return ret;
}

static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			       struct virtqueue *vqs[],
			       vq_callback_t *callbacks[],
			       const char * const names[],
			       const bool *ctx,
			       struct irq_affinity *desc)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	unsigned long *indicatorp = NULL;
	int ret, i, queue_idx = 0;
	struct ccw1 *ccw;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return -ENOMEM;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i],
					     names[i], ctx ? ctx[i] : false,
					     ccw);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			vqs[i] = NULL;
			goto out;
		}
	}
	ret = -ENOMEM;
	/*
	 * We need a data area under 2G to communicate. Our payload is
	 * the address of the indicators.
	 */
	indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
					   sizeof(indicators(vcdev)));
	if (!indicatorp)
		goto out;
	*indicatorp = (unsigned long) indicators(vcdev);
	if (vcdev->is_thinint) {
		ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
		if (ret)
			/* no error, just fall back to legacy interrupts */
			vcdev->is_thinint = false;
	}
	if (!vcdev->is_thinint) {
		/* Register queue indicators with host. */
		*indicators(vcdev) = 0;
		ccw->cmd_code = CCW_CMD_SET_IND;
		ccw->flags = 0;
		ccw->count = sizeof(indicators(vcdev));
		ccw->cda = (__u32)(unsigned long) indicatorp;
		ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
		if (ret)
			goto out;
	}
	/* Register indicators2 with host for config changes */
	*indicatorp = (unsigned long) indicators2(vcdev);
	*indicators2(vcdev) = 0;
	ccw->cmd_code = CCW_CMD_SET_CONF_IND;
	ccw->flags = 0;
	ccw->count = sizeof(indicators2(vcdev));
	ccw->cda = (__u32)(unsigned long) indicatorp;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
	if (ret)
		goto out;

	if (indicatorp)
		ccw_device_dma_free(vcdev->cdev, indicatorp,
				    sizeof(indicators(vcdev)));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	return 0;
out:
	if (indicatorp)
		ccw_device_dma_free(vcdev->cdev, indicatorp,
				    sizeof(indicators(vcdev)));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	virtio_ccw_del_vqs(vdev);
	return ret;
}

static void virtio_ccw_reset(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	/* Zero status bits. */
	vcdev->dma_area->status = 0;

	/* Send a reset ccw on device. */
	ccw->cmd_code = CCW_CMD_VDEV_RESET;
	ccw->flags = 0;
	ccw->count = 0;
	ccw->cda = 0;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}
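
/*
 * Feature bits are transferred as two 32-bit words selected by
 * virtio_feature_desc->index: word 0 holds bits 0-31, word 1 (only
 * available for revision 1 and later) holds bits 32-63.
 */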

static u64 virtio_ccw_get_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	int ret;
	u64 rc;
	struct ccw1 *ccw;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return 0;

	features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
	if (!features) {
		rc = 0;
		goto out_free;
	}
	/* Read the feature bits from the host. */
	features->index = 0;
	ccw->cmd_code = CCW_CMD_READ_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
	if (ret) {
		rc = 0;
		goto out_free;
	}

	rc = le32_to_cpu(features->features);

	if (vcdev->revision == 0)
		goto out_free;

	/* Read second half of the feature bits from the host. */
	features->index = 1;
	ccw->cmd_code = CCW_CMD_READ_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
	if (ret == 0)
		rc |= (u64)le32_to_cpu(features->features) << 32;

out_free:
	ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	return rc;
}

static void ccw_transport_features(struct virtio_device *vdev)
{
	/*
	 * Currently nothing to do here.
	 */
}

static int virtio_ccw_finalize_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	struct ccw1 *ccw;
	int ret;

	if (vcdev->revision >= 1 &&
	    !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "virtio: device uses revision 1 "
			"but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return -ENOMEM;

	features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
	if (!features) {
		ret = -ENOMEM;
		goto out_free;
	}
	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Give virtio_ccw a chance to accept features. */
	ccw_transport_features(vdev);

	features->index = 0;
	features->features = cpu_to_le32((u32)vdev->features);
	/* Write the first half of the feature bits to the host. */
	ccw->cmd_code = CCW_CMD_WRITE_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
	if (ret)
		goto out_free;

	if (vcdev->revision == 0)
		goto out_free;

	features->index = 1;
	features->features = cpu_to_le32(vdev->features >> 32);
	/* Write the second half of the feature bits to the host. */
	ccw->cmd_code = CCW_CMD_WRITE_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);

out_free:
	ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));

	return ret;
}
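
/*
 * Config space accesses always transfer the area from offset 0 up to
 * offset + len and are cached in vcdev->config; config_ready tracks how
 * much of the cache is valid so that a partial write does not clobber
 * fields the guest has never read.
 */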

static void virtio_ccw_get_config(struct virtio_device *vdev,
				  unsigned int offset, void *buf, unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int ret;
	struct ccw1 *ccw;
	void *config_area;
	unsigned long flags;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	config_area = ccw_device_dma_zalloc(vcdev->cdev,
					    VIRTIO_CCW_CONFIG_SIZE);
	if (!config_area)
		goto out_free;

	/* Read the config area from the host. */
	ccw->cmd_code = CCW_CMD_READ_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
	if (ret)
		goto out_free;

	spin_lock_irqsave(&vcdev->lock, flags);
	memcpy(vcdev->config, config_area, offset + len);
	if (vcdev->config_ready < offset + len)
		vcdev->config_ready = offset + len;
	spin_unlock_irqrestore(&vcdev->lock, flags);
	if (buf)
		memcpy(buf, config_area + offset, len);

out_free:
	ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}

static void virtio_ccw_set_config(struct virtio_device *vdev,
				  unsigned int offset, const void *buf,
				  unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;
	void *config_area;
	unsigned long flags;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	config_area = ccw_device_dma_zalloc(vcdev->cdev,
					    VIRTIO_CCW_CONFIG_SIZE);
	if (!config_area)
		goto out_free;

	/* Make sure we don't overwrite fields. */
	if (vcdev->config_ready < offset)
		virtio_ccw_get_config(vdev, 0, NULL, offset);
	spin_lock_irqsave(&vcdev->lock, flags);
	memcpy(&vcdev->config[offset], buf, len);
	/* Write the config area to the host. */
	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
	spin_unlock_irqrestore(&vcdev->lock, flags);
	ccw->cmd_code = CCW_CMD_WRITE_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);

out_free:
	ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}

static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	u8 old_status = vcdev->dma_area->status;
	struct ccw1 *ccw;

	if (vcdev->revision < 1)
		return vcdev->dma_area->status;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return old_status;

	ccw->cmd_code = CCW_CMD_READ_STATUS;
	ccw->flags = 0;
	ccw->count = sizeof(vcdev->dma_area->status);
	ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
	/*
	 * If the channel program failed (should only happen if the device
	 * was hotunplugged, and then we clean up via the machine check
	 * handler anyway), vcdev->dma_area->status was not overwritten and
	 * we just return the old status, which is fine.
	 */
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));

	return vcdev->dma_area->status;
}

static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	u8 old_status = vcdev->dma_area->status;
	struct ccw1 *ccw;
	int ret;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	/* Write the status to the host. */
	vcdev->dma_area->status = status;
	ccw->cmd_code = CCW_CMD_WRITE_STATUS;
	ccw->flags = 0;
	ccw->count = sizeof(status);
	ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
	/* Write failed? We assume status is unchanged. */
	if (ret)
		vcdev->dma_area->status = old_status;
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}

static const char *virtio_ccw_bus_name(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);

	return dev_name(&vcdev->cdev->dev);
}

static const struct virtio_config_ops virtio_ccw_config_ops = {
	.get_features = virtio_ccw_get_features,
	.finalize_features = virtio_ccw_finalize_features,
	.get = virtio_ccw_get_config,
	.set = virtio_ccw_set_config,
	.get_status = virtio_ccw_get_status,
	.set_status = virtio_ccw_set_status,
	.reset = virtio_ccw_reset,
	.find_vqs = virtio_ccw_find_vqs,
	.del_vqs = virtio_ccw_del_vqs,
	.bus_name = virtio_ccw_bus_name,
};


/*
 * ccw bus driver related functions
 */

static void virtio_ccw_release_dev(struct device *_d)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	struct virtio_ccw_device *vcdev = to_vc_device(dev);

	ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
			    sizeof(*vcdev->dma_area));
	kfree(vcdev);
}

static int irb_is_error(struct irb *irb)
{
	if (scsw_cstat(&irb->scsw) != 0)
		return 1;
	if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		return 1;
	if (scsw_cc(&irb->scsw) != 0)
		return 1;
	return 0;
}

static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
					      int index)
{
	struct virtio_ccw_vq_info *info;
	unsigned long flags;
	struct virtqueue *vq;

	vq = NULL;
	spin_lock_irqsave(&vcdev->lock, flags);
	list_for_each_entry(info, &vcdev->virtqueues, node) {
		if (info->vq->index == index) {
			vq = info->vq;
			break;
		}
	}
	spin_unlock_irqrestore(&vcdev->lock, flags);
	return vq;
}
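
/*
 * Interrupt handler for the ccw device: complete the channel program
 * tracked in curr_io (waking up ccw_io_helper()), then run
 * vring_interrupt() for every queue whose classic indicator bit is set.
 * Bit 0 of the second indicator signals a config change.
 */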

static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
				      __u32 activity)
{
	if (vcdev->curr_io & activity) {
		switch (activity) {
		case VIRTIO_CCW_DOING_READ_FEAT:
		case VIRTIO_CCW_DOING_WRITE_FEAT:
		case VIRTIO_CCW_DOING_READ_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_STATUS:
		case VIRTIO_CCW_DOING_READ_STATUS:
		case VIRTIO_CCW_DOING_SET_VQ:
		case VIRTIO_CCW_DOING_SET_IND:
		case VIRTIO_CCW_DOING_SET_CONF_IND:
		case VIRTIO_CCW_DOING_RESET:
		case VIRTIO_CCW_DOING_READ_VQ_CONF:
		case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
		case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
			vcdev->curr_io &= ~activity;
			wake_up(&vcdev->wait_q);
			break;
		default:
			/* don't know what to do... */
			dev_warn(&vcdev->cdev->dev,
				 "Suspicious activity '%08x'\n", activity);
			WARN_ON(1);
			break;
		}
	}
}

static void virtio_ccw_int_handler(struct ccw_device *cdev,
				   unsigned long intparm,
				   struct irb *irb)
{
	__u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
	int i;
	struct virtqueue *vq;

	if (!vcdev)
		return;
	if (IS_ERR(irb)) {
		vcdev->err = PTR_ERR(irb);
		virtio_ccw_check_activity(vcdev, activity);
		/* Don't poke around indicators, something's wrong. */
		return;
	}
	/* Check if it's a notification from the host. */
	if ((intparm == 0) &&
	    (scsw_stctl(&irb->scsw) ==
	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
		/* OK */
	}
	if (irb_is_error(irb)) {
		/* Command reject? */
		if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
		    (irb->ecw[0] & SNS0_CMD_REJECT))
			vcdev->err = -EOPNOTSUPP;
		else
			/* Map everything else to -EIO. */
			vcdev->err = -EIO;
	}
	virtio_ccw_check_activity(vcdev, activity);
	for_each_set_bit(i, indicators(vcdev),
			 sizeof(*indicators(vcdev)) * BITS_PER_BYTE) {
		/* The bit clear must happen before the vring kick. */
		clear_bit(i, indicators(vcdev));
		barrier();
		vq = virtio_ccw_vq_by_ind(vcdev, i);
		vring_interrupt(0, vq);
	}
	if (test_bit(0, indicators2(vcdev))) {
		virtio_config_changed(&vcdev->vdev);
		clear_bit(0, indicators2(vcdev));
	}
}

/*
 * We usually want to autoonline all devices, but give the admin
 * a way to exempt devices from this.
 */
#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
		     (8*sizeof(long)))
static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];

static char *no_auto = "";

module_param(no_auto, charp, 0444);
MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");
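
/*
 * no_auto takes a comma separated list of bus ids and bus id ranges in
 * cssid.ssid.devno notation (hex digits), for example
 * no_auto=0.0.1234,0.1.0000-0.1.00ff.
 */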

static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
{
	struct ccw_dev_id id;

	ccw_device_get_id(cdev, &id);
	if (test_bit(id.devno, devs_no_auto[id.ssid]))
		return 0;
	return 1;
}

static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
}

static int virtio_ccw_probe(struct ccw_device *cdev)
{
	cdev->handler = virtio_ccw_int_handler;

	if (virtio_ccw_check_autoonline(cdev))
		async_schedule(virtio_ccw_auto_online, cdev);
	return 0;
}

static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	vcdev = dev_get_drvdata(&cdev->dev);
	if (!vcdev || vcdev->going_away) {
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		return NULL;
	}
	vcdev->going_away = true;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return vcdev;
}

static void virtio_ccw_remove(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (vcdev && cdev->online) {
		if (vcdev->device_lost)
			virtio_break_device(&vcdev->vdev);
		unregister_virtio_device(&vcdev->vdev);
		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
		dev_set_drvdata(&cdev->dev, NULL);
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	}
	cdev->handler = NULL;
}

static int virtio_ccw_offline(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (!vcdev)
		return 0;
	if (vcdev->device_lost)
		virtio_break_device(&vcdev->vdev);
	unregister_virtio_device(&vcdev->vdev);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return 0;
}
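
/*
 * Negotiate the transport revision with the host: start at the highest
 * revision we support and step down on every command reject.  A host
 * that rejects even revision 0 simply does not implement SET_VIRTIO_REV
 * and is operated in legacy mode.
 */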

static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
{
	struct virtio_rev_info *rev;
	struct ccw1 *ccw;
	int ret;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return -ENOMEM;
	rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev));
	if (!rev) {
		ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
		return -ENOMEM;
	}

	/* Set transport revision */
	ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
	ccw->flags = 0;
	ccw->count = sizeof(*rev);
	ccw->cda = (__u32)(unsigned long)rev;

	vcdev->revision = VIRTIO_CCW_REV_MAX;
	do {
		rev->revision = vcdev->revision;
		/* none of our supported revisions carry payload */
		rev->length = 0;
		ret = ccw_io_helper(vcdev, ccw,
				    VIRTIO_CCW_DOING_SET_VIRTIO_REV);
		if (ret == -EOPNOTSUPP) {
			if (vcdev->revision == 0)
				/*
				 * The host device does not support setting
				 * the revision: let's operate it in legacy
				 * mode.
				 */
				ret = 0;
			else
				vcdev->revision--;
		}
	} while (ret == -EOPNOTSUPP);

	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	ccw_device_dma_free(vcdev->cdev, rev, sizeof(*rev));
	return ret;
}

static int virtio_ccw_online(struct ccw_device *cdev)
{
	int ret;
	struct virtio_ccw_device *vcdev;
	unsigned long flags;

	vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
	if (!vcdev) {
		dev_warn(&cdev->dev, "Could not get memory for virtio\n");
		ret = -ENOMEM;
		goto out_free;
	}
	vcdev->vdev.dev.parent = &cdev->dev;
	vcdev->cdev = cdev;
	vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
						sizeof(*vcdev->dma_area));
	if (!vcdev->dma_area) {
		ret = -ENOMEM;
		goto out_free;
	}

	vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */

	vcdev->vdev.dev.release = virtio_ccw_release_dev;
	vcdev->vdev.config = &virtio_ccw_config_ops;
	init_waitqueue_head(&vcdev->wait_q);
	INIT_LIST_HEAD(&vcdev->virtqueues);
	spin_lock_init(&vcdev->lock);
	mutex_init(&vcdev->io_lock);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, vcdev);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	vcdev->vdev.id.vendor = cdev->id.cu_type;
	vcdev->vdev.id.device = cdev->id.cu_model;

	ret = virtio_ccw_set_transport_rev(vcdev);
	if (ret)
		goto out_free;

	ret = register_virtio_device(&vcdev->vdev);
	if (ret) {
		dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
			 ret);
		goto out_put;
	}
	return 0;
out_put:
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	put_device(&vcdev->vdev.dev);
	return ret;
out_free:
	if (vcdev) {
		ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
				    sizeof(*vcdev->dma_area));
	}
	kfree(vcdev);
	return ret;
}

static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
{
	int rc;
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);

	/*
	 * Make sure vcdev is set
	 * i.e. set_offline/remove callback not already running
	 */
	if (!vcdev)
		return NOTIFY_DONE;

	switch (event) {
	case CIO_GONE:
		vcdev->device_lost = true;
		rc = NOTIFY_DONE;
		break;
	case CIO_OPER:
		rc = NOTIFY_OK;
		break;
	default:
		rc = NOTIFY_DONE;
		break;
	}
	return rc;
}

static struct ccw_device_id virtio_ids[] = {
	{ CCW_DEVICE(0x3832, 0) },
	{},
};

#ifdef CONFIG_PM_SLEEP
static int virtio_ccw_freeze(struct ccw_device *cdev)
{
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);

	return virtio_device_freeze(&vcdev->vdev);
}

static int virtio_ccw_restore(struct ccw_device *cdev)
{
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
	int ret;

	ret = virtio_ccw_set_transport_rev(vcdev);
	if (ret)
		return ret;

	return virtio_device_restore(&vcdev->vdev);
}
#endif

static struct ccw_driver virtio_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "virtio_ccw",
	},
	.ids = virtio_ids,
	.probe = virtio_ccw_probe,
	.remove = virtio_ccw_remove,
	.set_offline = virtio_ccw_offline,
	.set_online = virtio_ccw_online,
	.notify = virtio_ccw_cio_notify,
	.int_class = IRQIO_VIR,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtio_ccw_freeze,
	.thaw = virtio_ccw_restore,
	.restore = virtio_ccw_restore,
#endif
};

static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
			   int max_digit, int max_val)
{
	int diff;

	diff = 0;
	*val = 0;

	while (diff <= max_digit) {
		int value = hex_to_bin(**cp);

		if (value < 0)
			break;
		*val = *val * 16 + value;
		(*cp)++;
		diff++;
	}

	if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
		return 1;

	return 0;
}

static int __init parse_busid(char *str, unsigned int *cssid,
			      unsigned int *ssid, unsigned int *devno)
{
	char *str_work;
	int rc, ret;

	rc = 1;

	if (*str == '\0')
		goto out;

	str_work = str;
	ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
	if (ret || (str_work[0] != '.'))
		goto out;
	str_work++;
	ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
	if (ret || (str_work[0] != '.'))
		goto out;
	str_work++;
	ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
	if (ret || (str_work[0] != '\0'))
		goto out;

	rc = 0;
out:
	return rc;
}

static void __init no_auto_parse(void)
{
	unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
	char *parm, *str;
	int rc;

	str = no_auto;
	while ((parm = strsep(&str, ","))) {
		rc = parse_busid(strsep(&parm, "-"), &from_cssid,
				 &from_ssid, &from);
		if (rc)
			continue;
		if (parm != NULL) {
			rc = parse_busid(parm, &to_cssid,
					 &to_ssid, &to);
			if ((from_ssid > to_ssid) ||
			    ((from_ssid == to_ssid) && (from > to)))
				rc = -EINVAL;
		} else {
			to_cssid = from_cssid;
			to_ssid = from_ssid;
			to = from;
		}
		if (rc)
			continue;
		while ((from_ssid < to_ssid) ||
		       ((from_ssid == to_ssid) && (from <= to))) {
			set_bit(from, devs_no_auto[from_ssid]);
			from++;
			if (from > __MAX_SUBCHANNEL) {
				from_ssid++;
				from = 0;
			}
		}
	}
}

static int __init virtio_ccw_init(void)
{
	int rc;

	/* parse no_auto string before we do anything further */
	no_auto_parse();

	summary_indicators = cio_dma_zalloc(MAX_AIRQ_AREAS);
	if (!summary_indicators)
		return -ENOMEM;
	rc = ccw_driver_register(&virtio_ccw_driver);
	if (rc)
		cio_dma_free(summary_indicators, MAX_AIRQ_AREAS);
	return rc;
}
device_initcall(virtio_ccw_init);