// SPDX-License-Identifier: GPL-2.0
/*
 * ccw based virtio transport
 *
 * Copyright IBM Corp. 2012, 2014
 *
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
#include <linux/pfn.h>
#include <linux/async.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/kvm_para.h>
#include <linux/notifier.h>
#include <asm/diag.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/virtio-ccw.h>
#include <asm/isc.h>
#include <asm/airq.h>

/*
 * virtio related functions
 */

/* Payload of CCW_CMD_READ_VQ_CONF: queue index in, queue size out. */
struct vq_config_block {
	__u16 index;
	__u16 num;
} __packed;

#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */

/*
 * Per-device data that must live in DMA-capable memory obtained via
 * ccw_device_dma_zalloc(), because the host accesses it directly.
 */
struct vcdev_dma_area {
	unsigned long indicators;	/* classic per-queue interrupt bits */
	unsigned long indicators2;	/* config-change indicator (bit 0) */
	struct vq_config_block config_block;
	__u8 status;			/* device status byte shared with host */
};

struct virtio_ccw_device {
	struct virtio_device vdev;
	__u8 config[VIRTIO_CCW_CONFIG_SIZE];	/* cached config space */
	struct ccw_device *cdev;
	__u32 curr_io;			/* VIRTIO_CCW_DOING_* flags in flight */
	int err;			/* result of the last channel program */
	unsigned int revision; /* Transport revision */
	wait_queue_head_t wait_q;
	spinlock_t lock;		/* protects virtqueues list + config cache */
	rwlock_t irq_lock;
	struct mutex io_lock; /* Serializes I/O requests */
	struct list_head virtqueues;
	bool is_thinint;		/* using adapter (thin) interrupts? */
	bool going_away;
	bool device_lost;
	unsigned int config_ready;	/* how much of config[] is valid */
	void *airq_info;
	struct vcdev_dma_area *dma_area;
};

static inline unsigned long *indicators(struct virtio_ccw_device *vcdev)
{
	return &vcdev->dma_area->indicators;
}

static inline unsigned long *indicators2(struct virtio_ccw_device *vcdev)
{
	return &vcdev->dma_area->indicators2;
}

/* CCW_CMD_SET_VQ payload for transport revision 0 (legacy ring layout). */
struct vq_info_block_legacy {
	__u64 queue;
	__u32 align;
	__u16 index;
	__u16 num;
} __packed;

/* CCW_CMD_SET_VQ payload for transport revision >= 1 (split ring parts). */
struct vq_info_block {
	__u64 desc;
	__u32 res0;
	__u16 index;
	__u16 num;
	__u64 avail;
	__u64 used;
} __packed;

/* Payload for CCW_CMD_READ_FEAT/CCW_CMD_WRITE_FEAT, 32 feature bits at a time. */
struct virtio_feature_desc {
	__le32 features;
	__u8 index;
} __packed;

/* Payload for CCW_CMD_SET_IND_ADAPTER (adapter/thin interrupts). */
struct virtio_thinint_area {
	unsigned long summary_indicator;
	unsigned long indicator;
	u64 bit_nr;
	u8 isc;
} __packed;

/* Payload for CCW_CMD_SET_VIRTIO_REV (transport revision negotiation). */
struct virtio_rev_info {
	__u16 revision;
	__u16 length;
	__u8 data[];
};

/* the highest virtio-ccw revision we support */
#define VIRTIO_CCW_REV_MAX 2

struct virtio_ccw_vq_info {
	struct virtqueue *vq;
	int num;			/* ring size actually used */
	union {
		struct vq_info_block s;
		struct vq_info_block_legacy l;
	} *info_block;			/* DMA memory handed to the host */
	int bit_nr;
	struct list_head node;
	long cookie;			/* returned by the notify hypercall */
};

#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */

#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
#define MAX_AIRQ_AREAS 20

static int virtio_ccw_use_airq = 1;

struct airq_info {
	rwlock_t lock;
	u8 summary_indicator_idx;	/* index into summary_indicators[] */
	struct airq_struct airq;
	struct airq_iv *aiv;		/* maps indicator bits -> virtqueues */
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
static DEFINE_MUTEX(airq_areas_lock);

static u8 *summary_indicators;

static inline u8 *get_summary_indicator(struct airq_info *info)
{
	return summary_indicators + info->summary_indicator_idx;
}

#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_FEAT 0x12
#define CCW_CMD_WRITE_FEAT 0x11
#define CCW_CMD_READ_CONF 0x22
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
#define CCW_CMD_READ_STATUS 0x72
#define CCW_CMD_SET_IND_ADAPTER 0x73
#define CCW_CMD_SET_VIRTIO_REV 0x83

/*
 * Intparm flags identifying which channel program is in flight; also used
 * as bits in vcdev->curr_io.
 */
#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000

static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
{
	return container_of(vdev, struct virtio_ccw_device, vdev);
}

/* Remove the vq's indicator bit/pointer from an adapter interrupt area. */
static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
{
	unsigned long i, flags;

	write_lock_irqsave(&info->lock, flags);
	for (i = 0; i < airq_iv_end(info->aiv); i++) {
		if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
			airq_iv_free_bit(info->aiv, i);
			airq_iv_set_ptr(info->aiv, i, 0);
			break;
		}
	}
	write_unlock_irqrestore(&info->lock, flags);
}

/*
 * Adapter interrupt handler: dispatch vring_interrupt() for every set
 * indicator bit, then clear the summary indicator and scan once more to
 * close the window with the host re-posting indicators.
 */
static void virtio_airq_handler(struct airq_struct *airq, bool floating)
{
	struct airq_info *info = container_of(airq, struct airq_info, airq);
	unsigned long ai;

	inc_irq_stat(IRQIO_VAI);
	read_lock(&info->lock);
	/* Walk through indicators field, summary indicator active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	*(get_summary_indicator(info)) = 0;
	smp_wmb();
	/* Walk through indicators field, summary indicator not active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	read_unlock(&info->lock);
}

/* Allocate and register one adapter interrupt area; NULL on failure. */
static struct airq_info *new_airq_info(int index)
{
	struct airq_info *info;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;
	rwlock_init(&info->lock);
	info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR
				   | AIRQ_IV_CACHELINE);
	if (!info->aiv) {
		kfree(info);
		return NULL;
	}
	info->airq.handler = virtio_airq_handler;
	info->summary_indicator_idx = index;
	info->airq.lsi_ptr = get_summary_indicator(info);
	info->airq.lsi_mask = 0xff;
	info->airq.isc = VIRTIO_AIRQ_ISC;
	rc = register_adapter_interrupt(&info->airq);
	if (rc) {
		airq_iv_release(info->aiv);
		kfree(info);
		return NULL;
	}
	return info;
}

/*
 * Find (or create) an airq area with nvqs free consecutive bits, bind the
 * virtqueues to those bits and return the indicator address (0 on failure).
 * *first receives the first bit number, *airq_info the chosen area.
 */
static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
					u64 *first, void **airq_info)
{
	int i, j;
	struct airq_info *info;
	unsigned long indicator_addr = 0;
	unsigned long bit, flags;

	for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
		mutex_lock(&airq_areas_lock);
		if (!airq_areas[i])
			airq_areas[i] = new_airq_info(i);
		info = airq_areas[i];
		mutex_unlock(&airq_areas_lock);
		if (!info)
			return 0;
		write_lock_irqsave(&info->lock, flags);
		bit = airq_iv_alloc(info->aiv, nvqs);
		if (bit == -1UL) {
			/* Not enough vacancies. */
			write_unlock_irqrestore(&info->lock, flags);
			continue;
		}
		*first = bit;
		*airq_info = info;
		indicator_addr = (unsigned long)info->aiv->vector;
		for (j = 0; j < nvqs; j++) {
			airq_iv_set_ptr(info->aiv, bit + j,
					(unsigned long)vqs[j]);
		}
		write_unlock_irqrestore(&info->lock, flags);
	}
	return indicator_addr;
}

/* Drop the adapter-interrupt bindings of all of this device's virtqueues. */
static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
{
	struct virtio_ccw_vq_info *info;

	if (!vcdev->airq_info)
		return;
	list_for_each_entry(info, &vcdev->virtqueues, node)
		drop_airq_indicator(info->vq, vcdev->airq_info);
}

/*
 * wait_event() condition for ccw_io_helper(): non-zero while the channel
 * program identified by flag is still in flight and no error was recorded.
 */
static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
	unsigned long flags;
	__u32 ret;

	spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
	if (vcdev->err)
		ret = 0;
	else
		ret = vcdev->curr_io & flag;
	spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
	return ret;
}

/*
 * Start a channel program and wait for its completion (signalled via
 * virtio_ccw_int_handler()). Serialized by vcdev->io_lock; retries while
 * the subchannel is busy.
 */
static int ccw_io_helper(struct virtio_ccw_device *vcdev,
			 struct ccw1 *ccw, __u32 intparm)
{
	int ret;
	unsigned long flags;
	int flag = intparm & VIRTIO_CCW_INTPARM_MASK;

	mutex_lock(&vcdev->io_lock);
	do {
		spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
		ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
		if (!ret) {
			if (!vcdev->curr_io)
				vcdev->err = 0;
			vcdev->curr_io |= flag;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
		cpu_relax();
	} while (ret == -EBUSY);
	wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
	ret = ret ? ret : vcdev->err;
	mutex_unlock(&vcdev->io_lock);
	return ret;
}

/* Tell the host to stop using our interrupt indicators (thin or classic). */
static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
				      struct ccw1 *ccw)
{
	int ret;
	unsigned long *indicatorp = NULL;
	struct virtio_thinint_area *thinint_area = NULL;
	struct airq_info *airq_info = vcdev->airq_info;

	if (vcdev->is_thinint) {
		thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
						     sizeof(*thinint_area));
		if (!thinint_area)
			return;
		thinint_area->summary_indicator =
			(unsigned long) get_summary_indicator(airq_info);
		thinint_area->isc = VIRTIO_AIRQ_ISC;
		ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
		ccw->count = sizeof(*thinint_area);
		ccw->cda = (__u32)(unsigned long) thinint_area;
	} else {
		/* payload is the address of the indicators */
		indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
						   sizeof(indicators(vcdev)));
		if (!indicatorp)
			return;
		*indicatorp = 0;
		ccw->cmd_code = CCW_CMD_SET_IND;
		ccw->count = sizeof(indicators(vcdev));
		ccw->cda = (__u32)(unsigned long) indicatorp;
	}
	/* Deregister indicators from host. */
	*indicators(vcdev) = 0;
	ccw->flags = 0;
	ret = ccw_io_helper(vcdev, ccw,
			    vcdev->is_thinint ?
			    VIRTIO_CCW_DOING_SET_IND_ADAPTER :
			    VIRTIO_CCW_DOING_SET_IND);
	if (ret && (ret != -ENODEV))
		dev_info(&vcdev->cdev->dev,
			 "Failed to deregister indicators (%d)\n", ret);
	else if (vcdev->is_thinint)
		virtio_ccw_drop_indicators(vcdev);
	ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(indicators(vcdev)));
	ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
}

/* virtqueue notify callback: kick the host via diagnose/hypercall. */
static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
{
	struct virtio_ccw_vq_info *info = vq->priv;
	struct virtio_ccw_device *vcdev;
	struct subchannel_id schid;

	vcdev = to_vc_device(info->vq->vdev);
	ccw_device_get_schid(vcdev->cdev, &schid);
	BUILD_BUG_ON(sizeof(struct subchannel_id) != sizeof(unsigned int));
	info->cookie = kvm_hypercall3(KVM_S390_VIRTIO_CCW_NOTIFY,
				      *((unsigned int *)&schid),
				      vq->index, info->cookie);
	if (info->cookie < 0)
		return false;
	return true;
}

/* Query the host for the size of virtqueue 'index'; <0 on error. */
static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
				   struct ccw1 *ccw, int index)
{
	int ret;

	vcdev->dma_area->config_block.index = index;
	ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
	ccw->flags = 0;
	ccw->count = sizeof(struct vq_config_block);
	ccw->cda = (__u32)(unsigned long)(&vcdev->dma_area->config_block);
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
	if (ret)
		return ret;
	return vcdev->dma_area->config_block.num ?: -ENOENT;
}

/* Unregister one virtqueue from the host and free its resources. */
static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
	struct virtio_ccw_vq_info *info = vq->priv;
	unsigned long flags;
	int ret;
	unsigned int index = vq->index;

	/* Remove from our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	/* Release from host. */
	if (vcdev->revision == 0) {
		info->info_block->l.queue = 0;
		info->info_block->l.align = 0;
		info->info_block->l.index = index;
		info->info_block->l.num = 0;
		ccw->count = sizeof(info->info_block->l);
	} else {
		info->info_block->s.desc = 0;
		info->info_block->s.index = index;
		info->info_block->s.num = 0;
		info->info_block->s.avail = 0;
		info->info_block->s.used = 0;
		ccw->count = sizeof(info->info_block->s);
	}
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	ret = ccw_io_helper(vcdev, ccw,
			    VIRTIO_CCW_DOING_SET_VQ | index);
	/*
	 * -ENODEV isn't considered an error: The device is gone anyway.
	 * This may happen on device detach.
	 */
	if (ret && (ret != -ENODEV))
		dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
			 ret, index);

	vring_del_virtqueue(vq);
	ccw_device_dma_free(vcdev->cdev, info->info_block,
			    sizeof(*info->info_block));
	kfree(info);
}

static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct ccw1 *ccw;
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	virtio_ccw_drop_indicator(vcdev, ccw);

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		virtio_ccw_del_vq(vq, ccw);

	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}

/* Create virtqueue i, register it with the host via CCW_CMD_SET_VQ. */
static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
					     int i, vq_callback_t *callback,
					     const char *name, bool ctx,
					     struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int err;
	struct virtqueue *vq = NULL;
	struct virtio_ccw_vq_info *info;
	u64 queue;
	unsigned long flags;
	bool may_reduce;

	/* Allocate queue. */
	info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
	if (!info) {
		dev_warn(&vcdev->cdev->dev, "no info\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->info_block = ccw_device_dma_zalloc(vcdev->cdev,
						 sizeof(*info->info_block));
	if (!info->info_block) {
		dev_warn(&vcdev->cdev->dev, "no info block\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
	if (info->num < 0) {
		err = info->num;
		goto out_err;
	}
	may_reduce = vcdev->revision > 0;
	vq = vring_create_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN,
				    vdev, true, may_reduce, ctx,
				    virtio_ccw_kvm_notify, callback, name);

	if (!vq) {
		/* For now, we fail if we can't get the requested size. */
		dev_warn(&vcdev->cdev->dev, "no vq\n");
		err = -ENOMEM;
		goto out_err;
	}
	/* it may have been reduced */
	info->num = virtqueue_get_vring_size(vq);

	/* Register it with the host. */
	queue = virtqueue_get_desc_addr(vq);
	if (vcdev->revision == 0) {
		info->info_block->l.queue = queue;
		info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
		info->info_block->l.index = i;
		info->info_block->l.num = info->num;
		ccw->count = sizeof(info->info_block->l);
	} else {
		info->info_block->s.desc = queue;
		info->info_block->s.index = i;
		info->info_block->s.num = info->num;
		info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq);
		info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq);
		ccw->count = sizeof(info->info_block->s);
	}
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
	if (err) {
		dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
		goto out_err;
	}

	info->vq = vq;
	vq->priv = info;

	/* Save it to our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_add(&info->node, &vcdev->virtqueues);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	return vq;

out_err:
	if (vq)
		vring_del_virtqueue(vq);
	if (info) {
		ccw_device_dma_free(vcdev->cdev, info->info_block,
				    sizeof(*info->info_block));
	}
	kfree(info);
	return ERR_PTR(err);
}

/* Register adapter (thin) interrupt indicators for all virtqueues. */
static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
					   struct virtqueue *vqs[], int nvqs,
					   struct ccw1 *ccw)
{
	int ret;
	struct virtio_thinint_area *thinint_area = NULL;
	struct airq_info *info;

	thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
					     sizeof(*thinint_area));
	if (!thinint_area) {
		ret = -ENOMEM;
		goto out;
	}
	/* Try to get an indicator. */
	thinint_area->indicator = get_airq_indicator(vqs, nvqs,
						     &thinint_area->bit_nr,
						     &vcdev->airq_info);
	if (!thinint_area->indicator) {
		ret = -ENOSPC;
		goto out;
	}
	info = vcdev->airq_info;
	thinint_area->summary_indicator =
		(unsigned long) get_summary_indicator(info);
	thinint_area->isc = VIRTIO_AIRQ_ISC;
	ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(*thinint_area);
	ccw->cda = (__u32)(unsigned long)thinint_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			/*
			 * The host does not support adapter interrupts
			 * for virtio-ccw, stop trying.
			 */
			virtio_ccw_use_airq = 0;
			pr_info("Adapter interrupts unsupported on host\n");
		} else
			dev_warn(&vcdev->cdev->dev,
				 "enabling adapter interrupts = %d\n", ret);
		virtio_ccw_drop_indicators(vcdev);
	}
out:
	ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
	return ret;
}

static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			       struct virtqueue *vqs[],
			       vq_callback_t *callbacks[],
			       const char * const names[],
			       const bool *ctx,
			       struct irq_affinity *desc)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	unsigned long *indicatorp = NULL;
	int ret, i, queue_idx = 0;
	struct ccw1 *ccw;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return -ENOMEM;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i],
					     names[i], ctx ? ctx[i] : false,
					     ccw);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			vqs[i] = NULL;
			goto out;
		}
	}
	ret = -ENOMEM;
	/*
	 * We need a data area under 2G to communicate. Our payload is
	 * the address of the indicators.
	 */
	indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
					   sizeof(indicators(vcdev)));
	if (!indicatorp)
		goto out;
	*indicatorp = (unsigned long) indicators(vcdev);
	if (vcdev->is_thinint) {
		ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
		if (ret)
			/* no error, just fall back to legacy interrupts */
			vcdev->is_thinint = false;
	}
	if (!vcdev->is_thinint) {
		/* Register queue indicators with host. */
		*indicators(vcdev) = 0;
		ccw->cmd_code = CCW_CMD_SET_IND;
		ccw->flags = 0;
		ccw->count = sizeof(indicators(vcdev));
		ccw->cda = (__u32)(unsigned long) indicatorp;
		ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
		if (ret)
			goto out;
	}
	/* Register indicators2 with host for config changes */
	*indicatorp = (unsigned long) indicators2(vcdev);
	*indicators2(vcdev) = 0;
	ccw->cmd_code = CCW_CMD_SET_CONF_IND;
	ccw->flags = 0;
	ccw->count = sizeof(indicators2(vcdev));
	ccw->cda = (__u32)(unsigned long) indicatorp;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
	if (ret)
		goto out;

	if (indicatorp)
		ccw_device_dma_free(vcdev->cdev, indicatorp,
				    sizeof(indicators(vcdev)));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	return 0;
out:
	if (indicatorp)
		ccw_device_dma_free(vcdev->cdev, indicatorp,
				    sizeof(indicators(vcdev)));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	virtio_ccw_del_vqs(vdev);
	return ret;
}

static void virtio_ccw_reset(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	/* Zero status bits. */
	vcdev->dma_area->status = 0;

	/* Send a reset ccw on device. */
	ccw->cmd_code = CCW_CMD_VDEV_RESET;
	ccw->flags = 0;
	ccw->count = 0;
	ccw->cda = 0;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}

/* Read up to 64 feature bits from the host (two 32-bit halves). */
static u64 virtio_ccw_get_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	int ret;
	u64 rc;
	struct ccw1 *ccw;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return 0;

	features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
	if (!features) {
		rc = 0;
		goto out_free;
	}
	/* Read the feature bits from the host. */
	features->index = 0;
	ccw->cmd_code = CCW_CMD_READ_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
	if (ret) {
		rc = 0;
		goto out_free;
	}

	rc = le32_to_cpu(features->features);

	if (vcdev->revision == 0)
		goto out_free;

	/* Read second half of the feature bits from the host. */
	features->index = 1;
	ccw->cmd_code = CCW_CMD_READ_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
	if (ret == 0)
		rc |= (u64)le32_to_cpu(features->features) << 32;

out_free:
	ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	return rc;
}

static void ccw_transport_features(struct virtio_device *vdev)
{
	/*
	 * Currently nothing to do here.
	 */
}

/* Write the negotiated feature bits back to the host. */
static int virtio_ccw_finalize_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	struct ccw1 *ccw;
	int ret;

	if (vcdev->revision >= 1 &&
	    !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "virtio: device uses revision 1 "
			"but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return -ENOMEM;

	features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
	if (!features) {
		ret = -ENOMEM;
		goto out_free;
	}
	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Give virtio_ccw a chance to accept features. */
	ccw_transport_features(vdev);

	features->index = 0;
	features->features = cpu_to_le32((u32)vdev->features);
	/* Write the first half of the feature bits to the host. */
	ccw->cmd_code = CCW_CMD_WRITE_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
	if (ret)
		goto out_free;

	if (vcdev->revision == 0)
		goto out_free;

	features->index = 1;
	features->features = cpu_to_le32(vdev->features >> 32);
	/* Write the second half of the feature bits to the host. */
	ccw->cmd_code = CCW_CMD_WRITE_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);

out_free:
	ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));

	return ret;
}

static void virtio_ccw_get_config(struct virtio_device *vdev,
				  unsigned int offset, void *buf, unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int ret;
	struct ccw1 *ccw;
	void *config_area;
	unsigned long flags;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	config_area = ccw_device_dma_zalloc(vcdev->cdev,
					    VIRTIO_CCW_CONFIG_SIZE);
	if (!config_area)
		goto out_free;

	/* Read the config area from the host. */
	ccw->cmd_code = CCW_CMD_READ_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
	if (ret)
		goto out_free;

	/* Cache what we read, so set_config can do read-modify-write. */
	spin_lock_irqsave(&vcdev->lock, flags);
	memcpy(vcdev->config, config_area, offset + len);
	if (vcdev->config_ready < offset + len)
		vcdev->config_ready = offset + len;
	spin_unlock_irqrestore(&vcdev->lock, flags);
	if (buf)
		memcpy(buf, config_area + offset, len);

out_free:
	ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}

static void virtio_ccw_set_config(struct virtio_device *vdev,
				  unsigned int offset, const void *buf,
				  unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;
	void *config_area;
	unsigned long flags;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	config_area = ccw_device_dma_zalloc(vcdev->cdev,
					    VIRTIO_CCW_CONFIG_SIZE);
	if (!config_area)
		goto out_free;

	/* Make sure we don't overwrite fields. */
	if (vcdev->config_ready < offset)
		virtio_ccw_get_config(vdev, 0, NULL, offset);
	spin_lock_irqsave(&vcdev->lock, flags);
	memcpy(&vcdev->config[offset], buf, len);
	/* Write the config area to the host. */
	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
	spin_unlock_irqrestore(&vcdev->lock, flags);
	ccw->cmd_code = CCW_CMD_WRITE_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);

out_free:
	ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}

static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	u8 old_status = vcdev->dma_area->status;
	struct ccw1 *ccw;

	/* CCW_CMD_READ_STATUS only exists from revision 2 on. */
	if (vcdev->revision < 2)
		return vcdev->dma_area->status;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return old_status;

	ccw->cmd_code = CCW_CMD_READ_STATUS;
	ccw->flags = 0;
	ccw->count = sizeof(vcdev->dma_area->status);
	ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
	/*
	 * If the channel program failed (should only happen if the device
	 * was hotunplugged, and then we clean up via the machine check
	 * handler anyway), vcdev->dma_area->status was not overwritten and we just
	 * return the old status, which is fine.
	 */
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));

	return vcdev->dma_area->status;
}

static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	u8 old_status = vcdev->dma_area->status;
	struct ccw1 *ccw;
	int ret;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	/* Write the status to the host. */
	vcdev->dma_area->status = status;
	ccw->cmd_code = CCW_CMD_WRITE_STATUS;
	ccw->flags = 0;
	ccw->count = sizeof(status);
	ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
	/* We use ssch for setting the status which is a serializing
	 * instruction that guarantees the memory writes have
	 * completed before ssch.
	 */
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
	/* Write failed? We assume status is unchanged. */
	if (ret)
		vcdev->dma_area->status = old_status;
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}

static const char *virtio_ccw_bus_name(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);

	return dev_name(&vcdev->cdev->dev);
}

static void virtio_ccw_synchronize_cbs(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct airq_info *info = vcdev->airq_info;

	if (info) {
		/*
		 * This device uses adapter interrupts: synchronize with
		 * vring_interrupt() called by virtio_airq_handler()
		 * via the indicator area lock.
		 */
		write_lock_irq(&info->lock);
		write_unlock_irq(&info->lock);
	} else {
		/* This device uses classic interrupts: synchronize
		 * with vring_interrupt() called by
		 * virtio_ccw_int_handler() via the per-device
		 * irq_lock
		 */
		write_lock_irq(&vcdev->irq_lock);
		write_unlock_irq(&vcdev->irq_lock);
	}
}

static const struct virtio_config_ops virtio_ccw_config_ops = {
	.get_features = virtio_ccw_get_features,
	.finalize_features = virtio_ccw_finalize_features,
	.get = virtio_ccw_get_config,
	.set = virtio_ccw_set_config,
	.get_status = virtio_ccw_get_status,
	.set_status = virtio_ccw_set_status,
	.reset = virtio_ccw_reset,
	.find_vqs = virtio_ccw_find_vqs,
	.del_vqs = virtio_ccw_del_vqs,
	.bus_name = virtio_ccw_bus_name,
	.synchronize_cbs = virtio_ccw_synchronize_cbs,
};


/*
 * ccw bus driver related functions
 */

static void virtio_ccw_release_dev(struct device *_d)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	struct virtio_ccw_device *vcdev = to_vc_device(dev);

	ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
			    sizeof(*vcdev->dma_area));
	kfree(vcdev);
}

/* Non-zero if the irb reports any channel/device/cc error condition. */
static int irb_is_error(struct irb *irb)
{
	if (scsw_cstat(&irb->scsw) != 0)
		return 1;
	if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		return 1;
	if (scsw_cc(&irb->scsw) != 0)
		return 1;
	return 0;
}

static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
					      int index)
{
	struct virtio_ccw_vq_info *info;
	unsigned long flags;
	struct virtqueue *vq;

	vq = NULL;
	spin_lock_irqsave(&vcdev->lock, flags);
	list_for_each_entry(info, &vcdev->virtqueues, node) {
		if (info->vq->index == index) {
			vq = info->vq;
			break;
		}
	}
	spin_unlock_irqrestore(&vcdev->lock, flags);
	return vq;
}

/* Clear a completed channel program's flag and wake up ccw_io_helper(). */
static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
				      __u32 activity)
{
	if (vcdev->curr_io & activity) {
		switch (activity) {
		case VIRTIO_CCW_DOING_READ_FEAT:
		case VIRTIO_CCW_DOING_WRITE_FEAT:
		case VIRTIO_CCW_DOING_READ_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_STATUS:
		case VIRTIO_CCW_DOING_READ_STATUS:
		case VIRTIO_CCW_DOING_SET_VQ:
		case VIRTIO_CCW_DOING_SET_IND:
		case VIRTIO_CCW_DOING_SET_CONF_IND:
		case VIRTIO_CCW_DOING_RESET:
		case VIRTIO_CCW_DOING_READ_VQ_CONF:
		case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
		case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
			vcdev->curr_io &= ~activity;
			wake_up(&vcdev->wait_q);
			break;
		default:
			/* don't know what to do... */
			dev_warn(&vcdev->cdev->dev,
				 "Suspicious activity '%08x'\n", activity);
			WARN_ON(1);
			break;
		}
	}
}

/*
 * Classic interrupt handler: completes pending channel programs and
 * dispatches queue/config-change notifications via the indicator bits.
 */
static void virtio_ccw_int_handler(struct ccw_device *cdev,
				   unsigned long intparm,
				   struct irb *irb)
{
	__u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
	int i;
	struct virtqueue *vq;

	if (!vcdev)
		return;
	if (IS_ERR(irb)) {
		vcdev->err = PTR_ERR(irb);
		virtio_ccw_check_activity(vcdev, activity);
		/* Don't poke around indicators, something's wrong. */
		return;
	}
	/* Check if it's a notification from the host. */
	if ((intparm == 0) &&
	    (scsw_stctl(&irb->scsw) ==
	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
		/* OK */
	}
	if (irb_is_error(irb)) {
		/* Command reject? */
		if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
		    (irb->ecw[0] & SNS0_CMD_REJECT))
			vcdev->err = -EOPNOTSUPP;
		else
			/* Map everything else to -EIO. */
			vcdev->err = -EIO;
	}
	virtio_ccw_check_activity(vcdev, activity);
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	/*
	 * Paired with virtio_ccw_synchronize_cbs() and interrupts are
	 * disabled here.
	 */
	read_lock(&vcdev->irq_lock);
#endif
	for_each_set_bit(i, indicators(vcdev),
			 sizeof(*indicators(vcdev)) * BITS_PER_BYTE) {
		/* The bit clear must happen before the vring kick. */
		clear_bit(i, indicators(vcdev));
		barrier();
		vq = virtio_ccw_vq_by_ind(vcdev, i);
		vring_interrupt(0, vq);
	}
#ifdef CONFIG_VIRTIO_HARDEN_NOTIFICATION
	read_unlock(&vcdev->irq_lock);
#endif
	if (test_bit(0, indicators2(vcdev))) {
		virtio_config_changed(&vcdev->vdev);
		clear_bit(0, indicators2(vcdev));
	}
}

/*
 * We usually want to autoonline all devices, but give the admin
 * a way to exempt devices from this.
 */
#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
		     (8*sizeof(long)))
static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];

static char *no_auto = "";

module_param(no_auto, charp, 0444);
MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");

static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
{
	struct ccw_dev_id id;

	ccw_device_get_id(cdev, &id);
	if (test_bit(id.devno, devs_no_auto[id.ssid]))
		return 0;
	return 1;
}

static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
}

static int virtio_ccw_probe(struct ccw_device *cdev)
{
	cdev->handler = virtio_ccw_int_handler;

	if (virtio_ccw_check_autoonline(cdev))
		async_schedule(virtio_ccw_auto_online, cdev);
	return 0;
}

/*
 * Get the drvdata while marking the device as going away, so concurrent
 * removal paths can't grab it again; NULL if it's already on its way out.
 */
static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	vcdev = dev_get_drvdata(&cdev->dev);
	if (!vcdev || vcdev->going_away) {
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		return NULL;
	}
	vcdev->going_away = true;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return vcdev;
}

static void virtio_ccw_remove(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (vcdev && cdev->online) {
		if (vcdev->device_lost)
			virtio_break_device(&vcdev->vdev);
		unregister_virtio_device(&vcdev->vdev);
		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
		dev_set_drvdata(&cdev->dev, NULL);
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	}
	cdev->handler = NULL;
}

static int virtio_ccw_offline(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (!vcdev)
		return 0;
	if (vcdev->device_lost)
		virtio_break_device(&vcdev->vdev);
	unregister_virtio_device(&vcdev->vdev);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return 0;
}

static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
{
	struct virtio_rev_info *rev;
	struct ccw1 *ccw;
	int ret;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return -ENOMEM;
	rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev));
	if (!rev) {
		ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
		return -ENOMEM;
	}

	/* Set transport revision */
	ccw->cmd_code =
CCW_CMD_SET_VIRTIO_REV; 1270 ccw->flags = 0; 1271 ccw->count = sizeof(*rev); 1272 ccw->cda = (__u32)(unsigned long)rev; 1273 1274 vcdev->revision = VIRTIO_CCW_REV_MAX; 1275 do { 1276 rev->revision = vcdev->revision; 1277 /* none of our supported revisions carry payload */ 1278 rev->length = 0; 1279 ret = ccw_io_helper(vcdev, ccw, 1280 VIRTIO_CCW_DOING_SET_VIRTIO_REV); 1281 if (ret == -EOPNOTSUPP) { 1282 if (vcdev->revision == 0) 1283 /* 1284 * The host device does not support setting 1285 * the revision: let's operate it in legacy 1286 * mode. 1287 */ 1288 ret = 0; 1289 else 1290 vcdev->revision--; 1291 } 1292 } while (ret == -EOPNOTSUPP); 1293 1294 ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw)); 1295 ccw_device_dma_free(vcdev->cdev, rev, sizeof(*rev)); 1296 return ret; 1297 } 1298 1299 static int virtio_ccw_online(struct ccw_device *cdev) 1300 { 1301 int ret; 1302 struct virtio_ccw_device *vcdev; 1303 unsigned long flags; 1304 1305 vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL); 1306 if (!vcdev) { 1307 dev_warn(&cdev->dev, "Could not get memory for virtio\n"); 1308 ret = -ENOMEM; 1309 goto out_free; 1310 } 1311 vcdev->vdev.dev.parent = &cdev->dev; 1312 vcdev->cdev = cdev; 1313 vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev, 1314 sizeof(*vcdev->dma_area)); 1315 if (!vcdev->dma_area) { 1316 ret = -ENOMEM; 1317 goto out_free; 1318 } 1319 1320 vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */ 1321 1322 vcdev->vdev.dev.release = virtio_ccw_release_dev; 1323 vcdev->vdev.config = &virtio_ccw_config_ops; 1324 init_waitqueue_head(&vcdev->wait_q); 1325 INIT_LIST_HEAD(&vcdev->virtqueues); 1326 spin_lock_init(&vcdev->lock); 1327 rwlock_init(&vcdev->irq_lock); 1328 mutex_init(&vcdev->io_lock); 1329 1330 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 1331 dev_set_drvdata(&cdev->dev, vcdev); 1332 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 1333 vcdev->vdev.id.vendor = cdev->id.cu_type; 1334 vcdev->vdev.id.device = cdev->id.cu_model; 1335 1336 ret = 
virtio_ccw_set_transport_rev(vcdev); 1337 if (ret) 1338 goto out_free; 1339 1340 ret = register_virtio_device(&vcdev->vdev); 1341 if (ret) { 1342 dev_warn(&cdev->dev, "Failed to register virtio device: %d\n", 1343 ret); 1344 goto out_put; 1345 } 1346 return 0; 1347 out_put: 1348 spin_lock_irqsave(get_ccwdev_lock(cdev), flags); 1349 dev_set_drvdata(&cdev->dev, NULL); 1350 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); 1351 put_device(&vcdev->vdev.dev); 1352 return ret; 1353 out_free: 1354 if (vcdev) { 1355 ccw_device_dma_free(vcdev->cdev, vcdev->dma_area, 1356 sizeof(*vcdev->dma_area)); 1357 } 1358 kfree(vcdev); 1359 return ret; 1360 } 1361 1362 static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event) 1363 { 1364 int rc; 1365 struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); 1366 1367 /* 1368 * Make sure vcdev is set 1369 * i.e. set_offline/remove callback not already running 1370 */ 1371 if (!vcdev) 1372 return NOTIFY_DONE; 1373 1374 switch (event) { 1375 case CIO_GONE: 1376 vcdev->device_lost = true; 1377 rc = NOTIFY_DONE; 1378 break; 1379 case CIO_OPER: 1380 rc = NOTIFY_OK; 1381 break; 1382 default: 1383 rc = NOTIFY_DONE; 1384 break; 1385 } 1386 return rc; 1387 } 1388 1389 static struct ccw_device_id virtio_ids[] = { 1390 { CCW_DEVICE(0x3832, 0) }, 1391 {}, 1392 }; 1393 1394 static struct ccw_driver virtio_ccw_driver = { 1395 .driver = { 1396 .owner = THIS_MODULE, 1397 .name = "virtio_ccw", 1398 }, 1399 .ids = virtio_ids, 1400 .probe = virtio_ccw_probe, 1401 .remove = virtio_ccw_remove, 1402 .set_offline = virtio_ccw_offline, 1403 .set_online = virtio_ccw_online, 1404 .notify = virtio_ccw_cio_notify, 1405 .int_class = IRQIO_VIR, 1406 }; 1407 1408 static int __init pure_hex(char **cp, unsigned int *val, int min_digit, 1409 int max_digit, int max_val) 1410 { 1411 int diff; 1412 1413 diff = 0; 1414 *val = 0; 1415 1416 while (diff <= max_digit) { 1417 int value = hex_to_bin(**cp); 1418 1419 if (value < 0) 1420 break; 1421 *val = *val * 
16 + value; 1422 (*cp)++; 1423 diff++; 1424 } 1425 1426 if ((diff < min_digit) || (diff > max_digit) || (*val > max_val)) 1427 return 1; 1428 1429 return 0; 1430 } 1431 1432 static int __init parse_busid(char *str, unsigned int *cssid, 1433 unsigned int *ssid, unsigned int *devno) 1434 { 1435 char *str_work; 1436 int rc, ret; 1437 1438 rc = 1; 1439 1440 if (*str == '\0') 1441 goto out; 1442 1443 str_work = str; 1444 ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID); 1445 if (ret || (str_work[0] != '.')) 1446 goto out; 1447 str_work++; 1448 ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID); 1449 if (ret || (str_work[0] != '.')) 1450 goto out; 1451 str_work++; 1452 ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL); 1453 if (ret || (str_work[0] != '\0')) 1454 goto out; 1455 1456 rc = 0; 1457 out: 1458 return rc; 1459 } 1460 1461 static void __init no_auto_parse(void) 1462 { 1463 unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to; 1464 char *parm, *str; 1465 int rc; 1466 1467 str = no_auto; 1468 while ((parm = strsep(&str, ","))) { 1469 rc = parse_busid(strsep(&parm, "-"), &from_cssid, 1470 &from_ssid, &from); 1471 if (rc) 1472 continue; 1473 if (parm != NULL) { 1474 rc = parse_busid(parm, &to_cssid, 1475 &to_ssid, &to); 1476 if ((from_ssid > to_ssid) || 1477 ((from_ssid == to_ssid) && (from > to))) 1478 rc = -EINVAL; 1479 } else { 1480 to_cssid = from_cssid; 1481 to_ssid = from_ssid; 1482 to = from; 1483 } 1484 if (rc) 1485 continue; 1486 while ((from_ssid < to_ssid) || 1487 ((from_ssid == to_ssid) && (from <= to))) { 1488 set_bit(from, devs_no_auto[from_ssid]); 1489 from++; 1490 if (from > __MAX_SUBCHANNEL) { 1491 from_ssid++; 1492 from = 0; 1493 } 1494 } 1495 } 1496 } 1497 1498 static int __init virtio_ccw_init(void) 1499 { 1500 int rc; 1501 1502 /* parse no_auto string before we do anything further */ 1503 no_auto_parse(); 1504 1505 summary_indicators = cio_dma_zalloc(MAX_AIRQ_AREAS); 1506 if (!summary_indicators) 1507 return -ENOMEM; 1508 rc 
= ccw_driver_register(&virtio_ccw_driver); 1509 if (rc) 1510 cio_dma_free(summary_indicators, MAX_AIRQ_AREAS); 1511 return rc; 1512 } 1513 device_initcall(virtio_ccw_init); 1514