// SPDX-License-Identifier: GPL-2.0
/*
 * ccw based virtio transport
 *
 * Copyright IBM Corp. 2012, 2014
 *
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 */

#include <linux/kernel_stat.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio_ring.h>
#include <linux/pfn.h>
#include <linux/async.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/bitops.h>
#include <linux/moduleparam.h>
#include <linux/io.h>
#include <linux/kvm_para.h>
#include <linux/notifier.h>
#include <asm/diag.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/cio.h>
#include <asm/ccwdev.h>
#include <asm/virtio-ccw.h>
#include <asm/isc.h>
#include <asm/airq.h>

/*
 * virtio related functions
 */

struct vq_config_block {
	__u16 index;
	__u16 num;
} __packed;

#define VIRTIO_CCW_CONFIG_SIZE 0x100
/* same as PCI config space size, should be enough for all drivers */

struct vcdev_dma_area {
	unsigned long indicators;
	unsigned long indicators2;
	struct vq_config_block config_block;
	__u8 status;
};

struct virtio_ccw_device {
	struct virtio_device vdev;
	__u8 config[VIRTIO_CCW_CONFIG_SIZE];
	struct ccw_device *cdev;
	__u32 curr_io;
	int err;
	unsigned int revision; /* Transport revision */
	wait_queue_head_t wait_q;
	spinlock_t lock;
	struct mutex io_lock; /* Serializes I/O requests */
	struct list_head virtqueues;
	bool is_thinint;
	bool going_away;
	bool device_lost;
	unsigned int config_ready;
	void *airq_info;
	struct vcdev_dma_area *dma_area;
};

static inline unsigned long *indicators(struct virtio_ccw_device *vcdev)
{
	return &vcdev->dma_area->indicators;
}

static inline unsigned long *indicators2(struct virtio_ccw_device *vcdev)
{
	return &vcdev->dma_area->indicators2;
}

struct vq_info_block_legacy {
	__u64 queue;
	__u32 align;
	__u16 index;
	__u16 num;
} __packed;

struct vq_info_block {
	__u64 desc;
	__u32 res0;
	__u16 index;
	__u16 num;
	__u64 avail;
	__u64 used;
} __packed;

struct virtio_feature_desc {
	__le32 features;
	__u8 index;
} __packed;

struct virtio_thinint_area {
	unsigned long summary_indicator;
	unsigned long indicator;
	u64 bit_nr;
	u8 isc;
} __packed;

struct virtio_rev_info {
	__u16 revision;
	__u16 length;
	__u8 data[];
};

/* the highest virtio-ccw revision we support */
#define VIRTIO_CCW_REV_MAX 1

struct virtio_ccw_vq_info {
	struct virtqueue *vq;
	int num;
	union {
		struct vq_info_block s;
		struct vq_info_block_legacy l;
	} *info_block;
	int bit_nr;
	struct list_head node;
	long cookie;
};

#define VIRTIO_AIRQ_ISC IO_SCH_ISC /* inherit from subchannel */

#define VIRTIO_IV_BITS (L1_CACHE_BYTES * 8)
#define MAX_AIRQ_AREAS 20

static int virtio_ccw_use_airq = 1;

struct airq_info {
	rwlock_t lock;
	u8 summary_indicator_idx;
	struct airq_struct airq;
	struct airq_iv *aiv;
};
static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
static DEFINE_MUTEX(airq_areas_lock);

static u8 *summary_indicators;

static inline u8 *get_summary_indicator(struct airq_info *info)
{
	return summary_indicators + info->summary_indicator_idx;
}
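
/*
 * Channel command codes used by the virtio-ccw transport, and the intparm
 * values (VIRTIO_CCW_DOING_*) used to tag and track the channel program
 * that is currently outstanding on the device.
 */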
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
#define CCW_CMD_SET_CONF_IND 0x53
#define CCW_CMD_READ_FEAT 0x12
#define CCW_CMD_WRITE_FEAT 0x11
#define CCW_CMD_READ_CONF 0x22
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
#define CCW_CMD_READ_STATUS 0x72
#define CCW_CMD_SET_IND_ADAPTER 0x73
#define CCW_CMD_SET_VIRTIO_REV 0x83

#define VIRTIO_CCW_DOING_SET_VQ 0x00010000
#define VIRTIO_CCW_DOING_RESET 0x00040000
#define VIRTIO_CCW_DOING_READ_FEAT 0x00080000
#define VIRTIO_CCW_DOING_WRITE_FEAT 0x00100000
#define VIRTIO_CCW_DOING_READ_CONFIG 0x00200000
#define VIRTIO_CCW_DOING_WRITE_CONFIG 0x00400000
#define VIRTIO_CCW_DOING_WRITE_STATUS 0x00800000
#define VIRTIO_CCW_DOING_SET_IND 0x01000000
#define VIRTIO_CCW_DOING_READ_VQ_CONF 0x02000000
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000

static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
{
	return container_of(vdev, struct virtio_ccw_device, vdev);
}

static void drop_airq_indicator(struct virtqueue *vq, struct airq_info *info)
{
	unsigned long i, flags;

	write_lock_irqsave(&info->lock, flags);
	for (i = 0; i < airq_iv_end(info->aiv); i++) {
		if (vq == (void *)airq_iv_get_ptr(info->aiv, i)) {
			airq_iv_free_bit(info->aiv, i);
			airq_iv_set_ptr(info->aiv, i, 0);
			break;
		}
	}
	write_unlock_irqrestore(&info->lock, flags);
}

static void virtio_airq_handler(struct airq_struct *airq, bool floating)
{
	struct airq_info *info = container_of(airq, struct airq_info, airq);
	unsigned long ai;

	inc_irq_stat(IRQIO_VAI);
	read_lock(&info->lock);
	/* Walk through indicators field, summary indicator active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	*(get_summary_indicator(info)) = 0;
	smp_wmb();
	/* Walk through indicators field, summary indicator not active. */
	for (ai = 0;;) {
		ai = airq_iv_scan(info->aiv, ai, airq_iv_end(info->aiv));
		if (ai == -1UL)
			break;
		vring_interrupt(0, (void *)airq_iv_get_ptr(info->aiv, ai));
	}
	read_unlock(&info->lock);
}
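
/*
 * Each airq_info provides one summary indicator byte plus an airq_iv bit
 * vector with one bit (and a stored virtqueue pointer) per queue that uses
 * adapter interrupts. Up to MAX_AIRQ_AREAS such areas are allocated on
 * demand from get_airq_indicator().
 */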
static struct airq_info *new_airq_info(int index)
{
	struct airq_info *info;
	int rc;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return NULL;
	rwlock_init(&info->lock);
	info->aiv = airq_iv_create(VIRTIO_IV_BITS, AIRQ_IV_ALLOC | AIRQ_IV_PTR
				   | AIRQ_IV_CACHELINE);
	if (!info->aiv) {
		kfree(info);
		return NULL;
	}
	info->airq.handler = virtio_airq_handler;
	info->summary_indicator_idx = index;
	info->airq.lsi_ptr = get_summary_indicator(info);
	info->airq.lsi_mask = 0xff;
	info->airq.isc = VIRTIO_AIRQ_ISC;
	rc = register_adapter_interrupt(&info->airq);
	if (rc) {
		airq_iv_release(info->aiv);
		kfree(info);
		return NULL;
	}
	return info;
}

static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
					u64 *first, void **airq_info)
{
	int i, j;
	struct airq_info *info;
	unsigned long indicator_addr = 0;
	unsigned long bit, flags;

	for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
		mutex_lock(&airq_areas_lock);
		if (!airq_areas[i])
			airq_areas[i] = new_airq_info(i);
		info = airq_areas[i];
		mutex_unlock(&airq_areas_lock);
		if (!info)
			return 0;
		write_lock_irqsave(&info->lock, flags);
		bit = airq_iv_alloc(info->aiv, nvqs);
		if (bit == -1UL) {
			/* Not enough vacancies. */
			write_unlock_irqrestore(&info->lock, flags);
			continue;
		}
		*first = bit;
		*airq_info = info;
		indicator_addr = (unsigned long)info->aiv->vector;
		for (j = 0; j < nvqs; j++) {
			airq_iv_set_ptr(info->aiv, bit + j,
					(unsigned long)vqs[j]);
		}
		write_unlock_irqrestore(&info->lock, flags);
	}
	return indicator_addr;
}

static void virtio_ccw_drop_indicators(struct virtio_ccw_device *vcdev)
{
	struct virtio_ccw_vq_info *info;

	if (!vcdev->airq_info)
		return;
	list_for_each_entry(info, &vcdev->virtqueues, node)
		drop_airq_indicator(info->vq, vcdev->airq_info);
}

static int doing_io(struct virtio_ccw_device *vcdev, __u32 flag)
{
	unsigned long flags;
	__u32 ret;

	spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
	if (vcdev->err)
		ret = 0;
	else
		ret = vcdev->curr_io & flag;
	spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
	return ret;
}

static int ccw_io_helper(struct virtio_ccw_device *vcdev,
			 struct ccw1 *ccw, __u32 intparm)
{
	int ret;
	unsigned long flags;
	int flag = intparm & VIRTIO_CCW_INTPARM_MASK;

	mutex_lock(&vcdev->io_lock);
	do {
		spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
		ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
		if (!ret) {
			if (!vcdev->curr_io)
				vcdev->err = 0;
			vcdev->curr_io |= flag;
		}
		spin_unlock_irqrestore(get_ccwdev_lock(vcdev->cdev), flags);
		cpu_relax();
	} while (ret == -EBUSY);
	wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
	ret = ret ? ret : vcdev->err;
	mutex_unlock(&vcdev->io_lock);
	return ret;
}

static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
				      struct ccw1 *ccw)
{
	int ret;
	unsigned long *indicatorp = NULL;
	struct virtio_thinint_area *thinint_area = NULL;
	struct airq_info *airq_info = vcdev->airq_info;

	if (vcdev->is_thinint) {
		thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
						     sizeof(*thinint_area));
		if (!thinint_area)
			return;
		thinint_area->summary_indicator =
			(unsigned long) get_summary_indicator(airq_info);
		thinint_area->isc = VIRTIO_AIRQ_ISC;
		ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
		ccw->count = sizeof(*thinint_area);
		ccw->cda = (__u32)(unsigned long) thinint_area;
	} else {
		/* payload is the address of the indicators */
		indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
						   sizeof(indicators(vcdev)));
		if (!indicatorp)
			return;
		*indicatorp = 0;
		ccw->cmd_code = CCW_CMD_SET_IND;
		ccw->count = sizeof(indicators(vcdev));
		ccw->cda = (__u32)(unsigned long) indicatorp;
	}
	/* Deregister indicators from host. */
	*indicators(vcdev) = 0;
	ccw->flags = 0;
	ret = ccw_io_helper(vcdev, ccw,
			    vcdev->is_thinint ?
			    VIRTIO_CCW_DOING_SET_IND_ADAPTER :
			    VIRTIO_CCW_DOING_SET_IND);
	if (ret && (ret != -ENODEV))
		dev_info(&vcdev->cdev->dev,
			 "Failed to deregister indicators (%d)\n", ret);
	else if (vcdev->is_thinint)
		virtio_ccw_drop_indicators(vcdev);
	ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(indicators(vcdev)));
	ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
}
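
/*
 * Notify the host about new buffers: diagnose 0x500 with function code
 * KVM_S390_VIRTIO_CCW_NOTIFY is the KVM hypercall that takes the subchannel
 * id and the queue index and returns an updated notification cookie.
 */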
static inline long __do_kvm_notify(struct subchannel_id schid,
				   unsigned long queue_index,
				   long cookie)
{
	register unsigned long __nr asm("1") = KVM_S390_VIRTIO_CCW_NOTIFY;
	register struct subchannel_id __schid asm("2") = schid;
	register unsigned long __index asm("3") = queue_index;
	register long __rc asm("2");
	register long __cookie asm("4") = cookie;

	asm volatile ("diag 2,4,0x500\n"
		      : "=d" (__rc) : "d" (__nr), "d" (__schid), "d" (__index),
		      "d"(__cookie)
		      : "memory", "cc");
	return __rc;
}

static inline long do_kvm_notify(struct subchannel_id schid,
				 unsigned long queue_index,
				 long cookie)
{
	diag_stat_inc(DIAG_STAT_X500);
	return __do_kvm_notify(schid, queue_index, cookie);
}

static bool virtio_ccw_kvm_notify(struct virtqueue *vq)
{
	struct virtio_ccw_vq_info *info = vq->priv;
	struct virtio_ccw_device *vcdev;
	struct subchannel_id schid;

	vcdev = to_vc_device(info->vq->vdev);
	ccw_device_get_schid(vcdev->cdev, &schid);
	info->cookie = do_kvm_notify(schid, vq->index, info->cookie);
	if (info->cookie < 0)
		return false;
	return true;
}

static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
				   struct ccw1 *ccw, int index)
{
	int ret;

	vcdev->dma_area->config_block.index = index;
	ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
	ccw->flags = 0;
	ccw->count = sizeof(struct vq_config_block);
	ccw->cda = (__u32)(unsigned long)(&vcdev->dma_area->config_block);
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
	if (ret)
		return ret;
	return vcdev->dma_area->config_block.num ?: -ENOENT;
}
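
/*
 * Queues are registered with and released from the host via CCW_CMD_SET_VQ.
 * Revision 0 (legacy) devices use the vq_info_block_legacy layout, later
 * revisions the vq_info_block layout; clearing the ring addresses in the
 * block releases the queue again.
 */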
static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vq->vdev);
	struct virtio_ccw_vq_info *info = vq->priv;
	unsigned long flags;
	int ret;
	unsigned int index = vq->index;

	/* Remove from our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	/* Release from host. */
	if (vcdev->revision == 0) {
		info->info_block->l.queue = 0;
		info->info_block->l.align = 0;
		info->info_block->l.index = index;
		info->info_block->l.num = 0;
		ccw->count = sizeof(info->info_block->l);
	} else {
		info->info_block->s.desc = 0;
		info->info_block->s.index = index;
		info->info_block->s.num = 0;
		info->info_block->s.avail = 0;
		info->info_block->s.used = 0;
		ccw->count = sizeof(info->info_block->s);
	}
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	ret = ccw_io_helper(vcdev, ccw,
			    VIRTIO_CCW_DOING_SET_VQ | index);
	/*
	 * -ENODEV isn't considered an error: The device is gone anyway.
	 * This may happen on device detach.
	 */
	if (ret && (ret != -ENODEV))
		dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
			 ret, index);

	vring_del_virtqueue(vq);
	ccw_device_dma_free(vcdev->cdev, info->info_block,
			    sizeof(*info->info_block));
	kfree(info);
}

static void virtio_ccw_del_vqs(struct virtio_device *vdev)
{
	struct virtqueue *vq, *n;
	struct ccw1 *ccw;
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	virtio_ccw_drop_indicator(vcdev, ccw);

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		virtio_ccw_del_vq(vq, ccw);

	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}

static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
					     int i, vq_callback_t *callback,
					     const char *name, bool ctx,
					     struct ccw1 *ccw)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int err;
	struct virtqueue *vq = NULL;
	struct virtio_ccw_vq_info *info;
	u64 queue;
	unsigned long flags;
	bool may_reduce;

	/* Allocate queue. */
	info = kzalloc(sizeof(struct virtio_ccw_vq_info), GFP_KERNEL);
	if (!info) {
		dev_warn(&vcdev->cdev->dev, "no info\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->info_block = ccw_device_dma_zalloc(vcdev->cdev,
						 sizeof(*info->info_block));
	if (!info->info_block) {
		dev_warn(&vcdev->cdev->dev, "no info block\n");
		err = -ENOMEM;
		goto out_err;
	}
	info->num = virtio_ccw_read_vq_conf(vcdev, ccw, i);
	if (info->num < 0) {
		err = info->num;
		goto out_err;
	}
	may_reduce = vcdev->revision > 0;
	vq = vring_create_virtqueue(i, info->num, KVM_VIRTIO_CCW_RING_ALIGN,
				    vdev, true, may_reduce, ctx,
				    virtio_ccw_kvm_notify, callback, name);

	if (!vq) {
		/* For now, we fail if we can't get the requested size. */
		dev_warn(&vcdev->cdev->dev, "no vq\n");
		err = -ENOMEM;
		goto out_err;
	}
	/* it may have been reduced */
	info->num = virtqueue_get_vring_size(vq);

	/* Register it with the host. */
	queue = virtqueue_get_desc_addr(vq);
	if (vcdev->revision == 0) {
		info->info_block->l.queue = queue;
		info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
		info->info_block->l.index = i;
		info->info_block->l.num = info->num;
		ccw->count = sizeof(info->info_block->l);
	} else {
		info->info_block->s.desc = queue;
		info->info_block->s.index = i;
		info->info_block->s.num = info->num;
		info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq);
		info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq);
		ccw->count = sizeof(info->info_block->s);
	}
	ccw->cmd_code = CCW_CMD_SET_VQ;
	ccw->flags = 0;
	ccw->cda = (__u32)(unsigned long)(info->info_block);
	err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
	if (err) {
		dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
		goto out_err;
	}

	info->vq = vq;
	vq->priv = info;

	/* Save it to our list. */
	spin_lock_irqsave(&vcdev->lock, flags);
	list_add(&info->node, &vcdev->virtqueues);
	spin_unlock_irqrestore(&vcdev->lock, flags);

	return vq;

out_err:
	if (vq)
		vring_del_virtqueue(vq);
	if (info) {
		ccw_device_dma_free(vcdev->cdev, info->info_block,
				    sizeof(*info->info_block));
	}
	kfree(info);
	return ERR_PTR(err);
}

static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
					   struct virtqueue *vqs[], int nvqs,
					   struct ccw1 *ccw)
{
	int ret;
	struct virtio_thinint_area *thinint_area = NULL;
	struct airq_info *info;

	thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
					     sizeof(*thinint_area));
	if (!thinint_area) {
		ret = -ENOMEM;
		goto out;
	}
	/* Try to get an indicator. */
	thinint_area->indicator = get_airq_indicator(vqs, nvqs,
						     &thinint_area->bit_nr,
						     &vcdev->airq_info);
	if (!thinint_area->indicator) {
		ret = -ENOSPC;
		goto out;
	}
	info = vcdev->airq_info;
	thinint_area->summary_indicator =
		(unsigned long) get_summary_indicator(info);
	thinint_area->isc = VIRTIO_AIRQ_ISC;
	ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
	ccw->flags = CCW_FLAG_SLI;
	ccw->count = sizeof(*thinint_area);
	ccw->cda = (__u32)(unsigned long)thinint_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
	if (ret) {
		if (ret == -EOPNOTSUPP) {
			/*
			 * The host does not support adapter interrupts
			 * for virtio-ccw, stop trying.
			 */
			virtio_ccw_use_airq = 0;
			pr_info("Adapter interrupts unsupported on host\n");
		} else
			dev_warn(&vcdev->cdev->dev,
				 "enabling adapter interrupts = %d\n", ret);
		virtio_ccw_drop_indicators(vcdev);
	}
out:
	ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
	return ret;
}

static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
			       struct virtqueue *vqs[],
			       vq_callback_t *callbacks[],
			       const char * const names[],
			       const bool *ctx,
			       struct irq_affinity *desc)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	unsigned long *indicatorp = NULL;
	int ret, i, queue_idx = 0;
	struct ccw1 *ccw;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return -ENOMEM;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = virtio_ccw_setup_vq(vdev, queue_idx++, callbacks[i],
					     names[i], ctx ? ctx[i] : false,
					     ccw);
		if (IS_ERR(vqs[i])) {
			ret = PTR_ERR(vqs[i]);
			vqs[i] = NULL;
			goto out;
		}
	}
	ret = -ENOMEM;
	/*
	 * We need a data area under 2G to communicate. Our payload is
	 * the address of the indicators.
	 */
	indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
					   sizeof(indicators(vcdev)));
	if (!indicatorp)
		goto out;
	*indicatorp = (unsigned long) indicators(vcdev);
	if (vcdev->is_thinint) {
		ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
		if (ret)
			/* no error, just fall back to legacy interrupts */
			vcdev->is_thinint = false;
	}
	if (!vcdev->is_thinint) {
		/* Register queue indicators with host. */
		*indicators(vcdev) = 0;
		ccw->cmd_code = CCW_CMD_SET_IND;
		ccw->flags = 0;
		ccw->count = sizeof(indicators(vcdev));
		ccw->cda = (__u32)(unsigned long) indicatorp;
		ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
		if (ret)
			goto out;
	}
	/* Register indicators2 with host for config changes */
	*indicatorp = (unsigned long) indicators2(vcdev);
	*indicators2(vcdev) = 0;
	ccw->cmd_code = CCW_CMD_SET_CONF_IND;
	ccw->flags = 0;
	ccw->count = sizeof(indicators2(vcdev));
	ccw->cda = (__u32)(unsigned long) indicatorp;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
	if (ret)
		goto out;

	if (indicatorp)
		ccw_device_dma_free(vcdev->cdev, indicatorp,
				    sizeof(indicators(vcdev)));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	return 0;
out:
	if (indicatorp)
		ccw_device_dma_free(vcdev->cdev, indicatorp,
				    sizeof(indicators(vcdev)));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	virtio_ccw_del_vqs(vdev);
	return ret;
}

static void virtio_ccw_reset(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	/* Zero status bits. */
	vcdev->dma_area->status = 0;

	/* Send a reset ccw on device. */
	ccw->cmd_code = CCW_CMD_VDEV_RESET;
	ccw->flags = 0;
	ccw->count = 0;
	ccw->cda = 0;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_RESET);
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}

static u64 virtio_ccw_get_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	int ret;
	u64 rc;
	struct ccw1 *ccw;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return 0;

	features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
	if (!features) {
		rc = 0;
		goto out_free;
	}
	/* Read the feature bits from the host. */
	features->index = 0;
	ccw->cmd_code = CCW_CMD_READ_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
	if (ret) {
		rc = 0;
		goto out_free;
	}

	rc = le32_to_cpu(features->features);

	if (vcdev->revision == 0)
		goto out_free;

	/* Read second half of the feature bits from the host. */
	features->index = 1;
	ccw->cmd_code = CCW_CMD_READ_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
	if (ret == 0)
		rc |= (u64)le32_to_cpu(features->features) << 32;

out_free:
	ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	return rc;
}

static void ccw_transport_features(struct virtio_device *vdev)
{
	/*
	 * Currently nothing to do here.
	 */
}

static int virtio_ccw_finalize_features(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct virtio_feature_desc *features;
	struct ccw1 *ccw;
	int ret;

	if (vcdev->revision >= 1 &&
	    !__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "virtio: device uses revision 1 "
			"but does not have VIRTIO_F_VERSION_1\n");
		return -EINVAL;
	}

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return -ENOMEM;

	features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
	if (!features) {
		ret = -ENOMEM;
		goto out_free;
	}
	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Give virtio_ccw a chance to accept features. */
	ccw_transport_features(vdev);

	features->index = 0;
	features->features = cpu_to_le32((u32)vdev->features);
	/* Write the first half of the feature bits to the host. */
	ccw->cmd_code = CCW_CMD_WRITE_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
	if (ret)
		goto out_free;

	if (vcdev->revision == 0)
		goto out_free;

	features->index = 1;
	features->features = cpu_to_le32(vdev->features >> 32);
	/* Write the second half of the feature bits to the host. */
	ccw->cmd_code = CCW_CMD_WRITE_FEAT;
	ccw->flags = 0;
	ccw->count = sizeof(*features);
	ccw->cda = (__u32)(unsigned long)features;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);

out_free:
	ccw_device_dma_free(vcdev->cdev, features, sizeof(*features));
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));

	return ret;
}
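
/*
 * The device's config space is mirrored in vcdev->config; config_ready
 * tracks how much of it has been read, so that virtio_ccw_set_config() can
 * fetch a not-yet-read prefix before writing the area back to the host.
 */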
static void virtio_ccw_get_config(struct virtio_device *vdev,
				  unsigned int offset, void *buf, unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	int ret;
	struct ccw1 *ccw;
	void *config_area;
	unsigned long flags;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	config_area = ccw_device_dma_zalloc(vcdev->cdev,
					    VIRTIO_CCW_CONFIG_SIZE);
	if (!config_area)
		goto out_free;

	/* Read the config area from the host. */
	ccw->cmd_code = CCW_CMD_READ_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
	if (ret)
		goto out_free;

	spin_lock_irqsave(&vcdev->lock, flags);
	memcpy(vcdev->config, config_area, offset + len);
	if (vcdev->config_ready < offset + len)
		vcdev->config_ready = offset + len;
	spin_unlock_irqrestore(&vcdev->lock, flags);
	if (buf)
		memcpy(buf, config_area + offset, len);

out_free:
	ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}

static void virtio_ccw_set_config(struct virtio_device *vdev,
				  unsigned int offset, const void *buf,
				  unsigned len)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	struct ccw1 *ccw;
	void *config_area;
	unsigned long flags;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	config_area = ccw_device_dma_zalloc(vcdev->cdev,
					    VIRTIO_CCW_CONFIG_SIZE);
	if (!config_area)
		goto out_free;

	/* Make sure we don't overwrite fields. */
	if (vcdev->config_ready < offset)
		virtio_ccw_get_config(vdev, 0, NULL, offset);
	spin_lock_irqsave(&vcdev->lock, flags);
	memcpy(&vcdev->config[offset], buf, len);
	/* Write the config area to the host. */
	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
	spin_unlock_irqrestore(&vcdev->lock, flags);
	ccw->cmd_code = CCW_CMD_WRITE_CONF;
	ccw->flags = 0;
	ccw->count = offset + len;
	ccw->cda = (__u32)(unsigned long)config_area;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);

out_free:
	ccw_device_dma_free(vcdev->cdev, config_area, VIRTIO_CCW_CONFIG_SIZE);
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}

static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	u8 old_status = vcdev->dma_area->status;
	struct ccw1 *ccw;

	if (vcdev->revision < 1)
		return vcdev->dma_area->status;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return old_status;

	ccw->cmd_code = CCW_CMD_READ_STATUS;
	ccw->flags = 0;
	ccw->count = sizeof(vcdev->dma_area->status);
	ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
	ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
	/*
	 * If the channel program failed (should only happen if the device
	 * was hotunplugged, and then we clean up via the machine check
	 * handler anyway), vcdev->dma_area->status was not overwritten and we just
	 * return the old status, which is fine.
	 */
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));

	return vcdev->dma_area->status;
}

static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
	u8 old_status = vcdev->dma_area->status;
	struct ccw1 *ccw;
	int ret;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return;

	/* Write the status to the host. */
	vcdev->dma_area->status = status;
	ccw->cmd_code = CCW_CMD_WRITE_STATUS;
	ccw->flags = 0;
	ccw->count = sizeof(status);
	ccw->cda = (__u32)(unsigned long)&vcdev->dma_area->status;
	ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
	/* Write failed? We assume status is unchanged. */
	if (ret)
		vcdev->dma_area->status = old_status;
	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
}

static const char *virtio_ccw_bus_name(struct virtio_device *vdev)
{
	struct virtio_ccw_device *vcdev = to_vc_device(vdev);

	return dev_name(&vcdev->cdev->dev);
}

static const struct virtio_config_ops virtio_ccw_config_ops = {
	.get_features = virtio_ccw_get_features,
	.finalize_features = virtio_ccw_finalize_features,
	.get = virtio_ccw_get_config,
	.set = virtio_ccw_set_config,
	.get_status = virtio_ccw_get_status,
	.set_status = virtio_ccw_set_status,
	.reset = virtio_ccw_reset,
	.find_vqs = virtio_ccw_find_vqs,
	.del_vqs = virtio_ccw_del_vqs,
	.bus_name = virtio_ccw_bus_name,
};


/*
 * ccw bus driver related functions
 */

static void virtio_ccw_release_dev(struct device *_d)
{
	struct virtio_device *dev = dev_to_virtio(_d);
	struct virtio_ccw_device *vcdev = to_vc_device(dev);

	ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
			    sizeof(*vcdev->dma_area));
	kfree(vcdev);
}

static int irb_is_error(struct irb *irb)
{
	if (scsw_cstat(&irb->scsw) != 0)
		return 1;
	if (scsw_dstat(&irb->scsw) & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))
		return 1;
	if (scsw_cc(&irb->scsw) != 0)
		return 1;
	return 0;
}

static struct virtqueue *virtio_ccw_vq_by_ind(struct virtio_ccw_device *vcdev,
					      int index)
{
	struct virtio_ccw_vq_info *info;
	unsigned long flags;
	struct virtqueue *vq;

	vq = NULL;
	spin_lock_irqsave(&vcdev->lock, flags);
	list_for_each_entry(info, &vcdev->virtqueues, node) {
		if (info->vq->index == index) {
			vq = info->vq;
			break;
		}
	}
	spin_unlock_irqrestore(&vcdev->lock, flags);
	return vq;
}

static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
				      __u32 activity)
{
	if (vcdev->curr_io & activity) {
		switch (activity) {
		case VIRTIO_CCW_DOING_READ_FEAT:
		case VIRTIO_CCW_DOING_WRITE_FEAT:
		case VIRTIO_CCW_DOING_READ_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_CONFIG:
		case VIRTIO_CCW_DOING_WRITE_STATUS:
		case VIRTIO_CCW_DOING_READ_STATUS:
		case VIRTIO_CCW_DOING_SET_VQ:
		case VIRTIO_CCW_DOING_SET_IND:
		case VIRTIO_CCW_DOING_SET_CONF_IND:
		case VIRTIO_CCW_DOING_RESET:
		case VIRTIO_CCW_DOING_READ_VQ_CONF:
		case VIRTIO_CCW_DOING_SET_IND_ADAPTER:
		case VIRTIO_CCW_DOING_SET_VIRTIO_REV:
			vcdev->curr_io &= ~activity;
			wake_up(&vcdev->wait_q);
			break;
		default:
			/* don't know what to do... */
			dev_warn(&vcdev->cdev->dev,
				 "Suspicious activity '%08x'\n", activity);
			WARN_ON(1);
			break;
		}
	}
}
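
/*
 * Handler for classic I/O interrupts: completes the channel program that is
 * currently outstanding (if any) and services the virtqueue and
 * config-change indicators.
 */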
static void virtio_ccw_int_handler(struct ccw_device *cdev,
				   unsigned long intparm,
				   struct irb *irb)
{
	__u32 activity = intparm & VIRTIO_CCW_INTPARM_MASK;
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
	int i;
	struct virtqueue *vq;

	if (!vcdev)
		return;
	if (IS_ERR(irb)) {
		vcdev->err = PTR_ERR(irb);
		virtio_ccw_check_activity(vcdev, activity);
		/* Don't poke around indicators, something's wrong. */
		return;
	}
	/* Check if it's a notification from the host. */
	if ((intparm == 0) &&
	    (scsw_stctl(&irb->scsw) ==
	     (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND))) {
		/* OK */
	}
	if (irb_is_error(irb)) {
		/* Command reject? */
		if ((scsw_dstat(&irb->scsw) & DEV_STAT_UNIT_CHECK) &&
		    (irb->ecw[0] & SNS0_CMD_REJECT))
			vcdev->err = -EOPNOTSUPP;
		else
			/* Map everything else to -EIO. */
			vcdev->err = -EIO;
	}
	virtio_ccw_check_activity(vcdev, activity);
	for_each_set_bit(i, indicators(vcdev),
			 sizeof(*indicators(vcdev)) * BITS_PER_BYTE) {
		/* The bit clear must happen before the vring kick. */
		clear_bit(i, indicators(vcdev));
		barrier();
		vq = virtio_ccw_vq_by_ind(vcdev, i);
		vring_interrupt(0, vq);
	}
	if (test_bit(0, indicators2(vcdev))) {
		virtio_config_changed(&vcdev->vdev);
		clear_bit(0, indicators2(vcdev));
	}
}

/*
 * We usually want to autoonline all devices, but give the admin
 * a way to exempt devices from this.
 */
#define __DEV_WORDS ((__MAX_SUBCHANNEL + (8*sizeof(long) - 1)) / \
		     (8*sizeof(long)))
static unsigned long devs_no_auto[__MAX_SSID + 1][__DEV_WORDS];

static char *no_auto = "";

module_param(no_auto, charp, 0444);
MODULE_PARM_DESC(no_auto, "list of ccw bus id ranges not to be auto-onlined");

static int virtio_ccw_check_autoonline(struct ccw_device *cdev)
{
	struct ccw_dev_id id;

	ccw_device_get_id(cdev, &id);
	if (test_bit(id.devno, devs_no_auto[id.ssid]))
		return 0;
	return 1;
}

static void virtio_ccw_auto_online(void *data, async_cookie_t cookie)
{
	struct ccw_device *cdev = data;
	int ret;

	ret = ccw_device_set_online(cdev);
	if (ret)
		dev_warn(&cdev->dev, "Failed to set online: %d\n", ret);
}

static int virtio_ccw_probe(struct ccw_device *cdev)
{
	cdev->handler = virtio_ccw_int_handler;

	if (virtio_ccw_check_autoonline(cdev))
		async_schedule(virtio_ccw_auto_online, cdev);
	return 0;
}

static struct virtio_ccw_device *virtio_grab_drvdata(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev;

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	vcdev = dev_get_drvdata(&cdev->dev);
	if (!vcdev || vcdev->going_away) {
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
		return NULL;
	}
	vcdev->going_away = true;
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return vcdev;
}

static void virtio_ccw_remove(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (vcdev && cdev->online) {
		if (vcdev->device_lost)
			virtio_break_device(&vcdev->vdev);
		unregister_virtio_device(&vcdev->vdev);
		spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
		dev_set_drvdata(&cdev->dev, NULL);
		spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	}
	cdev->handler = NULL;
}

static int virtio_ccw_offline(struct ccw_device *cdev)
{
	unsigned long flags;
	struct virtio_ccw_device *vcdev = virtio_grab_drvdata(cdev);

	if (!vcdev)
		return 0;
	if (vcdev->device_lost)
		virtio_break_device(&vcdev->vdev);
	unregister_virtio_device(&vcdev->vdev);
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	return 0;
}
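
/*
 * Negotiate the transport revision: start at the highest revision we
 * support and step down on every command reject; if even revision 0 is
 * rejected, operate the device in legacy mode.
 */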
static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
{
	struct virtio_rev_info *rev;
	struct ccw1 *ccw;
	int ret;

	ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
	if (!ccw)
		return -ENOMEM;
	rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev));
	if (!rev) {
		ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
		return -ENOMEM;
	}

	/* Set transport revision */
	ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
	ccw->flags = 0;
	ccw->count = sizeof(*rev);
	ccw->cda = (__u32)(unsigned long)rev;

	vcdev->revision = VIRTIO_CCW_REV_MAX;
	do {
		rev->revision = vcdev->revision;
		/* none of our supported revisions carry payload */
		rev->length = 0;
		ret = ccw_io_helper(vcdev, ccw,
				    VIRTIO_CCW_DOING_SET_VIRTIO_REV);
		if (ret == -EOPNOTSUPP) {
			if (vcdev->revision == 0)
				/*
				 * The host device does not support setting
				 * the revision: let's operate it in legacy
				 * mode.
				 */
				ret = 0;
			else
				vcdev->revision--;
		}
	} while (ret == -EOPNOTSUPP);

	ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
	ccw_device_dma_free(vcdev->cdev, rev, sizeof(*rev));
	return ret;
}

static int virtio_ccw_online(struct ccw_device *cdev)
{
	int ret;
	struct virtio_ccw_device *vcdev;
	unsigned long flags;

	vcdev = kzalloc(sizeof(*vcdev), GFP_KERNEL);
	if (!vcdev) {
		dev_warn(&cdev->dev, "Could not get memory for virtio\n");
		ret = -ENOMEM;
		goto out_free;
	}
	vcdev->vdev.dev.parent = &cdev->dev;
	vcdev->cdev = cdev;
	vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
						sizeof(*vcdev->dma_area));
	if (!vcdev->dma_area) {
		ret = -ENOMEM;
		goto out_free;
	}

	vcdev->is_thinint = virtio_ccw_use_airq; /* at least try */

	vcdev->vdev.dev.release = virtio_ccw_release_dev;
	vcdev->vdev.config = &virtio_ccw_config_ops;
	init_waitqueue_head(&vcdev->wait_q);
	INIT_LIST_HEAD(&vcdev->virtqueues);
	spin_lock_init(&vcdev->lock);
	mutex_init(&vcdev->io_lock);

	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, vcdev);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	vcdev->vdev.id.vendor = cdev->id.cu_type;
	vcdev->vdev.id.device = cdev->id.cu_model;

	ret = virtio_ccw_set_transport_rev(vcdev);
	if (ret)
		goto out_free;

	ret = register_virtio_device(&vcdev->vdev);
	if (ret) {
		dev_warn(&cdev->dev, "Failed to register virtio device: %d\n",
			 ret);
		goto out_put;
	}
	return 0;
out_put:
	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
	dev_set_drvdata(&cdev->dev, NULL);
	spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
	put_device(&vcdev->vdev.dev);
	return ret;
out_free:
	if (vcdev) {
		ccw_device_dma_free(vcdev->cdev, vcdev->dma_area,
				    sizeof(*vcdev->dma_area));
	}
	kfree(vcdev);
	return ret;
}

static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
{
	int rc;
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);

	/*
	 * Make sure vcdev is set
	 * i.e. set_offline/remove callback not already running
	 */
	if (!vcdev)
		return NOTIFY_DONE;

	switch (event) {
	case CIO_GONE:
		vcdev->device_lost = true;
		rc = NOTIFY_DONE;
		break;
	case CIO_OPER:
		rc = NOTIFY_OK;
		break;
	default:
		rc = NOTIFY_DONE;
		break;
	}
	return rc;
}

static struct ccw_device_id virtio_ids[] = {
	{ CCW_DEVICE(0x3832, 0) },
	{},
};

#ifdef CONFIG_PM_SLEEP
static int virtio_ccw_freeze(struct ccw_device *cdev)
{
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);

	return virtio_device_freeze(&vcdev->vdev);
}

static int virtio_ccw_restore(struct ccw_device *cdev)
{
	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
	int ret;

	ret = virtio_ccw_set_transport_rev(vcdev);
	if (ret)
		return ret;

	return virtio_device_restore(&vcdev->vdev);
}
#endif

static struct ccw_driver virtio_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "virtio_ccw",
	},
	.ids = virtio_ids,
	.probe = virtio_ccw_probe,
	.remove = virtio_ccw_remove,
	.set_offline = virtio_ccw_offline,
	.set_online = virtio_ccw_online,
	.notify = virtio_ccw_cio_notify,
	.int_class = IRQIO_VIR,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtio_ccw_freeze,
	.thaw = virtio_ccw_restore,
	.restore = virtio_ccw_restore,
#endif
};

static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
			   int max_digit, int max_val)
{
	int diff;

	diff = 0;
	*val = 0;

	while (diff <= max_digit) {
		int value = hex_to_bin(**cp);

		if (value < 0)
			break;
		*val = *val * 16 + value;
		(*cp)++;
		diff++;
	}

	if ((diff < min_digit) || (diff > max_digit) || (*val > max_val))
		return 1;

	return 0;
}

static int __init parse_busid(char *str, unsigned int *cssid,
			      unsigned int *ssid, unsigned int *devno)
{
	char *str_work;
	int rc, ret;

	rc = 1;

	if (*str == '\0')
		goto out;

	str_work = str;
	ret = pure_hex(&str_work, cssid, 1, 2, __MAX_CSSID);
	if (ret || (str_work[0] != '.'))
		goto out;
	str_work++;
	ret = pure_hex(&str_work, ssid, 1, 1, __MAX_SSID);
	if (ret || (str_work[0] != '.'))
		goto out;
	str_work++;
	ret = pure_hex(&str_work, devno, 4, 4, __MAX_SUBCHANNEL);
	if (ret || (str_work[0] != '\0'))
		goto out;

	rc = 0;
out:
	return rc;
}

static void __init no_auto_parse(void)
{
	unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to;
	char *parm, *str;
	int rc;

	str = no_auto;
	while ((parm = strsep(&str, ","))) {
		rc = parse_busid(strsep(&parm, "-"), &from_cssid,
				 &from_ssid, &from);
		if (rc)
			continue;
		if (parm != NULL) {
			rc = parse_busid(parm, &to_cssid,
					 &to_ssid, &to);
			if ((from_ssid > to_ssid) ||
			    ((from_ssid == to_ssid) && (from > to)))
				rc = -EINVAL;
		} else {
			to_cssid = from_cssid;
			to_ssid = from_ssid;
			to = from;
		}
		if (rc)
			continue;
		while ((from_ssid < to_ssid) ||
		       ((from_ssid == to_ssid) && (from <= to))) {
			set_bit(from, devs_no_auto[from_ssid]);
			from++;
			if (from > __MAX_SUBCHANNEL) {
				from_ssid++;
				from = 0;
			}
		}
	}
}

static int __init virtio_ccw_init(void)
{
	int rc;

	/* parse no_auto string before we do anything further */
	no_auto_parse();

	summary_indicators = cio_dma_zalloc(MAX_AIRQ_AREAS);
	if (!summary_indicators)
		return -ENOMEM;
	rc = ccw_driver_register(&virtio_ccw_driver);
	if (rc)
		cio_dma_free(summary_indicators, MAX_AIRQ_AREAS);
	return rc;
}
device_initcall(virtio_ccw_init);