1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * VFIO based Physical Subchannel device driver 4 * 5 * Copyright IBM Corp. 2017 6 * Copyright Red Hat, Inc. 2019 7 * 8 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com> 9 * Xiao Feng Ren <renxiaof@linux.vnet.ibm.com> 10 * Cornelia Huck <cohuck@redhat.com> 11 */ 12 13 #include <linux/module.h> 14 #include <linux/init.h> 15 #include <linux/slab.h> 16 #include <linux/mdev.h> 17 18 #include <asm/isc.h> 19 20 #include "chp.h" 21 #include "ioasm.h" 22 #include "css.h" 23 #include "vfio_ccw_private.h" 24 25 struct workqueue_struct *vfio_ccw_work_q; 26 static struct kmem_cache *vfio_ccw_io_region; 27 static struct kmem_cache *vfio_ccw_cmd_region; 28 static struct kmem_cache *vfio_ccw_schib_region; 29 static struct kmem_cache *vfio_ccw_crw_region; 30 31 debug_info_t *vfio_ccw_debug_msg_id; 32 debug_info_t *vfio_ccw_debug_trace_id; 33 34 /* 35 * Helpers 36 */ 37 int vfio_ccw_sch_quiesce(struct subchannel *sch) 38 { 39 struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); 40 DECLARE_COMPLETION_ONSTACK(completion); 41 int iretry, ret = 0; 42 43 iretry = 255; 44 do { 45 46 ret = cio_cancel_halt_clear(sch, &iretry); 47 48 if (ret == -EIO) { 49 pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n", 50 sch->schid.ssid, sch->schid.sch_no); 51 break; 52 } 53 54 /* 55 * Flush all I/O and wait for 56 * cancel/halt/clear completion. 57 */ 58 private->completion = &completion; 59 spin_unlock_irq(sch->lock); 60 61 if (ret == -EBUSY) 62 wait_for_completion_timeout(&completion, 3*HZ); 63 64 private->completion = NULL; 65 flush_workqueue(vfio_ccw_work_q); 66 spin_lock_irq(sch->lock); 67 ret = cio_disable_subchannel(sch); 68 } while (ret == -EBUSY); 69 70 return ret; 71 } 72 73 static void vfio_ccw_sch_io_todo(struct work_struct *work) 74 { 75 struct vfio_ccw_private *private; 76 struct irb *irb; 77 bool is_final; 78 bool cp_is_finished = false; 79 80 private = container_of(work, struct vfio_ccw_private, io_work); 81 irb = &private->irb; 82 83 is_final = !(scsw_actl(&irb->scsw) & 84 (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)); 85 if (scsw_is_solicited(&irb->scsw)) { 86 cp_update_scsw(&private->cp, &irb->scsw); 87 if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) { 88 cp_free(&private->cp); 89 cp_is_finished = true; 90 } 91 } 92 mutex_lock(&private->io_mutex); 93 memcpy(private->io_region->irb_area, irb, sizeof(*irb)); 94 mutex_unlock(&private->io_mutex); 95 96 /* 97 * Reset to IDLE only if processing of a channel program 98 * has finished. Do not overwrite a possible processing 99 * state if the interrupt was unsolicited, or if the final 100 * interrupt was for HSCH or CSCH. 101 */ 102 if (cp_is_finished) 103 private->state = VFIO_CCW_STATE_IDLE; 104 105 if (private->io_trigger) 106 eventfd_signal(private->io_trigger, 1); 107 } 108 109 static void vfio_ccw_crw_todo(struct work_struct *work) 110 { 111 struct vfio_ccw_private *private; 112 113 private = container_of(work, struct vfio_ccw_private, crw_work); 114 115 if (!list_empty(&private->crw) && private->crw_trigger) 116 eventfd_signal(private->crw_trigger, 1); 117 } 118 119 /* 120 * Css driver callbacks 121 */ 122 static void vfio_ccw_sch_irq(struct subchannel *sch) 123 { 124 struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); 125 126 inc_irq_stat(IRQIO_CIO); 127 vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT); 128 } 129 130 static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch) 131 { 132 struct vfio_ccw_private *private; 133 134 private = kzalloc(sizeof(*private), GFP_KERNEL); 135 if (!private) 136 return ERR_PTR(-ENOMEM); 137 138 private->sch = sch; 139 mutex_init(&private->io_mutex); 140 private->state = VFIO_CCW_STATE_STANDBY; 141 INIT_LIST_HEAD(&private->crw); 142 INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo); 143 INIT_WORK(&private->crw_work, vfio_ccw_crw_todo); 144 atomic_set(&private->avail, 1); 145 146 private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1), 147 GFP_KERNEL); 148 if (!private->cp.guest_cp) 149 goto out_free_private; 150 151 private->io_region = kmem_cache_zalloc(vfio_ccw_io_region, 152 GFP_KERNEL | GFP_DMA); 153 if (!private->io_region) 154 goto out_free_cp; 155 156 private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region, 157 GFP_KERNEL | GFP_DMA); 158 if (!private->cmd_region) 159 goto out_free_io; 160 161 private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region, 162 GFP_KERNEL | GFP_DMA); 163 164 if (!private->schib_region) 165 goto out_free_cmd; 166 167 private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region, 168 GFP_KERNEL | GFP_DMA); 169 170 if (!private->crw_region) 171 goto out_free_schib; 172 return private; 173 174 out_free_schib: 175 kmem_cache_free(vfio_ccw_schib_region, private->schib_region); 176 out_free_cmd: 177 kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region); 178 out_free_io: 179 kmem_cache_free(vfio_ccw_io_region, private->io_region); 180 out_free_cp: 181 kfree(private->cp.guest_cp); 182 out_free_private: 183 mutex_destroy(&private->io_mutex); 184 kfree(private); 185 return ERR_PTR(-ENOMEM); 186 } 187 188 static void vfio_ccw_free_private(struct vfio_ccw_private *private) 189 { 190 struct vfio_ccw_crw *crw, *temp; 191 192 list_for_each_entry_safe(crw, temp, &private->crw, next) { 193 list_del(&crw->next); 194 kfree(crw); 195 } 196 197 kmem_cache_free(vfio_ccw_crw_region, private->crw_region); 198 kmem_cache_free(vfio_ccw_schib_region, private->schib_region); 199 kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region); 200 kmem_cache_free(vfio_ccw_io_region, private->io_region); 201 kfree(private->cp.guest_cp); 202 mutex_destroy(&private->io_mutex); 203 kfree(private); 204 } 205 static int vfio_ccw_sch_probe(struct subchannel *sch) 206 { 207 struct pmcw *pmcw = &sch->schib.pmcw; 208 struct vfio_ccw_private *private; 209 int ret = -ENOMEM; 210 211 if (pmcw->qf) { 212 dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n", 213 dev_name(&sch->dev)); 214 return -ENODEV; 215 } 216 217 private = vfio_ccw_alloc_private(sch); 218 if (IS_ERR(private)) 219 return PTR_ERR(private); 220 221 dev_set_drvdata(&sch->dev, private); 222 223 private->mdev_type.sysfs_name = "io"; 224 private->mdev_type.pretty_name = "I/O subchannel (Non-QDIO)"; 225 private->mdev_types[0] = &private->mdev_type; 226 ret = mdev_register_parent(&private->parent, &sch->dev, 227 &vfio_ccw_mdev_driver, 228 private->mdev_types, 1); 229 if (ret) 230 goto out_free; 231 232 VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n", 233 sch->schid.cssid, sch->schid.ssid, 234 sch->schid.sch_no); 235 return 0; 236 237 out_free: 238 dev_set_drvdata(&sch->dev, NULL); 239 vfio_ccw_free_private(private); 240 return ret; 241 } 242 243 static void vfio_ccw_sch_remove(struct subchannel *sch) 244 { 245 struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); 246 247 mdev_unregister_parent(&private->parent); 248 249 dev_set_drvdata(&sch->dev, NULL); 250 251 vfio_ccw_free_private(private); 252 253 VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n", 254 sch->schid.cssid, sch->schid.ssid, 255 sch->schid.sch_no); 256 } 257 258 static void vfio_ccw_sch_shutdown(struct subchannel *sch) 259 { 260 struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); 261 262 vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE); 263 vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER); 264 } 265 266 /** 267 * vfio_ccw_sch_event - process subchannel event 268 * @sch: subchannel 269 * @process: non-zero if function is called in process context 270 * 271 * An unspecified event occurred for this subchannel. Adjust data according 272 * to the current operational state of the subchannel. Return zero when the 273 * event has been handled sufficiently or -EAGAIN when this function should 274 * be called again in process context. 275 */ 276 static int vfio_ccw_sch_event(struct subchannel *sch, int process) 277 { 278 struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); 279 unsigned long flags; 280 int rc = -EAGAIN; 281 282 spin_lock_irqsave(sch->lock, flags); 283 if (!device_is_registered(&sch->dev)) 284 goto out_unlock; 285 286 if (work_pending(&sch->todo_work)) 287 goto out_unlock; 288 289 rc = 0; 290 291 if (cio_update_schib(sch)) 292 vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER); 293 294 out_unlock: 295 spin_unlock_irqrestore(sch->lock, flags); 296 297 return rc; 298 } 299 300 static void vfio_ccw_queue_crw(struct vfio_ccw_private *private, 301 unsigned int rsc, 302 unsigned int erc, 303 unsigned int rsid) 304 { 305 struct vfio_ccw_crw *crw; 306 307 /* 308 * If unable to allocate a CRW, just drop the event and 309 * carry on. The guest will either see a later one or 310 * learn when it issues its own store subchannel. 311 */ 312 crw = kzalloc(sizeof(*crw), GFP_ATOMIC); 313 if (!crw) 314 return; 315 316 /* 317 * Build the CRW based on the inputs given to us. 318 */ 319 crw->crw.rsc = rsc; 320 crw->crw.erc = erc; 321 crw->crw.rsid = rsid; 322 323 list_add_tail(&crw->next, &private->crw); 324 queue_work(vfio_ccw_work_q, &private->crw_work); 325 } 326 327 static int vfio_ccw_chp_event(struct subchannel *sch, 328 struct chp_link *link, int event) 329 { 330 struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev); 331 int mask = chp_ssd_get_mask(&sch->ssd_info, link); 332 int retry = 255; 333 334 if (!private || !mask) 335 return 0; 336 337 trace_vfio_ccw_chp_event(private->sch->schid, mask, event); 338 VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: mask=0x%x event=%d\n", 339 sch->schid.cssid, 340 sch->schid.ssid, sch->schid.sch_no, 341 mask, event); 342 343 if (cio_update_schib(sch)) 344 return -ENODEV; 345 346 switch (event) { 347 case CHP_VARY_OFF: 348 /* Path logically turned off */ 349 sch->opm &= ~mask; 350 sch->lpm &= ~mask; 351 if (sch->schib.pmcw.lpum & mask) 352 cio_cancel_halt_clear(sch, &retry); 353 break; 354 case CHP_OFFLINE: 355 /* Path is gone */ 356 if (sch->schib.pmcw.lpum & mask) 357 cio_cancel_halt_clear(sch, &retry); 358 vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN, 359 link->chpid.id); 360 break; 361 case CHP_VARY_ON: 362 /* Path logically turned on */ 363 sch->opm |= mask; 364 sch->lpm |= mask; 365 break; 366 case CHP_ONLINE: 367 /* Path became available */ 368 sch->lpm |= mask & sch->opm; 369 vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT, 370 link->chpid.id); 371 break; 372 } 373 374 return 0; 375 } 376 377 static struct css_device_id vfio_ccw_sch_ids[] = { 378 { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, }, 379 { /* end of list */ }, 380 }; 381 MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids); 382 383 static struct css_driver vfio_ccw_sch_driver = { 384 .drv = { 385 .name = "vfio_ccw", 386 .owner = THIS_MODULE, 387 }, 388 .subchannel_type = vfio_ccw_sch_ids, 389 .irq = vfio_ccw_sch_irq, 390 .probe = vfio_ccw_sch_probe, 391 .remove = vfio_ccw_sch_remove, 392 .shutdown = vfio_ccw_sch_shutdown, 393 .sch_event = vfio_ccw_sch_event, 394 .chp_event = vfio_ccw_chp_event, 395 }; 396 397 static int __init vfio_ccw_debug_init(void) 398 { 399 vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1, 400 11 * sizeof(long)); 401 if (!vfio_ccw_debug_msg_id) 402 goto out_unregister; 403 debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view); 404 debug_set_level(vfio_ccw_debug_msg_id, 2); 405 vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16); 406 if (!vfio_ccw_debug_trace_id) 407 goto out_unregister; 408 debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view); 409 debug_set_level(vfio_ccw_debug_trace_id, 2); 410 return 0; 411 412 out_unregister: 413 debug_unregister(vfio_ccw_debug_msg_id); 414 debug_unregister(vfio_ccw_debug_trace_id); 415 return -1; 416 } 417 418 static void vfio_ccw_debug_exit(void) 419 { 420 debug_unregister(vfio_ccw_debug_msg_id); 421 debug_unregister(vfio_ccw_debug_trace_id); 422 } 423 424 static void vfio_ccw_destroy_regions(void) 425 { 426 kmem_cache_destroy(vfio_ccw_crw_region); 427 kmem_cache_destroy(vfio_ccw_schib_region); 428 kmem_cache_destroy(vfio_ccw_cmd_region); 429 kmem_cache_destroy(vfio_ccw_io_region); 430 } 431 432 static int __init vfio_ccw_sch_init(void) 433 { 434 int ret; 435 436 ret = vfio_ccw_debug_init(); 437 if (ret) 438 return ret; 439 440 vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw"); 441 if (!vfio_ccw_work_q) { 442 ret = -ENOMEM; 443 goto out_regions; 444 } 445 446 vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region", 447 sizeof(struct ccw_io_region), 0, 448 SLAB_ACCOUNT, 0, 449 sizeof(struct ccw_io_region), NULL); 450 if (!vfio_ccw_io_region) { 451 ret = -ENOMEM; 452 goto out_regions; 453 } 454 455 vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region", 456 sizeof(struct ccw_cmd_region), 0, 457 SLAB_ACCOUNT, 0, 458 sizeof(struct ccw_cmd_region), NULL); 459 if (!vfio_ccw_cmd_region) { 460 ret = -ENOMEM; 461 goto out_regions; 462 } 463 464 vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region", 465 sizeof(struct ccw_schib_region), 0, 466 SLAB_ACCOUNT, 0, 467 sizeof(struct ccw_schib_region), NULL); 468 469 if (!vfio_ccw_schib_region) { 470 ret = -ENOMEM; 471 goto out_regions; 472 } 473 474 vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region", 475 sizeof(struct ccw_crw_region), 0, 476 SLAB_ACCOUNT, 0, 477 sizeof(struct ccw_crw_region), NULL); 478 479 if (!vfio_ccw_crw_region) { 480 ret = -ENOMEM; 481 goto out_regions; 482 } 483 484 ret = mdev_register_driver(&vfio_ccw_mdev_driver); 485 if (ret) 486 goto out_regions; 487 488 isc_register(VFIO_CCW_ISC); 489 ret = css_driver_register(&vfio_ccw_sch_driver); 490 if (ret) { 491 isc_unregister(VFIO_CCW_ISC); 492 goto out_driver; 493 } 494 495 return ret; 496 497 out_driver: 498 mdev_unregister_driver(&vfio_ccw_mdev_driver); 499 out_regions: 500 vfio_ccw_destroy_regions(); 501 destroy_workqueue(vfio_ccw_work_q); 502 vfio_ccw_debug_exit(); 503 return ret; 504 } 505 506 static void __exit vfio_ccw_sch_exit(void) 507 { 508 css_driver_unregister(&vfio_ccw_sch_driver); 509 mdev_unregister_driver(&vfio_ccw_mdev_driver); 510 isc_unregister(VFIO_CCW_ISC); 511 vfio_ccw_destroy_regions(); 512 destroy_workqueue(vfio_ccw_work_q); 513 vfio_ccw_debug_exit(); 514 } 515 module_init(vfio_ccw_sch_init); 516 module_exit(vfio_ccw_sch_exit); 517 518 MODULE_LICENSE("GPL v2"); 519