// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "chp.h"
#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;
static struct kmem_cache *vfio_ccw_schib_region;
static struct kmem_cache *vfio_ccw_crw_region;

debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;

/*
 * Helpers
 */
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int iretry, ret = 0;

	iretry = 255;
	do {
		ret = cio_cancel_halt_clear(sch, &iretry);

		if (ret == -EIO) {
			pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
			       sch->schid.ssid, sch->schid.sch_no);
			break;
		}

		/*
		 * Flush all I/O and wait for
		 * cancel/halt/clear completion.
		 */
		private->completion = &completion;
		spin_unlock_irq(sch->lock);

		if (ret == -EBUSY)
			wait_for_completion_timeout(&completion, 3*HZ);

		private->completion = NULL;
		flush_workqueue(vfio_ccw_work_q);
		spin_lock_irq(sch->lock);
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);

	return ret;
}

static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
	struct vfio_ccw_private *private;
	struct irb *irb;
	bool is_final;
	bool cp_is_finished = false;

	private = container_of(work, struct vfio_ccw_private, io_work);
	irb = &private->irb;

	is_final = !(scsw_actl(&irb->scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
	if (scsw_is_solicited(&irb->scsw)) {
		cp_update_scsw(&private->cp, &irb->scsw);
		if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
			cp_free(&private->cp);
			cp_is_finished = true;
		}
	}
	mutex_lock(&private->io_mutex);
	memcpy(private->io_region->irb_area, irb, sizeof(*irb));
	mutex_unlock(&private->io_mutex);

	/*
	 * Reset to IDLE only if processing of a channel program
	 * has finished. Do not overwrite a possible processing
	 * state if the interrupt was unsolicited, or if the final
	 * interrupt was for HSCH or CSCH.
	 */
	if (cp_is_finished)
		private->state = VFIO_CCW_STATE_IDLE;

	if (private->io_trigger)
		eventfd_signal(private->io_trigger, 1);
}

static void vfio_ccw_crw_todo(struct work_struct *work)
{
	struct vfio_ccw_private *private;

	private = container_of(work, struct vfio_ccw_private, crw_work);

	if (!list_empty(&private->crw) && private->crw_trigger)
		eventfd_signal(private->crw_trigger, 1);
}

/*
 * Css driver callbacks
 */
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	inc_irq_stat(IRQIO_CIO);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

/*
 * Allocate the per-subchannel private state, including the guest
 * channel-program buffer and the DMA-capable I/O, command, SCHIB,
 * and CRW regions.
 */
static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)
{
	struct vfio_ccw_private *private;

	private = kzalloc(sizeof(*private), GFP_KERNEL);
	if (!private)
		return ERR_PTR(-ENOMEM);

	private->sch = sch;
	mutex_init(&private->io_mutex);
	private->state = VFIO_CCW_STATE_STANDBY;
	INIT_LIST_HEAD(&private->crw);
	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
	INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
	atomic_set(&private->avail, 1);

	private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
				       GFP_KERNEL);
	if (!private->cp.guest_cp)
		goto out_free_private;

	private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
					       GFP_KERNEL | GFP_DMA);
	if (!private->io_region)
		goto out_free_cp;

	private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
						GFP_KERNEL | GFP_DMA);
	if (!private->cmd_region)
		goto out_free_io;

	private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
						  GFP_KERNEL | GFP_DMA);
	if (!private->schib_region)
		goto out_free_cmd;

	private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
						GFP_KERNEL | GFP_DMA);
	if (!private->crw_region)
		goto out_free_schib;

	return private;

out_free_schib:
	kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
out_free_cmd:
	kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
out_free_io:
	kmem_cache_free(vfio_ccw_io_region, private->io_region);
out_free_cp:
	kfree(private->cp.guest_cp);
out_free_private:
	mutex_destroy(&private->io_mutex);
	kfree(private);
	return ERR_PTR(-ENOMEM);
}

/* Undo vfio_ccw_alloc_private(): drop any queued CRWs and free all regions. */
static void vfio_ccw_free_private(struct vfio_ccw_private *private)
{
	struct vfio_ccw_crw *crw, *temp;

	list_for_each_entry_safe(crw, temp, &private->crw, next) {
		list_del(&crw->next);
		kfree(crw);
	}

	kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
	kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
	kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
	kmem_cache_free(vfio_ccw_io_region, private->io_region);
	kfree(private->cp.guest_cp);
	mutex_destroy(&private->io_mutex);
	kfree(private);
}

static int vfio_ccw_sch_probe(struct subchannel *sch)
{
	struct pmcw *pmcw = &sch->schib.pmcw;
	struct vfio_ccw_private *private;
	int ret = -ENOMEM;

	if (pmcw->qf) {
		dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
			 dev_name(&sch->dev));
		return -ENODEV;
	}

	private = vfio_ccw_alloc_private(sch);
	if (IS_ERR(private))
		return PTR_ERR(private);

	dev_set_drvdata(&sch->dev, private);

	ret = mdev_register_device(&sch->dev, &vfio_ccw_mdev_driver);
	if (ret)
		goto out_free;

	VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
			   sch->schid.cssid, sch->schid.ssid,
			   sch->schid.sch_no);
	return 0;

out_free:
	dev_set_drvdata(&sch->dev, NULL);
	vfio_ccw_free_private(private);
	return ret;
}

static void vfio_ccw_sch_remove(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	mdev_unregister_device(&sch->dev);

	dev_set_drvdata(&sch->dev, NULL);

	vfio_ccw_free_private(private);

	VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
			   sch->schid.cssid, sch->schid.ssid,
			   sch->schid.sch_no);
}

static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}

/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	unsigned long flags;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	if (work_pending(&sch->todo_work))
		goto out_unlock;

	rc = 0;

	if (cio_update_schib(sch))
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return rc;
}

static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
			       unsigned int rsc,
			       unsigned int erc,
			       unsigned int rsid)
{
	struct vfio_ccw_crw *crw;

	/*
	 * If unable to allocate a CRW, just drop the event and
	 * carry on. The guest will either see a later one or
	 * learn when it issues its own store subchannel.
	 */
	crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
	if (!crw)
		return;

	/*
	 * Build the CRW based on the inputs given to us.
	 */
	crw->crw.rsc = rsc;
	crw->crw.erc = erc;
	crw->crw.rsid = rsid;

	list_add_tail(&crw->next, &private->crw);
	queue_work(vfio_ccw_work_q, &private->crw_work);
}

static int vfio_ccw_chp_event(struct subchannel *sch,
			      struct chp_link *link, int event)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	int mask = chp_ssd_get_mask(&sch->ssd_info, link);
	int retry = 255;

	if (!private || !mask)
		return 0;

	trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
	VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: mask=0x%x event=%d\n",
			   sch->schid.cssid,
			   sch->schid.ssid, sch->schid.sch_no,
			   mask, event);

	if (cio_update_schib(sch))
		return -ENODEV;

	switch (event) {
	case CHP_VARY_OFF:
		/* Path logically turned off */
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (sch->schib.pmcw.lpum & mask)
			cio_cancel_halt_clear(sch, &retry);
		break;
	case CHP_OFFLINE:
		/* Path is gone */
		if (sch->schib.pmcw.lpum & mask)
			cio_cancel_halt_clear(sch, &retry);
		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
				   link->chpid.id);
		break;
	case CHP_VARY_ON:
		/* Path logically turned on */
		sch->opm |= mask;
		sch->lpm |= mask;
		break;
	case CHP_ONLINE:
		/* Path became available */
		sch->lpm |= mask & sch->opm;
		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
				   link->chpid.id);
		break;
	}

	return 0;
}

static struct css_device_id vfio_ccw_sch_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
	.drv = {
		.name = "vfio_ccw",
		.owner = THIS_MODULE,
	},
	.subchannel_type = vfio_ccw_sch_ids,
	.irq = vfio_ccw_sch_irq,
	.probe = vfio_ccw_sch_probe,
	.remove = vfio_ccw_sch_remove,
	.shutdown = vfio_ccw_sch_shutdown,
	.sch_event = vfio_ccw_sch_event,
	.chp_event = vfio_ccw_chp_event,
};

static int __init vfio_ccw_debug_init(void)
{
	vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
					       11 * sizeof(long));
	if (!vfio_ccw_debug_msg_id)
		goto out_unregister;
	debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
	debug_set_level(vfio_ccw_debug_msg_id, 2);
	vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
	if (!vfio_ccw_debug_trace_id)
		goto out_unregister;
	debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
	debug_set_level(vfio_ccw_debug_trace_id, 2);
	return 0;

out_unregister:
	debug_unregister(vfio_ccw_debug_msg_id);
	debug_unregister(vfio_ccw_debug_trace_id);
	return -1;
}

static void vfio_ccw_debug_exit(void)
{
	debug_unregister(vfio_ccw_debug_msg_id);
	debug_unregister(vfio_ccw_debug_trace_id);
}

static void vfio_ccw_destroy_regions(void)
{
	kmem_cache_destroy(vfio_ccw_crw_region);
	kmem_cache_destroy(vfio_ccw_schib_region);
	kmem_cache_destroy(vfio_ccw_cmd_region);
	kmem_cache_destroy(vfio_ccw_io_region);
}

static int __init vfio_ccw_sch_init(void)
{
	int ret;

	ret = vfio_ccw_debug_init();
	if (ret)
		return ret;

	vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
	if (!vfio_ccw_work_q) {
		ret = -ENOMEM;
		goto out_regions;
	}

	vfio_ccw_io_region =
		kmem_cache_create_usercopy("vfio_ccw_io_region",
					   sizeof(struct ccw_io_region), 0,
					   SLAB_ACCOUNT, 0,
					   sizeof(struct ccw_io_region), NULL);
	if (!vfio_ccw_io_region) {
		ret = -ENOMEM;
		goto out_regions;
	}

	vfio_ccw_cmd_region =
		kmem_cache_create_usercopy("vfio_ccw_cmd_region",
					   sizeof(struct ccw_cmd_region), 0,
					   SLAB_ACCOUNT, 0,
					   sizeof(struct ccw_cmd_region), NULL);
	if (!vfio_ccw_cmd_region) {
		ret = -ENOMEM;
		goto out_regions;
	}

	vfio_ccw_schib_region =
		kmem_cache_create_usercopy("vfio_ccw_schib_region",
					   sizeof(struct ccw_schib_region), 0,
					   SLAB_ACCOUNT, 0,
					   sizeof(struct ccw_schib_region), NULL);
	if (!vfio_ccw_schib_region) {
		ret = -ENOMEM;
		goto out_regions;
	}

	vfio_ccw_crw_region =
		kmem_cache_create_usercopy("vfio_ccw_crw_region",
					   sizeof(struct ccw_crw_region), 0,
					   SLAB_ACCOUNT, 0,
					   sizeof(struct ccw_crw_region), NULL);
	if (!vfio_ccw_crw_region) {
		ret = -ENOMEM;
		goto out_regions;
	}

	ret = mdev_register_driver(&vfio_ccw_mdev_driver);
	if (ret)
		goto out_regions;

	isc_register(VFIO_CCW_ISC);
	ret = css_driver_register(&vfio_ccw_sch_driver);
	if (ret) {
		isc_unregister(VFIO_CCW_ISC);
		goto out_driver;
	}

	return ret;

out_driver:
	mdev_unregister_driver(&vfio_ccw_mdev_driver);
out_regions:
	vfio_ccw_destroy_regions();
	destroy_workqueue(vfio_ccw_work_q);
	vfio_ccw_debug_exit();
	return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
	css_driver_unregister(&vfio_ccw_sch_driver);
	mdev_unregister_driver(&vfio_ccw_mdev_driver);
	isc_unregister(VFIO_CCW_ISC);
	vfio_ccw_destroy_regions();
	destroy_workqueue(vfio_ccw_work_q);
	vfio_ccw_debug_exit();
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");