// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "chp.h"
#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;
static struct kmem_cache *vfio_ccw_schib_region;
static struct kmem_cache *vfio_ccw_crw_region;

debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;

/*
 * Helpers
 */
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        DECLARE_COMPLETION_ONSTACK(completion);
        int iretry, ret = 0;

        iretry = 255;
        do {

                ret = cio_cancel_halt_clear(sch, &iretry);

                if (ret == -EIO) {
                        pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
                               sch->schid.ssid, sch->schid.sch_no);
                        break;
                }

                /*
                 * Flush all I/O and wait for
                 * cancel/halt/clear completion.
                 */
                private->completion = &completion;
                spin_unlock_irq(sch->lock);

                if (ret == -EBUSY)
                        wait_for_completion_timeout(&completion, 3*HZ);

                private->completion = NULL;
                flush_workqueue(vfio_ccw_work_q);
                spin_lock_irq(sch->lock);
                ret = cio_disable_subchannel(sch);
        } while (ret == -EBUSY);

        return ret;
}

static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
        struct vfio_ccw_private *private;
        struct irb *irb;
        bool is_final;
        bool cp_is_finished = false;

        private = container_of(work, struct vfio_ccw_private, io_work);
        irb = &private->irb;

        is_final = !(scsw_actl(&irb->scsw) &
                     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
        if (scsw_is_solicited(&irb->scsw)) {
                cp_update_scsw(&private->cp, &irb->scsw);
                if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
                        cp_free(&private->cp);
                        cp_is_finished = true;
                }
        }
        mutex_lock(&private->io_mutex);
        memcpy(private->io_region->irb_area, irb, sizeof(*irb));
        mutex_unlock(&private->io_mutex);

        /*
         * Reset to IDLE only if processing of a channel program
         * has finished. Do not overwrite a possible processing
         * state if the interrupt was unsolicited, or if the final
         * interrupt was for HSCH or CSCH.
         */
        if (cp_is_finished)
                private->state = VFIO_CCW_STATE_IDLE;

        if (private->io_trigger)
                eventfd_signal(private->io_trigger, 1);
}

static void vfio_ccw_crw_todo(struct work_struct *work)
{
        struct vfio_ccw_private *private;

        private = container_of(work, struct vfio_ccw_private, crw_work);

        if (!list_empty(&private->crw) && private->crw_trigger)
                eventfd_signal(private->crw_trigger, 1);
}

/*
 * Css driver callbacks
 */
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

        inc_irq_stat(IRQIO_CIO);
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

static struct vfio_ccw_private *vfio_ccw_alloc_private(struct subchannel *sch)
{
        struct vfio_ccw_private *private;

        private = kzalloc(sizeof(*private), GFP_KERNEL);
        if (!private)
                return ERR_PTR(-ENOMEM);

        private->sch = sch;
        mutex_init(&private->io_mutex);
        private->state = VFIO_CCW_STATE_STANDBY;
        INIT_LIST_HEAD(&private->crw);
        INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
        INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
        atomic_set(&private->avail, 1);

        private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
                                       GFP_KERNEL);
        if (!private->cp.guest_cp)
                goto out_free_private;

        private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
                                               GFP_KERNEL | GFP_DMA);
        if (!private->io_region)
                goto out_free_cp;

        private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
                                                GFP_KERNEL | GFP_DMA);
        if (!private->cmd_region)
                goto out_free_io;

        private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
                                                  GFP_KERNEL | GFP_DMA);

        if (!private->schib_region)
                goto out_free_cmd;

        private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
                                                GFP_KERNEL | GFP_DMA);

        if (!private->crw_region)
                goto out_free_schib;
        return private;

out_free_schib:
        kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
out_free_cmd:
        kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
out_free_io:
        kmem_cache_free(vfio_ccw_io_region, private->io_region);
out_free_cp:
        kfree(private->cp.guest_cp);
out_free_private:
        mutex_destroy(&private->io_mutex);
        kfree(private);
        return ERR_PTR(-ENOMEM);
}

static void vfio_ccw_free_private(struct vfio_ccw_private *private)
{
        struct vfio_ccw_crw *crw, *temp;

        list_for_each_entry_safe(crw, temp, &private->crw, next) {
                list_del(&crw->next);
                kfree(crw);
        }

        kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
        kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
        kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
        kmem_cache_free(vfio_ccw_io_region, private->io_region);
        kfree(private->cp.guest_cp);
        mutex_destroy(&private->io_mutex);
        kfree(private);
}

static int vfio_ccw_sch_probe(struct subchannel *sch)
{
        struct pmcw *pmcw = &sch->schib.pmcw;
        struct vfio_ccw_private *private;
        int ret = -ENOMEM;

        if (pmcw->qf) {
                dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
                         dev_name(&sch->dev));
                return -ENODEV;
        }

        private = vfio_ccw_alloc_private(sch);
        if (IS_ERR(private))
                return PTR_ERR(private);

        dev_set_drvdata(&sch->dev, private);

        ret = mdev_register_device(&sch->dev,
                                   &vfio_ccw_mdev_driver);
        if (ret)
                goto out_free;

        VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
                           sch->schid.cssid, sch->schid.ssid,
                           sch->schid.sch_no);
        return 0;

out_free:
        dev_set_drvdata(&sch->dev, NULL);
        vfio_ccw_free_private(private);
        return ret;
}

static void vfio_ccw_sch_remove(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

        mdev_unregister_device(&sch->dev);

        dev_set_drvdata(&sch->dev, NULL);

        vfio_ccw_free_private(private);

        VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
                           sch->schid.cssid, sch->schid.ssid,
                           sch->schid.sch_no);
}

static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}

/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        unsigned long flags;
        int rc = -EAGAIN;

        spin_lock_irqsave(sch->lock, flags);
        if (!device_is_registered(&sch->dev))
                goto out_unlock;

        if (work_pending(&sch->todo_work))
                goto out_unlock;

        rc = 0;

        if (cio_update_schib(sch))
                vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);

out_unlock:
        spin_unlock_irqrestore(sch->lock, flags);

        return rc;
}

static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
                               unsigned int rsc,
                               unsigned int erc,
                               unsigned int rsid)
{
        struct vfio_ccw_crw *crw;

        /*
         * If unable to allocate a CRW, just drop the event and
         * carry on. The guest will either see a later one or
         * learn when it issues its own store subchannel.
         */
        crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
        if (!crw)
                return;

        /*
         * Build the CRW based on the inputs given to us.
         */
        crw->crw.rsc = rsc;
        crw->crw.erc = erc;
        crw->crw.rsid = rsid;

        list_add_tail(&crw->next, &private->crw);
        queue_work(vfio_ccw_work_q, &private->crw_work);
}

static int vfio_ccw_chp_event(struct subchannel *sch,
                              struct chp_link *link, int event)
{
        struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
        int mask = chp_ssd_get_mask(&sch->ssd_info, link);
        int retry = 255;

        if (!private || !mask)
                return 0;

        trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
        VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: mask=0x%x event=%d\n",
                           sch->schid.cssid,
                           sch->schid.ssid, sch->schid.sch_no,
                           mask, event);

        if (cio_update_schib(sch))
                return -ENODEV;

        switch (event) {
        case CHP_VARY_OFF:
                /* Path logically turned off */
                sch->opm &= ~mask;
                sch->lpm &= ~mask;
                if (sch->schib.pmcw.lpum & mask)
                        cio_cancel_halt_clear(sch, &retry);
                break;
        case CHP_OFFLINE:
                /* Path is gone */
                if (sch->schib.pmcw.lpum & mask)
                        cio_cancel_halt_clear(sch, &retry);
                vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
                                   link->chpid.id);
                break;
        case CHP_VARY_ON:
                /* Path logically turned on */
                sch->opm |= mask;
                sch->lpm |= mask;
                break;
        case CHP_ONLINE:
                /* Path became available */
                sch->lpm |= mask & sch->opm;
                vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
                                   link->chpid.id);
                break;
        }

        return 0;
}

static struct css_device_id vfio_ccw_sch_ids[] = {
        { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
        { /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
        .drv = {
                .name = "vfio_ccw",
                .owner = THIS_MODULE,
        },
        .subchannel_type = vfio_ccw_sch_ids,
        .irq = vfio_ccw_sch_irq,
        .probe = vfio_ccw_sch_probe,
        .remove = vfio_ccw_sch_remove,
        .shutdown = vfio_ccw_sch_shutdown,
        .sch_event = vfio_ccw_sch_event,
        .chp_event = vfio_ccw_chp_event,
};

static int __init vfio_ccw_debug_init(void)
{
        vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
                                               11 * sizeof(long));
        if (!vfio_ccw_debug_msg_id)
                goto out_unregister;
        debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
        debug_set_level(vfio_ccw_debug_msg_id, 2);
        vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
        if (!vfio_ccw_debug_trace_id)
                goto out_unregister;
        debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
        debug_set_level(vfio_ccw_debug_trace_id, 2);
        return 0;

out_unregister:
        debug_unregister(vfio_ccw_debug_msg_id);
        debug_unregister(vfio_ccw_debug_trace_id);
        return -1;
}

static void vfio_ccw_debug_exit(void)
{
        debug_unregister(vfio_ccw_debug_msg_id);
        debug_unregister(vfio_ccw_debug_trace_id);
}

static void vfio_ccw_destroy_regions(void)
{
        kmem_cache_destroy(vfio_ccw_crw_region);
        kmem_cache_destroy(vfio_ccw_schib_region);
        kmem_cache_destroy(vfio_ccw_cmd_region);
        kmem_cache_destroy(vfio_ccw_io_region);
}

static int __init vfio_ccw_sch_init(void)
{
        int ret;

        ret = vfio_ccw_debug_init();
        if (ret)
                return ret;

        vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
        if (!vfio_ccw_work_q) {
                ret = -ENOMEM;
                goto out_regions;
        }

        vfio_ccw_io_region =
                kmem_cache_create_usercopy("vfio_ccw_io_region",
                                           sizeof(struct ccw_io_region), 0,
                                           SLAB_ACCOUNT, 0,
                                           sizeof(struct ccw_io_region), NULL);
        if (!vfio_ccw_io_region) {
                ret = -ENOMEM;
                goto out_regions;
        }

        vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
                                           sizeof(struct ccw_cmd_region), 0,
                                           SLAB_ACCOUNT, 0,
                                           sizeof(struct ccw_cmd_region), NULL);
        if (!vfio_ccw_cmd_region) {
                ret = -ENOMEM;
                goto out_regions;
        }

        vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
                                           sizeof(struct ccw_schib_region), 0,
                                           SLAB_ACCOUNT, 0,
                                           sizeof(struct ccw_schib_region), NULL);

        if (!vfio_ccw_schib_region) {
                ret = -ENOMEM;
                goto out_regions;
        }

        vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
                                           sizeof(struct ccw_crw_region), 0,
                                           SLAB_ACCOUNT, 0,
                                           sizeof(struct ccw_crw_region), NULL);

        if (!vfio_ccw_crw_region) {
                ret = -ENOMEM;
                goto out_regions;
        }

        ret = mdev_register_driver(&vfio_ccw_mdev_driver);
        if (ret)
                goto out_regions;

        isc_register(VFIO_CCW_ISC);
        ret = css_driver_register(&vfio_ccw_sch_driver);
        if (ret) {
                isc_unregister(VFIO_CCW_ISC);
                goto out_driver;
        }

        return ret;

out_driver:
        mdev_unregister_driver(&vfio_ccw_mdev_driver);
out_regions:
        vfio_ccw_destroy_regions();
        destroy_workqueue(vfio_ccw_work_q);
        vfio_ccw_debug_exit();
        return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
        css_driver_unregister(&vfio_ccw_sch_driver);
        mdev_unregister_driver(&vfio_ccw_mdev_driver);
        isc_unregister(VFIO_CCW_ISC);
        vfio_ccw_destroy_regions();
        destroy_workqueue(vfio_ccw_work_q);
        vfio_ccw_debug_exit();
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");