// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "chp.h"
#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;
static struct kmem_cache *vfio_ccw_schib_region;
static struct kmem_cache *vfio_ccw_crw_region;

debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;

/*
 * Helpers
 */
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int iretry, ret = 0;

	spin_lock_irq(sch->lock);
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;

	iretry = 255;
	do {

		ret = cio_cancel_halt_clear(sch, &iretry);

		if (ret == -EIO) {
			pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
			       sch->schid.ssid, sch->schid.sch_no);
			break;
		}

		/*
		 * Flush all I/O and wait for
		 * cancel/halt/clear completion.
		 */
		private->completion = &completion;
		spin_unlock_irq(sch->lock);

		if (ret == -EBUSY)
			wait_for_completion_timeout(&completion, 3*HZ);

		private->completion = NULL;
		flush_workqueue(vfio_ccw_work_q);
		spin_lock_irq(sch->lock);
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);
out_unlock:
	private->state = VFIO_CCW_STATE_NOT_OPER;
	spin_unlock_irq(sch->lock);
	return ret;
}

static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
	struct vfio_ccw_private *private;
	struct irb *irb;
	bool is_final;

	private = container_of(work, struct vfio_ccw_private, io_work);
	irb = &private->irb;

	is_final = !(scsw_actl(&irb->scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
	if (scsw_is_solicited(&irb->scsw)) {
		cp_update_scsw(&private->cp, &irb->scsw);
		if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING)
			cp_free(&private->cp);
	}
	mutex_lock(&private->io_mutex);
	memcpy(private->io_region->irb_area, irb, sizeof(*irb));
	mutex_unlock(&private->io_mutex);

	if (private->mdev && is_final)
		private->state = VFIO_CCW_STATE_IDLE;

	if (private->io_trigger)
		eventfd_signal(private->io_trigger, 1);
}

static void vfio_ccw_crw_todo(struct work_struct *work)
{
	struct vfio_ccw_private *private;

	private = container_of(work, struct vfio_ccw_private, crw_work);

	if (!list_empty(&private->crw) && private->crw_trigger)
		eventfd_signal(private->crw_trigger, 1);
}

/*
 * Css driver callbacks
 */
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	inc_irq_stat(IRQIO_CIO);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

static void vfio_ccw_free_regions(struct vfio_ccw_private *private)
{
	if (private->crw_region)
		kmem_cache_free(vfio_ccw_crw_region, private->crw_region);
	if (private->schib_region)
		kmem_cache_free(vfio_ccw_schib_region, private->schib_region);
	if (private->cmd_region)
		kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
	if (private->io_region)
		kmem_cache_free(vfio_ccw_io_region, private->io_region);
}

static int vfio_ccw_sch_probe(struct subchannel *sch)
{
	struct pmcw *pmcw = &sch->schib.pmcw;
	struct vfio_ccw_private *private;
	int ret = -ENOMEM;

	if (pmcw->qf) {
		dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
			 dev_name(&sch->dev));
		return -ENODEV;
	}

	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
	if (!private)
		return -ENOMEM;

	private->cp.guest_cp = kcalloc(CCWCHAIN_LEN_MAX, sizeof(struct ccw1),
				       GFP_KERNEL);
	if (!private->cp.guest_cp)
		goto out_free;

	private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
					       GFP_KERNEL | GFP_DMA);
	if (!private->io_region)
		goto out_free;

	private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
						GFP_KERNEL | GFP_DMA);
	if (!private->cmd_region)
		goto out_free;

	private->schib_region = kmem_cache_zalloc(vfio_ccw_schib_region,
						  GFP_KERNEL | GFP_DMA);

	if (!private->schib_region)
		goto out_free;

	private->crw_region = kmem_cache_zalloc(vfio_ccw_crw_region,
						GFP_KERNEL | GFP_DMA);

	if (!private->crw_region)
		goto out_free;

	private->sch = sch;
	dev_set_drvdata(&sch->dev, private);
	mutex_init(&private->io_mutex);

	spin_lock_irq(sch->lock);
	private->state = VFIO_CCW_STATE_NOT_OPER;
	sch->isc = VFIO_CCW_ISC;
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	spin_unlock_irq(sch->lock);
	if (ret)
		goto out_free;

	INIT_LIST_HEAD(&private->crw);
	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
	INIT_WORK(&private->crw_work, vfio_ccw_crw_todo);
	atomic_set(&private->avail, 1);
	private->state = VFIO_CCW_STATE_STANDBY;

	ret = vfio_ccw_mdev_reg(sch);
	if (ret)
		goto out_disable;

	if (dev_get_uevent_suppress(&sch->dev)) {
		dev_set_uevent_suppress(&sch->dev, 0);
		kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
	}

	VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
			   sch->schid.cssid, sch->schid.ssid,
			   sch->schid.sch_no);
	return 0;

out_disable:
	cio_disable_subchannel(sch);
out_free:
	dev_set_drvdata(&sch->dev, NULL);
	vfio_ccw_free_regions(private);
	kfree(private->cp.guest_cp);
	kfree(private);
	return ret;
}

static int vfio_ccw_sch_remove(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	struct vfio_ccw_crw *crw, *temp;

	vfio_ccw_sch_quiesce(sch);

	list_for_each_entry_safe(crw, temp, &private->crw, next) {
		list_del(&crw->next);
		kfree(crw);
	}

	vfio_ccw_mdev_unreg(sch);

	dev_set_drvdata(&sch->dev, NULL);

	vfio_ccw_free_regions(private);
	kfree(private->cp.guest_cp);
	kfree(private);

	VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
			   sch->schid.cssid, sch->schid.ssid,
			   sch->schid.sch_no);
	return 0;
}

static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
	vfio_ccw_sch_quiesce(sch);
}

/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	unsigned long flags;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	if (work_pending(&sch->todo_work))
		goto out_unlock;

	if (cio_update_schib(sch)) {
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
		rc = 0;
		goto out_unlock;
	}

	private = dev_get_drvdata(&sch->dev);
	if (private->state == VFIO_CCW_STATE_NOT_OPER) {
		private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
				 VFIO_CCW_STATE_STANDBY;
	}
	rc = 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return rc;
}

static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
			       unsigned int rsc,
			       unsigned int erc,
			       unsigned int rsid)
{
	struct vfio_ccw_crw *crw;

	/*
	 * If unable to allocate a CRW, just drop the event and
	 * carry on. The guest will either see a later one or
	 * learn when it issues its own store subchannel.
	 */
	crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
	if (!crw)
		return;

	/*
	 * Build the CRW based on the inputs given to us.
	 */
	crw->crw.rsc = rsc;
	crw->crw.erc = erc;
	crw->crw.rsid = rsid;

	list_add_tail(&crw->next, &private->crw);
	queue_work(vfio_ccw_work_q, &private->crw_work);
}

static int vfio_ccw_chp_event(struct subchannel *sch,
			      struct chp_link *link, int event)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	int mask = chp_ssd_get_mask(&sch->ssd_info, link);
	int retry = 255;

	if (!private || !mask)
		return 0;

	trace_vfio_ccw_chp_event(private->sch->schid, mask, event);
	VFIO_CCW_MSG_EVENT(2, "%pUl (%x.%x.%04x): mask=0x%x event=%d\n",
			   mdev_uuid(private->mdev), sch->schid.cssid,
			   sch->schid.ssid, sch->schid.sch_no,
			   mask, event);

	if (cio_update_schib(sch))
		return -ENODEV;

	switch (event) {
	case CHP_VARY_OFF:
		/* Path logically turned off */
		sch->opm &= ~mask;
		sch->lpm &= ~mask;
		if (sch->schib.pmcw.lpum & mask)
			cio_cancel_halt_clear(sch, &retry);
		break;
	case CHP_OFFLINE:
		/* Path is gone */
		if (sch->schib.pmcw.lpum & mask)
			cio_cancel_halt_clear(sch, &retry);
		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
				   link->chpid.id);
		break;
	case CHP_VARY_ON:
		/* Path logically turned on */
		sch->opm |= mask;
		sch->lpm |= mask;
		break;
	case CHP_ONLINE:
		/* Path became available */
		sch->lpm |= mask & sch->opm;
		vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
				   link->chpid.id);
		break;
	}

	return 0;
}

static struct css_device_id vfio_ccw_sch_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
	.drv = {
		.name = "vfio_ccw",
		.owner = THIS_MODULE,
	},
	.subchannel_type = vfio_ccw_sch_ids,
	.irq = vfio_ccw_sch_irq,
	.probe = vfio_ccw_sch_probe,
	.remove = vfio_ccw_sch_remove,
	.shutdown = vfio_ccw_sch_shutdown,
	.sch_event = vfio_ccw_sch_event,
	.chp_event = vfio_ccw_chp_event,
};

static int __init vfio_ccw_debug_init(void)
{
	vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
					       11 * sizeof(long));
	if (!vfio_ccw_debug_msg_id)
		goto out_unregister;
	debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
	debug_set_level(vfio_ccw_debug_msg_id, 2);
	vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
	if (!vfio_ccw_debug_trace_id)
		goto out_unregister;
	debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
	debug_set_level(vfio_ccw_debug_trace_id, 2);
	return 0;

out_unregister:
	debug_unregister(vfio_ccw_debug_msg_id);
	debug_unregister(vfio_ccw_debug_trace_id);
	return -1;
}

static void vfio_ccw_debug_exit(void)
{
	debug_unregister(vfio_ccw_debug_msg_id);
	debug_unregister(vfio_ccw_debug_trace_id);
}

static void vfio_ccw_destroy_regions(void)
{
	kmem_cache_destroy(vfio_ccw_crw_region);
	kmem_cache_destroy(vfio_ccw_schib_region);
	kmem_cache_destroy(vfio_ccw_cmd_region);
	kmem_cache_destroy(vfio_ccw_io_region);
}

static int __init vfio_ccw_sch_init(void)
{
	int ret;

	ret = vfio_ccw_debug_init();
	if (ret)
		return ret;

	vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
	if (!vfio_ccw_work_q) {
		ret = -ENOMEM;
		goto out_err;
	}

	vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
					sizeof(struct ccw_io_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_io_region), NULL);
	if (!vfio_ccw_io_region) {
		ret = -ENOMEM;
		goto out_err;
	}

	vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
					sizeof(struct ccw_cmd_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_cmd_region), NULL);
	if (!vfio_ccw_cmd_region) {
		ret = -ENOMEM;
		goto out_err;
	}

	vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
					sizeof(struct ccw_schib_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_schib_region), NULL);

	if (!vfio_ccw_schib_region) {
		ret = -ENOMEM;
		goto out_err;
	}

	vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
					sizeof(struct ccw_crw_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_crw_region), NULL);

	if (!vfio_ccw_crw_region) {
		ret = -ENOMEM;
		goto out_err;
	}

	isc_register(VFIO_CCW_ISC);
	ret = css_driver_register(&vfio_ccw_sch_driver);
	if (ret) {
		isc_unregister(VFIO_CCW_ISC);
		goto out_err;
	}

	return ret;

out_err:
	vfio_ccw_destroy_regions();
	destroy_workqueue(vfio_ccw_work_q);
	vfio_ccw_debug_exit();
	return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
	css_driver_unregister(&vfio_ccw_sch_driver);
	isc_unregister(VFIO_CCW_ISC);
	vfio_ccw_destroy_regions();
	destroy_workqueue(vfio_ccw_work_q);
	vfio_ccw_debug_exit();
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");