// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "chp.h"
#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

struct workqueue_struct *vfio_ccw_work_q;
struct kmem_cache *vfio_ccw_io_region;
struct kmem_cache *vfio_ccw_cmd_region;
struct kmem_cache *vfio_ccw_schib_region;
struct kmem_cache *vfio_ccw_crw_region;

debug_info_t *vfio_ccw_debug_msg_id;
debug_info_t *vfio_ccw_debug_trace_id;

/*
 * Helpers
 */
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
        struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
        struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
        DECLARE_COMPLETION_ONSTACK(completion);
        int iretry, ret = 0;

        /*
         * Probably an impossible situation, after being called through
         * FSM callbacks. But in the event it does happen, log a warning
         * and return as if things were fine.
         */
        if (WARN_ON(!private))
                return 0;

        iretry = 255;
        do {
                ret = cio_cancel_halt_clear(sch, &iretry);

                if (ret == -EIO) {
                        pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
                               sch->schid.ssid, sch->schid.sch_no);
                        break;
                }

                /*
                 * Flush all I/O and wait for
                 * cancel/halt/clear completion.
                 */
                private->completion = &completion;
                spin_unlock_irq(sch->lock);

                if (ret == -EBUSY)
                        wait_for_completion_timeout(&completion, 3*HZ);

                private->completion = NULL;
                flush_workqueue(vfio_ccw_work_q);
                spin_lock_irq(sch->lock);
                ret = cio_disable_subchannel(sch);
        } while (ret == -EBUSY);

        return ret;
}
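/*
 * Usage sketch (illustrative, not a call site in this file): since the
 * function above drops and re-acquires sch->lock around the wait, it
 * must be entered with the subchannel lock held.  A hypothetical caller
 * would therefore look like:
 *
 *      spin_lock_irq(sch->lock);
 *      ret = vfio_ccw_sch_quiesce(sch);  // may sleep; lock is dropped inside
 *      spin_unlock_irq(sch->lock);
 */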
void vfio_ccw_sch_io_todo(struct work_struct *work)
{
        struct vfio_ccw_private *private;
        struct irb *irb;
        bool is_final;
        bool cp_is_finished = false;

        private = container_of(work, struct vfio_ccw_private, io_work);
        irb = &private->irb;

        is_final = !(scsw_actl(&irb->scsw) &
                     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
        if (scsw_is_solicited(&irb->scsw)) {
                cp_update_scsw(&private->cp, &irb->scsw);
                if (is_final && private->state == VFIO_CCW_STATE_CP_PENDING) {
                        cp_free(&private->cp);
                        cp_is_finished = true;
                }
        }
        mutex_lock(&private->io_mutex);
        memcpy(private->io_region->irb_area, irb, sizeof(*irb));
        mutex_unlock(&private->io_mutex);

        /*
         * Reset to IDLE only if processing of a channel program
         * has finished. Do not overwrite a possible processing
         * state if the interrupt was unsolicited, or if the final
         * interrupt was for HSCH or CSCH.
         */
        if (cp_is_finished)
                private->state = VFIO_CCW_STATE_IDLE;

        if (private->io_trigger)
                eventfd_signal(private->io_trigger, 1);
}

void vfio_ccw_crw_todo(struct work_struct *work)
{
        struct vfio_ccw_private *private;

        private = container_of(work, struct vfio_ccw_private, crw_work);

        if (!list_empty(&private->crw) && private->crw_trigger)
                eventfd_signal(private->crw_trigger, 1);
}

/*
 * Css driver callbacks
 */
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
        struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
        struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);

        /*
         * The subchannel should still be disabled at this point,
         * so an interrupt would be quite surprising. As with an
         * interrupt while the FSM is closed, let's attempt to
         * disable the subchannel again.
         */
        if (!private) {
                VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: unexpected interrupt\n",
                                   sch->schid.cssid, sch->schid.ssid,
                                   sch->schid.sch_no);

                cio_disable_subchannel(sch);
                return;
        }

        inc_irq_stat(IRQIO_CIO);
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

static void vfio_ccw_free_parent(struct device *dev)
{
        struct vfio_ccw_parent *parent = container_of(dev, struct vfio_ccw_parent, dev);

        kfree(parent);
}

static int vfio_ccw_sch_probe(struct subchannel *sch)
{
        struct pmcw *pmcw = &sch->schib.pmcw;
        struct vfio_ccw_parent *parent;
        int ret = -ENOMEM;

        if (pmcw->qf) {
                dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
                         dev_name(&sch->dev));
                return -ENODEV;
        }

        /* Allocate room for the one entry of the flexible mdev_types[] array. */
        parent = kzalloc(struct_size(parent, mdev_types, 1), GFP_KERNEL);
        if (!parent)
                return -ENOMEM;

        dev_set_name(&parent->dev, "parent");
        parent->dev.parent = &sch->dev;
        parent->dev.release = &vfio_ccw_free_parent;
        ret = device_register(&parent->dev);
        if (ret)
                goto out_free;

        dev_set_drvdata(&sch->dev, parent);

        parent->mdev_type.sysfs_name = "io";
        parent->mdev_type.pretty_name = "I/O subchannel (Non-QDIO)";
        parent->mdev_types[0] = &parent->mdev_type;
        ret = mdev_register_parent(&parent->parent, &sch->dev,
                                   &vfio_ccw_mdev_driver,
                                   parent->mdev_types, 1);
        if (ret)
                goto out_unreg;

        VFIO_CCW_MSG_EVENT(4, "bound to subchannel %x.%x.%04x\n",
                           sch->schid.cssid, sch->schid.ssid,
                           sch->schid.sch_no);
        return 0;

out_unreg:
        device_del(&parent->dev);
out_free:
        /* Drop the initial reference; frees parent via vfio_ccw_free_parent(). */
        put_device(&parent->dev);
        dev_set_drvdata(&sch->dev, NULL);
        return ret;
}

static void vfio_ccw_sch_remove(struct subchannel *sch)
{
        struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);

        mdev_unregister_parent(&parent->parent);

        device_unregister(&parent->dev);
        dev_set_drvdata(&sch->dev, NULL);

        VFIO_CCW_MSG_EVENT(4, "unbound from subchannel %x.%x.%04x\n",
                           sch->schid.cssid, sch->schid.ssid,
                           sch->schid.sch_no);
}

static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
        struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
        struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);

        if (WARN_ON(!private))
                return;

        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_CLOSE);
        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
}
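/*
 * Note on the lookup pattern used throughout this file: the subchannel's
 * drvdata points to the vfio_ccw_parent, whose own drvdata points to the
 * vfio_ccw_private once a mediated device has been probed.  A hypothetical
 * helper (example_get_private is not part of this driver) collapsing the
 * two hops would look like:
 *
 *      static struct vfio_ccw_private *example_get_private(struct subchannel *sch)
 *      {
 *              struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
 *
 *              return parent ? dev_get_drvdata(&parent->dev) : NULL;
 *      }
 *
 * private may be NULL whenever no mediated device is bound, which is why
 * the callbacks check it before raising FSM events.
 */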
/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
        struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
        struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
        unsigned long flags;
        int rc = -EAGAIN;

        spin_lock_irqsave(sch->lock, flags);
        if (!device_is_registered(&sch->dev))
                goto out_unlock;

        if (work_pending(&sch->todo_work))
                goto out_unlock;

        rc = 0;

        if (cio_update_schib(sch)) {
                if (private)
                        vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
        }

out_unlock:
        spin_unlock_irqrestore(sch->lock, flags);

        return rc;
}

static void vfio_ccw_queue_crw(struct vfio_ccw_private *private,
                               unsigned int rsc,
                               unsigned int erc,
                               unsigned int rsid)
{
        struct vfio_ccw_crw *crw;

        /*
         * If unable to allocate a CRW, just drop the event and
         * carry on. The guest will either see a later one or
         * learn when it issues its own store subchannel.
         */
        crw = kzalloc(sizeof(*crw), GFP_ATOMIC);
        if (!crw)
                return;

        /*
         * Build the CRW based on the inputs given to us.
         */
        crw->crw.rsc = rsc;
        crw->crw.erc = erc;
        crw->crw.rsid = rsid;

        list_add_tail(&crw->next, &private->crw);
        queue_work(vfio_ccw_work_q, &private->crw_work);
}

static int vfio_ccw_chp_event(struct subchannel *sch,
                              struct chp_link *link, int event)
{
        struct vfio_ccw_parent *parent = dev_get_drvdata(&sch->dev);
        struct vfio_ccw_private *private = dev_get_drvdata(&parent->dev);
        int mask = chp_ssd_get_mask(&sch->ssd_info, link);
        int retry = 255;

        if (!private || !mask)
                return 0;

        trace_vfio_ccw_chp_event(sch->schid, mask, event);
        VFIO_CCW_MSG_EVENT(2, "sch %x.%x.%04x: mask=0x%x event=%d\n",
                           sch->schid.cssid,
                           sch->schid.ssid, sch->schid.sch_no,
                           mask, event);

        if (cio_update_schib(sch))
                return -ENODEV;

        switch (event) {
        case CHP_VARY_OFF:
                /* Path logically turned off */
                sch->opm &= ~mask;
                sch->lpm &= ~mask;
                if (sch->schib.pmcw.lpum & mask)
                        cio_cancel_halt_clear(sch, &retry);
                break;
        case CHP_OFFLINE:
                /* Path is gone */
                if (sch->schib.pmcw.lpum & mask)
                        cio_cancel_halt_clear(sch, &retry);
                vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_PERRN,
                                   link->chpid.id);
                break;
        case CHP_VARY_ON:
                /* Path logically turned on */
                sch->opm |= mask;
                sch->lpm |= mask;
                break;
        case CHP_ONLINE:
                /* Path became available */
                sch->lpm |= mask & sch->opm;
                vfio_ccw_queue_crw(private, CRW_RSC_CPATH, CRW_ERC_INIT,
                                   link->chpid.id);
                break;
        }

        return 0;
}
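/*
 * Worked example (values are illustrative): chp_ssd_get_mask() reports
 * one bit per channel path, most-significant bit first, so a link that
 * matches the second CHPID in the SSD info yields mask == 0x40.  For
 * CHP_VARY_OFF that bit is cleared from both sch->opm and sch->lpm,
 * and if the device was last operating on that path
 * (sch->schib.pmcw.lpum & 0x40), any in-flight I/O is terminated via
 * cancel/halt/clear before the path is considered gone.
 */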
static struct css_device_id vfio_ccw_sch_ids[] = {
        { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
        { /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
        .drv = {
                .name = "vfio_ccw",
                .owner = THIS_MODULE,
        },
        .subchannel_type = vfio_ccw_sch_ids,
        .irq = vfio_ccw_sch_irq,
        .probe = vfio_ccw_sch_probe,
        .remove = vfio_ccw_sch_remove,
        .shutdown = vfio_ccw_sch_shutdown,
        .sch_event = vfio_ccw_sch_event,
        .chp_event = vfio_ccw_chp_event,
};

static int __init vfio_ccw_debug_init(void)
{
        vfio_ccw_debug_msg_id = debug_register("vfio_ccw_msg", 16, 1,
                                               11 * sizeof(long));
        if (!vfio_ccw_debug_msg_id)
                goto out_unregister;
        debug_register_view(vfio_ccw_debug_msg_id, &debug_sprintf_view);
        debug_set_level(vfio_ccw_debug_msg_id, 2);
        vfio_ccw_debug_trace_id = debug_register("vfio_ccw_trace", 16, 1, 16);
        if (!vfio_ccw_debug_trace_id)
                goto out_unregister;
        debug_register_view(vfio_ccw_debug_trace_id, &debug_hex_ascii_view);
        debug_set_level(vfio_ccw_debug_trace_id, 2);
        return 0;

out_unregister:
        /* debug_unregister() is a no-op for a NULL id. */
        debug_unregister(vfio_ccw_debug_msg_id);
        debug_unregister(vfio_ccw_debug_trace_id);
        return -1;
}

static void vfio_ccw_debug_exit(void)
{
        debug_unregister(vfio_ccw_debug_msg_id);
        debug_unregister(vfio_ccw_debug_trace_id);
}

static void vfio_ccw_destroy_regions(void)
{
        kmem_cache_destroy(vfio_ccw_crw_region);
        kmem_cache_destroy(vfio_ccw_schib_region);
        kmem_cache_destroy(vfio_ccw_cmd_region);
        kmem_cache_destroy(vfio_ccw_io_region);
}

static int __init vfio_ccw_sch_init(void)
{
        int ret;

        ret = vfio_ccw_debug_init();
        if (ret)
                return ret;

        vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
        if (!vfio_ccw_work_q) {
                ret = -ENOMEM;
                /* No workqueue to destroy yet; skip straight to debug cleanup. */
                goto out_debug;
        }

        vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
                                        sizeof(struct ccw_io_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_io_region), NULL);
        if (!vfio_ccw_io_region) {
                ret = -ENOMEM;
                goto out_regions;
        }

        vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
                                        sizeof(struct ccw_cmd_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_cmd_region), NULL);
        if (!vfio_ccw_cmd_region) {
                ret = -ENOMEM;
                goto out_regions;
        }

        vfio_ccw_schib_region = kmem_cache_create_usercopy("vfio_ccw_schib_region",
                                        sizeof(struct ccw_schib_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_schib_region), NULL);
        if (!vfio_ccw_schib_region) {
                ret = -ENOMEM;
                goto out_regions;
        }

        vfio_ccw_crw_region = kmem_cache_create_usercopy("vfio_ccw_crw_region",
                                        sizeof(struct ccw_crw_region), 0,
                                        SLAB_ACCOUNT, 0,
                                        sizeof(struct ccw_crw_region), NULL);
        if (!vfio_ccw_crw_region) {
                ret = -ENOMEM;
                goto out_regions;
        }

        ret = mdev_register_driver(&vfio_ccw_mdev_driver);
        if (ret)
                goto out_regions;

        isc_register(VFIO_CCW_ISC);
        ret = css_driver_register(&vfio_ccw_sch_driver);
        if (ret) {
                isc_unregister(VFIO_CCW_ISC);
                goto out_driver;
        }

        return ret;

out_driver:
        mdev_unregister_driver(&vfio_ccw_mdev_driver);
out_regions:
        vfio_ccw_destroy_regions();
        destroy_workqueue(vfio_ccw_work_q);
out_debug:
        vfio_ccw_debug_exit();
        return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
        css_driver_unregister(&vfio_ccw_sch_driver);
        mdev_unregister_driver(&vfio_ccw_mdev_driver);
        isc_unregister(VFIO_CCW_ISC);
        vfio_ccw_destroy_regions();
        destroy_workqueue(vfio_ccw_work_q);
        vfio_ccw_debug_exit();
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");
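/*
 * Usage sketch (per the kernel's vfio-ccw documentation; the subchannel
 * number 0.0.0313 is an example and paths may vary by kernel version):
 * hand a subchannel to this driver and create a mediated device of the
 * "io" type registered in vfio_ccw_sch_probe():
 *
 *      # echo 0.0.0313 > /sys/bus/css/devices/0.0.0313/driver/unbind
 *      # echo 0.0.0313 > /sys/bus/css/drivers/vfio_ccw/bind
 *      # uuidgen > /sys/bus/css/devices/0.0.0313/mdev_supported_types/vfio_ccw-io/create
 *
 * The resulting mdev can then be passed to a guest through VFIO.
 */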