// SPDX-License-Identifier: GPL-2.0
/*
 * VFIO based Physical Subchannel device driver
 *
 * Copyright IBM Corp. 2017
 * Copyright Red Hat, Inc. 2019
 *
 * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
 *            Xiao Feng Ren <renxiaof@linux.vnet.ibm.com>
 *            Cornelia Huck <cohuck@redhat.com>
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/uuid.h>
#include <linux/mdev.h>

#include <asm/isc.h>

#include "ioasm.h"
#include "css.h"
#include "vfio_ccw_private.h"

struct workqueue_struct *vfio_ccw_work_q;
static struct kmem_cache *vfio_ccw_io_region;
static struct kmem_cache *vfio_ccw_cmd_region;

/*
 * Helpers
 */

/**
 * vfio_ccw_sch_quiesce - disable a subchannel, flushing in-flight I/O
 * @sch: subchannel to quiesce
 *
 * If disabling fails because the subchannel is busy, issue a
 * cancel/halt/clear sequence, wait (with a timeout) for the resulting
 * interrupts, and retry the disable for a bounded number of attempts.
 *
 * Returns 0 on success or a negative error code on failure.
 */
int vfio_ccw_sch_quiesce(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	DECLARE_COMPLETION_ONSTACK(completion);
	int iretry, ret = 0;

	spin_lock_irq(sch->lock);
	if (!sch->schib.pmcw.ena)
		goto out_unlock;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		goto out_unlock;

	iretry = 255;
	do {
		ret = cio_cancel_halt_clear(sch, &iretry);

		if (ret == -EIO) {
			pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
			       sch->schid.ssid, sch->schid.sch_no);
			break;
		}

		/*
		 * Flush all I/O and wait for
		 * cancel/halt/clear completion.
		 */
		private->completion = &completion;
		spin_unlock_irq(sch->lock);

		if (ret == -EBUSY)
			wait_for_completion_timeout(&completion, 3*HZ);

		private->completion = NULL;
		flush_workqueue(vfio_ccw_work_q);
		spin_lock_irq(sch->lock);
		ret = cio_disable_subchannel(sch);
	} while (ret == -EBUSY);
out_unlock:
	private->state = VFIO_CCW_STATE_NOT_OPER;
	spin_unlock_irq(sch->lock);
	return ret;
}

/*
 * Deliver an interrupt to userspace: update the channel-program state,
 * copy the IRB into the I/O region under the io_mutex, and signal the
 * eventfd that userspace is polling.
 */
static void vfio_ccw_sch_io_todo(struct work_struct *work)
{
	struct vfio_ccw_private *private;
	struct irb *irb;
	bool is_final;

	private = container_of(work, struct vfio_ccw_private, io_work);
	irb = &private->irb;

	is_final = !(scsw_actl(&irb->scsw) &
		     (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT));
	if (scsw_is_solicited(&irb->scsw)) {
		cp_update_scsw(&private->cp, &irb->scsw);
		if (is_final)
			cp_free(&private->cp);
	}
	mutex_lock(&private->io_mutex);
	memcpy(private->io_region->irb_area, irb, sizeof(*irb));
	mutex_unlock(&private->io_mutex);

	if (private->io_trigger)
		eventfd_signal(private->io_trigger, 1);

	if (private->mdev && is_final)
		private->state = VFIO_CCW_STATE_IDLE;
}

/*
 * Css driver callbacks
 */
static void vfio_ccw_sch_irq(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	inc_irq_stat(IRQIO_CIO);
	vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_INTERRUPT);
}

static int vfio_ccw_sch_probe(struct subchannel *sch)
{
	struct pmcw *pmcw = &sch->schib.pmcw;
	struct vfio_ccw_private *private;
	int ret = -ENOMEM;

	if (pmcw->qf) {
		dev_warn(&sch->dev, "vfio: ccw: does not support QDIO: %s\n",
			 dev_name(&sch->dev));
		return -ENODEV;
	}

	private = kzalloc(sizeof(*private), GFP_KERNEL | GFP_DMA);
	if (!private)
		return -ENOMEM;

	private->io_region = kmem_cache_zalloc(vfio_ccw_io_region,
					       GFP_KERNEL | GFP_DMA);
	if (!private->io_region)
		goto out_free;

	private->cmd_region = kmem_cache_zalloc(vfio_ccw_cmd_region,
						GFP_KERNEL | GFP_DMA);
	if (!private->cmd_region)
		goto out_free;

	private->sch = sch;
	dev_set_drvdata(&sch->dev, private);
	mutex_init(&private->io_mutex);

	spin_lock_irq(sch->lock);
	private->state = VFIO_CCW_STATE_NOT_OPER;
	sch->isc = VFIO_CCW_ISC;
	/* Use the subchannel's address as its interruption parameter. */
	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
	spin_unlock_irq(sch->lock);
	if (ret)
		goto out_free;

	INIT_WORK(&private->io_work, vfio_ccw_sch_io_todo);
	atomic_set(&private->avail, 1);
	private->state = VFIO_CCW_STATE_STANDBY;

	ret = vfio_ccw_mdev_reg(sch);
	if (ret)
		goto out_disable;

	return 0;

out_disable:
	cio_disable_subchannel(sch);
out_free:
	dev_set_drvdata(&sch->dev, NULL);
	if (private->cmd_region)
		kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
	if (private->io_region)
		kmem_cache_free(vfio_ccw_io_region, private->io_region);
	kfree(private);
	return ret;
}

static int vfio_ccw_sch_remove(struct subchannel *sch)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);

	vfio_ccw_sch_quiesce(sch);

	vfio_ccw_mdev_unreg(sch);

	dev_set_drvdata(&sch->dev, NULL);

	kmem_cache_free(vfio_ccw_cmd_region, private->cmd_region);
	kmem_cache_free(vfio_ccw_io_region, private->io_region);
	kfree(private);

	return 0;
}

static void vfio_ccw_sch_shutdown(struct subchannel *sch)
{
	vfio_ccw_sch_quiesce(sch);
}

/**
 * vfio_ccw_sch_event - process subchannel event
 * @sch: subchannel
 * @process: non-zero if function is called in process context
 *
 * An unspecified event occurred for this subchannel. Adjust data according
 * to the current operational state of the subchannel. Return zero when the
 * event has been handled sufficiently or -EAGAIN when this function should
 * be called again in process context.
 */
static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
	struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
	unsigned long flags;
	int rc = -EAGAIN;

	spin_lock_irqsave(sch->lock, flags);
	if (!device_is_registered(&sch->dev))
		goto out_unlock;

	if (work_pending(&sch->todo_work))
		goto out_unlock;

	if (cio_update_schib(sch)) {
		vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
		rc = 0;
		goto out_unlock;
	}

	if (private->state == VFIO_CCW_STATE_NOT_OPER) {
		private->state = private->mdev ?
				 VFIO_CCW_STATE_IDLE : VFIO_CCW_STATE_STANDBY;
	}
	rc = 0;

out_unlock:
	spin_unlock_irqrestore(sch->lock, flags);

	return rc;
}

static struct css_device_id vfio_ccw_sch_ids[] = {
	{ .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(css, vfio_ccw_sch_ids);

static struct css_driver vfio_ccw_sch_driver = {
	.drv = {
		.name = "vfio_ccw",
		.owner = THIS_MODULE,
	},
	.subchannel_type = vfio_ccw_sch_ids,
	.irq = vfio_ccw_sch_irq,
	.probe = vfio_ccw_sch_probe,
	.remove = vfio_ccw_sch_remove,
	.shutdown = vfio_ccw_sch_shutdown,
	.sch_event = vfio_ccw_sch_event,
};

static int __init vfio_ccw_sch_init(void)
{
	int ret = -ENOMEM;

	vfio_ccw_work_q = create_singlethread_workqueue("vfio-ccw");
	if (!vfio_ccw_work_q)
		return -ENOMEM;

	vfio_ccw_io_region = kmem_cache_create_usercopy("vfio_ccw_io_region",
					sizeof(struct ccw_io_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_io_region), NULL);
	if (!vfio_ccw_io_region)
		goto out_err;

	vfio_ccw_cmd_region = kmem_cache_create_usercopy("vfio_ccw_cmd_region",
					sizeof(struct ccw_cmd_region), 0,
					SLAB_ACCOUNT, 0,
					sizeof(struct ccw_cmd_region), NULL);
	if (!vfio_ccw_cmd_region)
		goto out_err;

	isc_register(VFIO_CCW_ISC);
	ret = css_driver_register(&vfio_ccw_sch_driver);
	if (ret) {
		isc_unregister(VFIO_CCW_ISC);
		goto out_err;
	}

	return ret;

out_err:
	kmem_cache_destroy(vfio_ccw_cmd_region);
	kmem_cache_destroy(vfio_ccw_io_region);
	destroy_workqueue(vfio_ccw_work_q);
	return ret;
}

static void __exit vfio_ccw_sch_exit(void)
{
	css_driver_unregister(&vfio_ccw_sch_driver);
	isc_unregister(VFIO_CCW_ISC);
	/* Destroy both caches created in vfio_ccw_sch_init(). */
	kmem_cache_destroy(vfio_ccw_cmd_region);
	kmem_cache_destroy(vfio_ccw_io_region);
	destroy_workqueue(vfio_ccw_work_q);
}
module_init(vfio_ccw_sch_init);
module_exit(vfio_ccw_sch_exit);

MODULE_LICENSE("GPL v2");
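/*
 * Editorial addition, not in the original file: a module description so
 * "modinfo vfio_ccw" reports one; the text mirrors the header comment above.
 */
MODULE_DESCRIPTION("VFIO based Physical Subchannel device driver");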