/*
 * QEMU educational PCI device
 *
 * Copyright (c) 2012-2015 Jiri Slaby
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/units.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "qemu/timer.h"
#include "qom/object.h"
#include "qemu/main-loop.h" /* iothread mutex */
#include "qemu/module.h"
#include "qapi/visitor.h"

#define TYPE_PCI_EDU_DEVICE "edu"
typedef struct EduState EduState;
DECLARE_INSTANCE_CHECKER(EduState, EDU,
                         TYPE_PCI_EDU_DEVICE)

#define FACT_IRQ        0x00000001
#define DMA_IRQ         0x00000100

#define DMA_START       0x40000
#define DMA_SIZE        4096

struct EduState {
    PCIDevice pdev;
    MemoryRegion mmio;

    QemuThread thread;
    QemuMutex thr_mutex;
    QemuCond thr_cond;
    bool stopping;

    uint32_t addr4;
    uint32_t fact;
#define EDU_STATUS_COMPUTING    0x01
#define EDU_STATUS_IRQFACT      0x80
    uint32_t status;

    uint32_t irq_status;

#define EDU_DMA_RUN             0x1
#define EDU_DMA_DIR(cmd)        (((cmd) & 0x2) >> 1)
# define EDU_DMA_FROM_PCI       0
# define EDU_DMA_TO_PCI         1
#define EDU_DMA_IRQ             0x4
    struct dma_state {
        dma_addr_t src;
        dma_addr_t dst;
        dma_addr_t cnt;
        dma_addr_t cmd;
    } dma;
    QEMUTimer dma_timer;
    char dma_buf[DMA_SIZE];
    uint64_t dma_mask;
};

static bool edu_msi_enabled(EduState *edu)
{
    return msi_enabled(&edu->pdev);
}

static void edu_raise_irq(EduState *edu, uint32_t val)
{
    edu->irq_status |= val;
    if (edu->irq_status) {
        if (edu_msi_enabled(edu)) {
            msi_notify(&edu->pdev, 0);
        } else {
            pci_set_irq(&edu->pdev, 1);
        }
    }
}

static void edu_lower_irq(EduState *edu, uint32_t val)
{
    edu->irq_status &= ~val;

    if (!edu->irq_status && !edu_msi_enabled(edu)) {
        pci_set_irq(&edu->pdev, 0);
    }
}

/* Return true if the transfer fits entirely inside the device DMA window. */
static bool edu_check_range(uint64_t xfer_start, uint64_t xfer_size,
                            uint64_t dma_start, uint64_t dma_size)
{
    uint64_t xfer_end = xfer_start + xfer_size;
    uint64_t dma_end = dma_start + dma_size;

    /*
     * 1. ensure we aren't overflowing
     * 2. ensure that xfer is within dma address range
     */
    if (dma_end >= dma_start && xfer_end >= xfer_start &&
        xfer_start >= dma_start && xfer_end <= dma_end) {
        return true;
    }

    qemu_log_mask(LOG_GUEST_ERROR,
                  "EDU: DMA range 0x%016"PRIx64"-0x%016"PRIx64
                  " out of bounds (0x%016"PRIx64"-0x%016"PRIx64")!",
                  xfer_start, xfer_end - 1, dma_start, dma_end - 1);
    return false;
}

static dma_addr_t edu_clamp_addr(const EduState *edu, dma_addr_t addr)
{
    dma_addr_t res = addr & edu->dma_mask;

    if (addr != res) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "EDU: clamping DMA 0x%016"PRIx64" to 0x%016"PRIx64"!",
                      addr, res);
    }

    return res;
}

static void edu_dma_timer(void *opaque)
{
    EduState *edu = opaque;
    bool raise_irq = false;

    if (!(edu->dma.cmd & EDU_DMA_RUN)) {
        return;
    }

    if (EDU_DMA_DIR(edu->dma.cmd) == EDU_DMA_FROM_PCI) {
        uint64_t dst = edu->dma.dst;

        /* Skip out-of-range transfers; the guest error has been logged. */
        if (edu_check_range(dst, edu->dma.cnt, DMA_START, DMA_SIZE)) {
            dst -= DMA_START;
            pci_dma_read(&edu->pdev, edu_clamp_addr(edu, edu->dma.src),
                         edu->dma_buf + dst, edu->dma.cnt);
        }
    } else {
        uint64_t src = edu->dma.src;

        /* Skip out-of-range transfers; the guest error has been logged. */
        if (edu_check_range(src, edu->dma.cnt, DMA_START, DMA_SIZE)) {
            src -= DMA_START;
            pci_dma_write(&edu->pdev, edu_clamp_addr(edu, edu->dma.dst),
                          edu->dma_buf + src, edu->dma.cnt);
        }
    }

    edu->dma.cmd &= ~EDU_DMA_RUN;
    if (edu->dma.cmd & EDU_DMA_IRQ) {
        raise_irq = true;
    }

    if (raise_irq) {
        edu_raise_irq(edu, DMA_IRQ);
    }
}

static void dma_rw(EduState *edu, bool write, dma_addr_t *val, dma_addr_t *dma,
                   bool timer)
{
    if (write && (edu->dma.cmd & EDU_DMA_RUN)) {
        return;
    }

    if (write) {
        *dma = *val;
    } else {
        *val = *dma;
    }

    if (timer) {
        timer_mod(&edu->dma_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 100);
    }
}

static uint64_t edu_mmio_read(void *opaque, hwaddr addr, unsigned size)
{
    EduState *edu = opaque;
    uint64_t val = ~0ULL;

    if (addr < 0x80 && size != 4) {
        return val;
    }

    if (addr >= 0x80 && size != 4 && size != 8) {
        return val;
    }

    switch (addr) {
    case 0x00:
        val = 0x010000edu;
        break;
    case 0x04:
        val = edu->addr4;
        break;
    case 0x08:
        qemu_mutex_lock(&edu->thr_mutex);
        val = edu->fact;
        qemu_mutex_unlock(&edu->thr_mutex);
        break;
    case 0x20:
        val = qatomic_read(&edu->status);
        break;
    case 0x24:
        val = edu->irq_status;
        break;
    case 0x80:
        dma_rw(edu, false, &val, &edu->dma.src, false);
        break;
    case 0x88:
        dma_rw(edu, false, &val, &edu->dma.dst, false);
        break;
    case 0x90:
        dma_rw(edu, false, &val, &edu->dma.cnt, false);
        break;
    case 0x98:
        dma_rw(edu, false, &val, &edu->dma.cmd, false);
        break;
    }

    return val;
}

static void edu_mmio_write(void *opaque, hwaddr addr, uint64_t val,
                           unsigned size)
{
    EduState *edu = opaque;

    if (addr < 0x80 && size != 4) {
        return;
    }

    if (addr >= 0x80 && size != 4 && size != 8) {
        return;
    }

    switch (addr) {
    case 0x04:
        edu->addr4 = ~val;
        break;
    case 0x08:
        if (qatomic_read(&edu->status) & EDU_STATUS_COMPUTING) {
            break;
        }
        /* EDU_STATUS_COMPUTING cannot go 0->1 concurrently, because it is only
         * set in this function and it is under the iothread mutex.
         */
        qemu_mutex_lock(&edu->thr_mutex);
        edu->fact = val;
        qatomic_or(&edu->status, EDU_STATUS_COMPUTING);
        qemu_cond_signal(&edu->thr_cond);
        qemu_mutex_unlock(&edu->thr_mutex);
        break;
    case 0x20:
        if (val & EDU_STATUS_IRQFACT) {
            qatomic_or(&edu->status, EDU_STATUS_IRQFACT);
            /* Order check of the COMPUTING flag after setting IRQFACT. */
            smp_mb__after_rmw();
        } else {
            qatomic_and(&edu->status, ~EDU_STATUS_IRQFACT);
        }
        break;
    case 0x60:
        edu_raise_irq(edu, val);
        break;
    case 0x64:
        edu_lower_irq(edu, val);
        break;
    case 0x80:
        dma_rw(edu, true, &val, &edu->dma.src, false);
        break;
    case 0x88:
        dma_rw(edu, true, &val, &edu->dma.dst, false);
        break;
    case 0x90:
        dma_rw(edu, true, &val, &edu->dma.cnt, false);
        break;
    case 0x98:
        if (!(val & EDU_DMA_RUN)) {
            break;
        }
        dma_rw(edu, true, &val, &edu->dma.cmd, true);
        break;
    }
}

static const MemoryRegionOps edu_mmio_ops = {
    .read = edu_mmio_read,
    .write = edu_mmio_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

/*
 * We purposely use a thread, so that users are forced to wait for the status
 * register.
 */
static void *edu_fact_thread(void *opaque)
{
    EduState *edu = opaque;

    while (1) {
        uint32_t val, ret = 1;

        qemu_mutex_lock(&edu->thr_mutex);
        while ((qatomic_read(&edu->status) & EDU_STATUS_COMPUTING) == 0 &&
               !edu->stopping) {
            qemu_cond_wait(&edu->thr_cond, &edu->thr_mutex);
        }

        if (edu->stopping) {
            qemu_mutex_unlock(&edu->thr_mutex);
            break;
        }

        val = edu->fact;
        qemu_mutex_unlock(&edu->thr_mutex);

        while (val > 0) {
            ret *= val--;
        }

        /*
         * We should sleep for a random period here, so that students are
         * forced to check the status properly.
         */

        qemu_mutex_lock(&edu->thr_mutex);
        edu->fact = ret;
        qemu_mutex_unlock(&edu->thr_mutex);
        qatomic_and(&edu->status, ~EDU_STATUS_COMPUTING);

        /* Clear COMPUTING flag before checking IRQFACT. */
        smp_mb__after_rmw();

        if (qatomic_read(&edu->status) & EDU_STATUS_IRQFACT) {
            bql_lock();
            edu_raise_irq(edu, FACT_IRQ);
            bql_unlock();
        }
    }

    return NULL;
}

static void pci_edu_realize(PCIDevice *pdev, Error **errp)
{
    EduState *edu = EDU(pdev);
    uint8_t *pci_conf = pdev->config;

    pci_config_set_interrupt_pin(pci_conf, 1);

    if (msi_init(pdev, 0, 1, true, false, errp)) {
        return;
    }

    timer_init_ms(&edu->dma_timer, QEMU_CLOCK_VIRTUAL, edu_dma_timer, edu);

    qemu_mutex_init(&edu->thr_mutex);
    qemu_cond_init(&edu->thr_cond);
    qemu_thread_create(&edu->thread, "edu", edu_fact_thread,
                       edu, QEMU_THREAD_JOINABLE);

    memory_region_init_io(&edu->mmio, OBJECT(edu), &edu_mmio_ops, edu,
                          "edu-mmio", 1 * MiB);
    pci_register_bar(pdev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &edu->mmio);
}

static void pci_edu_uninit(PCIDevice *pdev)
{
    EduState *edu = EDU(pdev);

    qemu_mutex_lock(&edu->thr_mutex);
    edu->stopping = true;
    qemu_mutex_unlock(&edu->thr_mutex);
    qemu_cond_signal(&edu->thr_cond);
    qemu_thread_join(&edu->thread);

    qemu_cond_destroy(&edu->thr_cond);
    qemu_mutex_destroy(&edu->thr_mutex);

    timer_del(&edu->dma_timer);
    msi_uninit(pdev);
}

static void edu_instance_init(Object *obj)
{
    EduState *edu = EDU(obj);

    edu->dma_mask = (1UL << 28) - 1;
    object_property_add_uint64_ptr(obj, "dma_mask",
                                   &edu->dma_mask, OBJ_PROP_FLAG_READWRITE);
}

static void edu_class_init(ObjectClass *class, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(class);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(class);

    k->realize = pci_edu_realize;
    k->exit = pci_edu_uninit;
    k->vendor_id = PCI_VENDOR_ID_QEMU;
    k->device_id = 0x11e8;
    k->revision = 0x10;
    k->class_id = PCI_CLASS_OTHERS;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
}

static void pci_edu_register_types(void)
{
    static InterfaceInfo interfaces[] = {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    };
    static const TypeInfo edu_info = {
        .name          = TYPE_PCI_EDU_DEVICE,
        .parent        = TYPE_PCI_DEVICE,
        .instance_size = sizeof(EduState),
        .instance_init = edu_instance_init,
        .class_init    = edu_class_init,
        .interfaces    = interfaces,
    };

    type_register_static(&edu_info);
}
type_init(pci_edu_register_types)