/*
 * QEMU sPAPR IOMMU (TCE) code
 *
 * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "hw/hw.h"
#include "qemu/log.h"
#include "sysemu/kvm.h"
#include "hw/qdev.h"
#include "kvm_ppc.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "trace.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"

#include <libfdt.h>

enum sPAPRTCEAccess {
    SPAPR_TCE_FAULT = 0,
    SPAPR_TCE_RO = 1,
    SPAPR_TCE_WO = 2,
    SPAPR_TCE_RW = 3,
};

#define IOMMU_PAGE_SIZE(shift)      (1ULL << (shift))
#define IOMMU_PAGE_MASK(shift)      (~(IOMMU_PAGE_SIZE(shift) - 1))

static QLIST_HEAD(spapr_tce_tables, sPAPRTCETable) spapr_tce_tables;

sPAPRTCETable *spapr_tce_find_by_liobn(target_ulong liobn)
{
    sPAPRTCETable *tcet;

    if (liobn & 0xFFFFFFFF00000000ULL) {
        hcall_dprintf("Request for out-of-bounds LIOBN 0x" TARGET_FMT_lx "\n",
                      liobn);
        return NULL;
    }

    QLIST_FOREACH(tcet, &spapr_tce_tables, list) {
        if (tcet->liobn == (uint32_t)liobn) {
            return tcet;
        }
    }

    return NULL;
}

static IOMMUAccessFlags spapr_tce_iommu_access_flags(uint64_t tce)
{
    switch (tce & SPAPR_TCE_RW) {
    case SPAPR_TCE_FAULT:
        return IOMMU_NONE;
    case SPAPR_TCE_RO:
        return IOMMU_RO;
    case SPAPR_TCE_WO:
        return IOMMU_WO;
    default: /* SPAPR_TCE_RW */
        return IOMMU_RW;
    }
}

static uint64_t *spapr_tce_alloc_table(uint32_t liobn,
                                       uint32_t page_shift,
                                       uint64_t bus_offset,
                                       uint32_t nb_table,
                                       int *fd,
                                       bool need_vfio)
{
    uint64_t *table = NULL;

    if (kvm_enabled()) {
        table = kvmppc_create_spapr_tce(liobn, page_shift, bus_offset, nb_table,
                                        fd, need_vfio);
    }

    if (!table) {
        *fd = -1;
        table = g_malloc0(nb_table * sizeof(uint64_t));
    }

    trace_spapr_iommu_new_table(liobn, table, *fd);

    return table;
}

static void spapr_tce_free_table(uint64_t *table, int fd, uint32_t nb_table)
{
    if (!kvm_enabled() ||
        (kvmppc_remove_spapr_tce(table, fd, nb_table) != 0)) {
        g_free(table);
    }
}
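/*
 * IOMMU translation callback: looks up the TCE covering @addr and turns
 * it into a guest physical address plus access permissions. An address
 * beyond the end of the table, or a cleared TCE entry, translates with
 * IOMMU_NONE, i.e. a fault.
 *
 * Called from RCU critical section.
 */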
static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr,
                                               bool is_write)
{
    sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);
    uint64_t tce;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    if ((addr >> tcet->page_shift) < tcet->nb_table) {
        /* Check if we are in bounds */
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        tce = tcet->table[addr >> tcet->page_shift];
        ret.iova = addr & page_mask;
        ret.translated_addr = tce & page_mask;
        ret.addr_mask = ~page_mask;
        ret.perm = spapr_tce_iommu_access_flags(tce);
    }
    trace_spapr_iommu_xlate(tcet->liobn, addr, ret.iova, ret.perm,
                            ret.addr_mask);

    return ret;
}

static void spapr_tce_table_pre_save(void *opaque)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);

    tcet->mig_table = tcet->table;
    tcet->mig_nb_table = tcet->nb_table;

    trace_spapr_iommu_pre_save(tcet->liobn, tcet->mig_nb_table,
                               tcet->bus_offset, tcet->page_shift);
}

static uint64_t spapr_tce_get_min_page_size(MemoryRegion *iommu)
{
    sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);

    return 1ULL << tcet->page_shift;
}

static void spapr_tce_notify_flag_changed(MemoryRegion *iommu,
                                          IOMMUNotifierFlag old,
                                          IOMMUNotifierFlag new)
{
    struct sPAPRTCETable *tbl = container_of(iommu, sPAPRTCETable, iommu);

    if (old == IOMMU_NOTIFIER_NONE && new != IOMMU_NOTIFIER_NONE) {
        spapr_tce_set_need_vfio(tbl, true);
    } else if (old != IOMMU_NOTIFIER_NONE && new == IOMMU_NOTIFIER_NONE) {
        spapr_tce_set_need_vfio(tbl, false);
    }
}

static int spapr_tce_table_post_load(void *opaque, int version_id)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);
    uint32_t old_nb_table = tcet->nb_table;
    uint64_t old_bus_offset = tcet->bus_offset;
    uint32_t old_page_shift = tcet->page_shift;

    if (tcet->vdev) {
        spapr_vio_set_bypass(tcet->vdev, tcet->bypass);
    }

    if (tcet->mig_nb_table != tcet->nb_table) {
        spapr_tce_table_disable(tcet);
    }

    if (tcet->mig_nb_table) {
        if (!tcet->nb_table) {
            spapr_tce_table_enable(tcet, old_page_shift, old_bus_offset,
                                   tcet->mig_nb_table);
        }

        memcpy(tcet->table, tcet->mig_table,
               tcet->nb_table * sizeof(tcet->table[0]));

        /* The migration array is allocated with g_malloc by vmstate, so
         * it must be released with g_free, not plain free */
        g_free(tcet->mig_table);
        tcet->mig_table = NULL;
    }

    trace_spapr_iommu_post_load(tcet->liobn, old_nb_table, tcet->nb_table,
                                tcet->bus_offset, tcet->page_shift);

    return 0;
}

static bool spapr_tce_table_ex_needed(void *opaque)
{
    sPAPRTCETable *tcet = opaque;

    return tcet->bus_offset || tcet->page_shift != 0xC;
}

static const VMStateDescription vmstate_spapr_tce_table_ex = {
    .name = "spapr_iommu_ex",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_tce_table_ex_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(bus_offset, sPAPRTCETable),
        VMSTATE_UINT32(page_shift, sPAPRTCETable),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_spapr_tce_table = {
    .name = "spapr_iommu",
    .version_id = 2,
    .minimum_version_id = 2,
    .pre_save = spapr_tce_table_pre_save,
    .post_load = spapr_tce_table_post_load,
    .fields = (VMStateField []) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable),

        /* IOMMU state */
        VMSTATE_UINT32(mig_nb_table, sPAPRTCETable),
        VMSTATE_BOOL(bypass, sPAPRTCETable),
        VMSTATE_VARRAY_UINT32_ALLOC(mig_table, sPAPRTCETable, mig_nb_table, 0,
                                    vmstate_info_uint64, uint64_t),

        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_spapr_tce_table_ex,
        NULL
    }
};

static MemoryRegionIOMMUOps spapr_iommu_ops = {
    .translate = spapr_tce_translate_iommu,
    .get_min_page_size = spapr_tce_get_min_page_size,
    .notify_flag_changed = spapr_tce_notify_flag_changed,
};
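/*
 * Device init hook: creates the root and IOMMU memory regions for this
 * table, links it into the global list used by spapr_tce_find_by_liobn(),
 * and registers its migration state. The IOMMU region is created with
 * size 0 and is only sized and mapped once spapr_tce_table_enable()
 * is called.
 */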
static int spapr_tce_table_realize(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
    Object *tcetobj = OBJECT(tcet);
    char tmp[32];

    tcet->fd = -1;
    tcet->need_vfio = false;
    snprintf(tmp, sizeof(tmp), "tce-root-%x", tcet->liobn);
    memory_region_init(&tcet->root, tcetobj, tmp, UINT64_MAX);

    snprintf(tmp, sizeof(tmp), "tce-iommu-%x", tcet->liobn);
    memory_region_init_iommu(&tcet->iommu, tcetobj, &spapr_iommu_ops, tmp, 0);

    QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);

    vmstate_register(DEVICE(tcet), tcet->liobn, &vmstate_spapr_tce_table,
                     tcet);

    return 0;
}

void spapr_tce_set_need_vfio(sPAPRTCETable *tcet, bool need_vfio)
{
    size_t table_size = tcet->nb_table * sizeof(uint64_t);
    void *newtable;

    if (need_vfio == tcet->need_vfio) {
        /* Nothing to do */
        return;
    }

    if (!need_vfio) {
        /* FIXME: We don't support transition back to KVM accelerated
         * TCEs yet */
        return;
    }

    tcet->need_vfio = true;

    if (tcet->fd < 0) {
        /* Table is already in userspace, nothing to be done */
        return;
    }

    newtable = g_malloc(table_size);
    memcpy(newtable, tcet->table, table_size);

    kvmppc_remove_spapr_tce(tcet->table, tcet->fd, tcet->nb_table);

    tcet->fd = -1;
    tcet->table = newtable;
}

sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn)
{
    sPAPRTCETable *tcet;
    char tmp[32];

    if (spapr_tce_find_by_liobn(liobn)) {
        error_report("Attempted to create TCE table with duplicate"
                     " LIOBN 0x%x", liobn);
        return NULL;
    }

    tcet = SPAPR_TCE_TABLE(object_new(TYPE_SPAPR_TCE_TABLE));
    tcet->liobn = liobn;

    snprintf(tmp, sizeof(tmp), "tce-table-%x", liobn);
    object_property_add_child(OBJECT(owner), tmp, OBJECT(tcet), NULL);

    object_property_set_bool(OBJECT(tcet), true, "realized", NULL);

    return tcet;
}

void spapr_tce_table_enable(sPAPRTCETable *tcet,
                            uint32_t page_shift, uint64_t bus_offset,
                            uint32_t nb_table)
{
    if (tcet->nb_table) {
        error_report("Warning: trying to enable already enabled TCE table");
        return;
    }

    tcet->bus_offset = bus_offset;
    tcet->page_shift = page_shift;
    tcet->nb_table = nb_table;
    tcet->table = spapr_tce_alloc_table(tcet->liobn,
                                        tcet->page_shift,
                                        tcet->bus_offset,
                                        tcet->nb_table,
                                        &tcet->fd,
                                        tcet->need_vfio);

    memory_region_set_size(&tcet->iommu,
                           (uint64_t)tcet->nb_table << tcet->page_shift);
    memory_region_add_subregion(&tcet->root, tcet->bus_offset, &tcet->iommu);
}

void spapr_tce_table_disable(sPAPRTCETable *tcet)
{
    if (!tcet->nb_table) {
        return;
    }

    memory_region_del_subregion(&tcet->root, &tcet->iommu);
    memory_region_set_size(&tcet->iommu, 0);

    spapr_tce_free_table(tcet->table, tcet->fd, tcet->nb_table);
    tcet->fd = -1;
    tcet->table = NULL;
    tcet->bus_offset = 0;
    tcet->page_shift = 0;
    tcet->nb_table = 0;
}

static void spapr_tce_table_unrealize(DeviceState *dev, Error **errp)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);

    QLIST_REMOVE(tcet, list);

    spapr_tce_table_disable(tcet);
}

MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet)
{
    return &tcet->root;
}

static void spapr_tce_reset(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
    size_t table_size = tcet->nb_table * sizeof(uint64_t);

    if (tcet->nb_table) {
        memset(tcet->table, 0, table_size);
    }
}
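/*
 * Stores a single TCE into the table and notifies any registered IOMMU
 * listeners (e.g. VFIO) of the changed mapping. @ioba is a bus address
 * within the DMA window; H_PARAMETER is returned if it falls outside
 * the table.
 */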
static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong tce)
{
    IOMMUTLBEntry entry;
    hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_iommu_put_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    tcet->table[index] = tce;

    entry.target_as = &address_space_memory;
    entry.iova = (ioba - tcet->bus_offset) & page_mask;
    entry.translated_addr = tce & page_mask;
    entry.addr_mask = ~page_mask;
    entry.perm = spapr_tce_iommu_access_flags(tce);
    memory_region_notify_iommu(&tcet->iommu, entry);

    return H_SUCCESS;
}

static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
                                       sPAPRMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong ioba1 = ioba;
    target_ulong tce_list = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER, tce = 0;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    CPUState *cs = CPU(cpu);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if ((npages > 512) || (tce_list & SPAPR_TCE_PAGE_MASK)) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        tce = ldq_be_phys(cs->as, tce_list + i * sizeof(target_ulong));

        ret = put_tce_emu(tcet, ioba, tce);
        if (ret) {
            break;
        }
    }

    /* Trace last successful or the first problematic entry */
    i = i ? (i - 1) : 0;
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_indirect(liobn, ioba1, tce_list, i, tce, ret);
    } else {
        trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i, tce, ret);
    }
    return ret;
}
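/*
 * H_STUFF_TCE: writes the same @tce_value into @npages consecutive
 * entries starting at @ioba. Guests typically use this to clear a
 * range of mappings in one hypercall.
 */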
static target_ulong h_stuff_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce_value = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if (npages > tcet->nb_table) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        ret = put_tce_emu(tcet, ioba, tce_value);
        if (ret) {
            break;
        }
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_stuff(liobn, ioba, tce_value, npages, ret);
    } else {
        trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);
    }

    return ret;
}

static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = args[2];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = put_tce_emu(tcet, ioba, tce);
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_put(liobn, ioba, tce, ret);
    } else {
        trace_spapr_iommu_put(liobn, ioba, tce, ret);
    }

    return ret;
}

static target_ulong get_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong *tce)
{
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_iommu_get_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    *tce = tcet->table[index];

    return H_SUCCESS;
}

static target_ulong h_get_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = 0;
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = get_tce_emu(tcet, ioba, &tce);
        if (!ret) {
            args[0] = tce;
        }
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_get(liobn, ioba, ret, tce);
    } else {
        trace_spapr_iommu_get(liobn, ioba, ret, tce);
    }

    return ret;
}
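/*
 * Encodes a DMA window as device tree property @propname on node
 * @node_off: five 32-bit big-endian cells holding the LIOBN, the 64-bit
 * window address and the (at most 32-bit) window size. Also sets the
 * "ibm,#dma-address-cells" and "ibm,#dma-size-cells" properties the
 * guest uses to parse the window.
 */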
int spapr_dma_dt(void *fdt, int node_off, const char *propname,
                 uint32_t liobn, uint64_t window, uint32_t size)
{
    uint32_t dma_prop[5];
    int ret;

    dma_prop[0] = cpu_to_be32(liobn);
    dma_prop[1] = cpu_to_be32(window >> 32);
    dma_prop[2] = cpu_to_be32(window & 0xFFFFFFFF);
    dma_prop[3] = 0; /* window size is 32 bits */
    dma_prop[4] = cpu_to_be32(size);

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop(fdt, node_off, propname, dma_prop, sizeof(dma_prop));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname,
                      sPAPRTCETable *tcet)
{
    if (!tcet) {
        return 0;
    }

    return spapr_dma_dt(fdt, node_off, propname,
                        tcet->liobn, 0, tcet->nb_table << tcet->page_shift);
}

static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->init = spapr_tce_table_realize;
    dc->reset = spapr_tce_reset;
    dc->unrealize = spapr_tce_table_unrealize;

    QLIST_INIT(&spapr_tce_tables);

    /* hcall-tce */
    spapr_register_hypercall(H_PUT_TCE, h_put_tce);
    spapr_register_hypercall(H_GET_TCE, h_get_tce);
    spapr_register_hypercall(H_PUT_TCE_INDIRECT, h_put_tce_indirect);
    spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce);
}

static TypeInfo spapr_tce_table_info = {
    .name = TYPE_SPAPR_TCE_TABLE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(sPAPRTCETable),
    .class_init = spapr_tce_table_class_init,
};

static void register_types(void)
{
    type_register_static(&spapr_tce_table_info);
}

type_init(register_types);