/*
 * QEMU sPAPR IOMMU (TCE) code
 *
 * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "qemu/log.h"
#include "sysemu/kvm.h"
#include "hw/qdev.h"
#include "kvm_ppc.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "trace.h"

#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"

#include <libfdt.h>

enum sPAPRTCEAccess {
    SPAPR_TCE_FAULT = 0,
    SPAPR_TCE_RO = 1,
    SPAPR_TCE_WO = 2,
    SPAPR_TCE_RW = 3,
};

#define IOMMU_PAGE_SIZE(shift)      (1ULL << (shift))
#define IOMMU_PAGE_MASK(shift)      (~(IOMMU_PAGE_SIZE(shift) - 1))

static QLIST_HEAD(spapr_tce_tables, sPAPRTCETable) spapr_tce_tables;

sPAPRTCETable *spapr_tce_find_by_liobn(target_ulong liobn)
{
    sPAPRTCETable *tcet;

    if (liobn & 0xFFFFFFFF00000000ULL) {
        hcall_dprintf("Request for out-of-bounds LIOBN 0x" TARGET_FMT_lx "\n",
                      liobn);
        return NULL;
    }

    QLIST_FOREACH(tcet, &spapr_tce_tables, list) {
        if (tcet->liobn == (uint32_t)liobn) {
            return tcet;
        }
    }

    return NULL;
}

static IOMMUAccessFlags spapr_tce_iommu_access_flags(uint64_t tce)
{
    switch (tce & SPAPR_TCE_RW) {
    case SPAPR_TCE_FAULT:
        return IOMMU_NONE;
    case SPAPR_TCE_RO:
        return IOMMU_RO;
    case SPAPR_TCE_WO:
        return IOMMU_WO;
    default: /* SPAPR_TCE_RW */
        return IOMMU_RW;
    }
}

/*
 * Allocate the TCE table backing store: try a KVM-accelerated table first
 * (only possible when the DMA window fits in 32 bits), otherwise fall back
 * to a zero-filled userspace array and mark the fallback with *fd = -1.
 */
static uint64_t *spapr_tce_alloc_table(uint32_t liobn,
                                       uint32_t page_shift,
                                       uint32_t nb_table,
                                       int *fd,
                                       bool need_vfio)
{
    uint64_t *table = NULL;
    uint64_t window_size = (uint64_t)nb_table << page_shift;

    if (kvm_enabled() && !(window_size >> 32)) {
        table = kvmppc_create_spapr_tce(liobn, window_size, fd, need_vfio);
    }

    if (!table) {
        *fd = -1;
        table = g_malloc0(nb_table * sizeof(uint64_t));
    }

    trace_spapr_iommu_new_table(liobn, table, *fd);

    return table;
}

static void spapr_tce_free_table(uint64_t *table, int fd, uint32_t nb_table)
{
    if (!kvm_enabled() ||
        (kvmppc_remove_spapr_tce(table, fd, nb_table) != 0)) {
        g_free(table);
    }
}

/* Called from RCU critical section */
static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr,
                                               bool is_write)
{
    sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);
    uint64_t tce;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    if ((addr >> tcet->page_shift) < tcet->nb_table) {
        /* Check if we are in bounds */
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        tce = tcet->table[addr >> tcet->page_shift];
        ret.iova = addr & page_mask;
        ret.translated_addr = tce & page_mask;
        ret.addr_mask = ~page_mask;
        ret.perm = spapr_tce_iommu_access_flags(tce);
    }
    trace_spapr_iommu_xlate(tcet->liobn, addr, ret.iova, ret.perm,
                            ret.addr_mask);

    return ret;
}

static int spapr_tce_table_post_load(void *opaque, int version_id)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(opaque);

    if (tcet->vdev) {
        spapr_vio_set_bypass(tcet->vdev, tcet->bypass);
    }

    return 0;
}

static const VMStateDescription vmstate_spapr_tce_table = {
    .name = "spapr_iommu",
    .version_id = 2,
    .minimum_version_id = 2,
    .post_load = spapr_tce_table_post_load,
    .fields = (VMStateField []) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable),
        VMSTATE_UINT32_EQUAL(nb_table, sPAPRTCETable),

        /* IOMMU state */
        VMSTATE_BOOL(bypass, sPAPRTCETable),
        VMSTATE_VARRAY_UINT32(table, sPAPRTCETable, nb_table, 0,
                              vmstate_info_uint64, uint64_t),

        VMSTATE_END_OF_LIST()
    },
};

static MemoryRegionIOMMUOps spapr_iommu_ops = {
    .translate = spapr_tce_translate_iommu,
};

static int spapr_tce_table_realize(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);

    tcet->fd = -1;
    tcet->table = spapr_tce_alloc_table(tcet->liobn,
                                        tcet->page_shift,
                                        tcet->nb_table,
                                        &tcet->fd,
                                        tcet->need_vfio);

    memory_region_init_iommu(&tcet->iommu, OBJECT(dev), &spapr_iommu_ops,
                             "iommu-spapr",
                             (uint64_t)tcet->nb_table << tcet->page_shift);

    QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);

    vmstate_register(DEVICE(tcet), tcet->liobn, &vmstate_spapr_tce_table,
                     tcet);

    return 0;
}

/*
 * Switch to a table that userspace (and therefore VFIO) can see: if the
 * current table is KVM-accelerated (fd >= 0), copy it into a userspace
 * allocation and release the kernel-managed one.
 */
void spapr_tce_set_need_vfio(sPAPRTCETable *tcet, bool need_vfio)
{
    size_t table_size = tcet->nb_table * sizeof(uint64_t);
    void *newtable;

    if (need_vfio == tcet->need_vfio) {
        /* Nothing to do */
        return;
    }

    if (!need_vfio) {
        /* FIXME: We don't support transition back to KVM accelerated
         * TCEs yet */
        return;
    }

    tcet->need_vfio = true;

    if (tcet->fd < 0) {
        /* Table is already in userspace, nothing to be done */
        return;
    }

    newtable = g_malloc(table_size);
    memcpy(newtable, tcet->table, table_size);

    kvmppc_remove_spapr_tce(tcet->table, tcet->fd, tcet->nb_table);

    tcet->fd = -1;
    tcet->table = newtable;
}

sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn,
                                   uint64_t bus_offset,
                                   uint32_t page_shift,
                                   uint32_t nb_table,
                                   bool need_vfio)
{
    sPAPRTCETable *tcet;
    char tmp[64];

    if (spapr_tce_find_by_liobn(liobn)) {
        fprintf(stderr, "Attempted to create TCE table with duplicate"
                " LIOBN 0x%x\n", liobn);
        return NULL;
    }

    if (!nb_table) {
        return NULL;
    }

    tcet = SPAPR_TCE_TABLE(object_new(TYPE_SPAPR_TCE_TABLE));
    tcet->liobn = liobn;
    tcet->bus_offset = bus_offset;
    tcet->page_shift = page_shift;
    tcet->nb_table = nb_table;
    tcet->need_vfio = need_vfio;

    snprintf(tmp, sizeof(tmp), "tce-table-%x", liobn);
    object_property_add_child(OBJECT(owner), tmp, OBJECT(tcet), NULL);

    object_property_set_bool(OBJECT(tcet), true, "realized", NULL);

    return tcet;
}

static void spapr_tce_table_unrealize(DeviceState *dev, Error **errp)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);

    QLIST_REMOVE(tcet, list);

    spapr_tce_free_table(tcet->table, tcet->fd, tcet->nb_table);
    tcet->fd = -1;
}

MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet)
{
    return &tcet->iommu;
}

static void spapr_tce_reset(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
    size_t table_size = tcet->nb_table * sizeof(uint64_t);

    memset(tcet->table, 0, table_size);
}

/* Write one TCE and notify the IOMMU memory region about the new mapping */
static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong tce)
{
    IOMMUTLBEntry entry;
    hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_vio_put_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    tcet->table[index] = tce;

    entry.target_as = &address_space_memory;
    entry.iova = (ioba - tcet->bus_offset) & page_mask;
    entry.translated_addr = tce & page_mask;
    entry.addr_mask = ~page_mask;
    entry.perm = spapr_tce_iommu_access_flags(tce);
    memory_region_notify_iommu(&tcet->iommu, entry);

    return H_SUCCESS;
}

/* H_PUT_TCE_INDIRECT: store up to 512 TCEs read from a guest-physical list */
static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
                                       sPAPRMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong ioba1 = ioba;
    target_ulong tce_list = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER, tce = 0;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    CPUState *cs = CPU(cpu);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if ((npages > 512) || (tce_list & SPAPR_TCE_PAGE_MASK)) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        tce = ldq_be_phys(cs->as, tce_list + i * sizeof(target_ulong));

        ret = put_tce_emu(tcet, ioba, tce);
        if (ret) {
            break;
        }
    }

    /* Trace last successful or the first problematic entry */
    i = i ? (i - 1) : 0;
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_indirect(liobn, ioba1, tce_list, i, tce, ret);
    } else {
        trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i, tce, ret);
    }
    return ret;
}

static target_ulong h_stuff_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                                target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce_value = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if (npages > tcet->nb_table) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        ret = put_tce_emu(tcet, ioba, tce_value);
        if (ret) {
            break;
        }
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_stuff(liobn, ioba, tce_value, npages, ret);
    } else {
        trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);
    }

    return ret;
}

static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = args[2];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = put_tce_emu(tcet, ioba, tce);
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_put(liobn, ioba, tce, ret);
    } else {
        trace_spapr_iommu_put(liobn, ioba, tce, ret);
    }

    return ret;
}

static target_ulong get_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong *tce)
{
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_iommu_get_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    *tce = tcet->table[index];

    return H_SUCCESS;
}

static target_ulong h_get_tce(PowerPCCPU *cpu, sPAPRMachineState *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = 0;
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = get_tce_emu(tcet, ioba, &tce);
        if (!ret) {
            args[0] = tce;
        }
    }
    if (SPAPR_IS_PCI_LIOBN(liobn)) {
        trace_spapr_iommu_pci_get(liobn, ioba, ret, tce);
    } else {
        trace_spapr_iommu_get(liobn, ioba, ret, tce);
    }

    return ret;
}

int spapr_dma_dt(void *fdt, int node_off, const char *propname,
                 uint32_t liobn, uint64_t window, uint32_t size)
{
    uint32_t dma_prop[5];
    int ret;

    dma_prop[0] = cpu_to_be32(liobn);
    dma_prop[1] = cpu_to_be32(window >> 32);
    dma_prop[2] = cpu_to_be32(window & 0xFFFFFFFF);
    dma_prop[3] = 0; /* window size is 32 bits */
    dma_prop[4] = cpu_to_be32(size);

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop(fdt, node_off, propname, dma_prop, sizeof(dma_prop));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname,
                      sPAPRTCETable *tcet)
{
    if (!tcet) {
        return 0;
    }

    return spapr_dma_dt(fdt, node_off, propname,
                        tcet->liobn, 0, tcet->nb_table << tcet->page_shift);
}

static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->init = spapr_tce_table_realize;
    dc->reset = spapr_tce_reset;
    dc->unrealize = spapr_tce_table_unrealize;

    QLIST_INIT(&spapr_tce_tables);

    /* hcall-tce */
    spapr_register_hypercall(H_PUT_TCE, h_put_tce);
    spapr_register_hypercall(H_GET_TCE, h_get_tce);
    spapr_register_hypercall(H_PUT_TCE_INDIRECT, h_put_tce_indirect);
    spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce);
}

static TypeInfo spapr_tce_table_info = {
    .name = TYPE_SPAPR_TCE_TABLE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(sPAPRTCETable),
    .class_init = spapr_tce_table_class_init,
};

static void register_types(void)
{
    type_register_static(&spapr_tce_table_info);
}

type_init(register_types);