/*
 * QEMU sPAPR IOMMU (TCE) code
 *
 * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "hw/hw.h"
#include "sysemu/kvm.h"
#include "hw/qdev.h"
#include "kvm_ppc.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "trace.h"

#include "hw/ppc/spapr.h"

#include <libfdt.h>

enum sPAPRTCEAccess {
    SPAPR_TCE_FAULT = 0,
    SPAPR_TCE_RO = 1,
    SPAPR_TCE_WO = 2,
    SPAPR_TCE_RW = 3,
};

#define IOMMU_PAGE_SIZE(shift)      (1ULL << (shift))
#define IOMMU_PAGE_MASK(shift)      (~(IOMMU_PAGE_SIZE(shift) - 1))

static QLIST_HEAD(spapr_tce_tables, sPAPRTCETable) spapr_tce_tables;

static sPAPRTCETable *spapr_tce_find_by_liobn(uint32_t liobn)
{
    sPAPRTCETable *tcet;

    if (liobn & 0xFFFFFFFF00000000ULL) {
        hcall_dprintf("Request for out-of-bounds LIOBN 0x" TARGET_FMT_lx "\n",
                      liobn);
        return NULL;
    }

    QLIST_FOREACH(tcet, &spapr_tce_tables, list) {
        if (tcet->liobn == liobn) {
            return tcet;
        }
    }

    return NULL;
}

static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr)
{
    sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);
    uint64_t tce;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    if (tcet->bypass) {
        ret.perm = IOMMU_RW;
    } else if ((addr >> tcet->page_shift) < tcet->nb_table) {
        /* Check if we are in bounds */
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        tce = tcet->table[addr >> tcet->page_shift];
        ret.iova = addr & page_mask;
        ret.translated_addr = tce & page_mask;
        ret.addr_mask = ~page_mask;
        ret.perm = tce;
    }
    trace_spapr_iommu_xlate(tcet->liobn, addr, ret.iova, ret.perm,
                            ret.addr_mask);

    return ret;
}

static const VMStateDescription vmstate_spapr_tce_table = {
    .name = "spapr_iommu",
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField []) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable),
        VMSTATE_UINT32_EQUAL(nb_table, sPAPRTCETable),

        /* IOMMU state */
        VMSTATE_BOOL(bypass, sPAPRTCETable),
        VMSTATE_VARRAY_UINT32(table, sPAPRTCETable, nb_table, 0, vmstate_info_uint64, uint64_t),

        VMSTATE_END_OF_LIST()
    },
};

static MemoryRegionIOMMUOps spapr_iommu_ops = {
    .translate = spapr_tce_translate_iommu,
};

static int spapr_tce_table_realize(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);

    if (kvm_enabled()) {
        tcet->table = kvmppc_create_spapr_tce(tcet->liobn,
                                              tcet->nb_table <<
                                              tcet->page_shift,
                                              &tcet->fd,
                                              tcet->vfio_accel);
    }

    if (!tcet->table) {
        size_t table_size = tcet->nb_table * sizeof(uint64_t);
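        /* No KVM-accelerated table (KVM disabled or kvmppc_create_spapr_tce()
         * failed); fall back to a zero-initialised table in userspace. */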
        tcet->table = g_malloc0(table_size);
    }

    trace_spapr_iommu_new_table(tcet->liobn, tcet, tcet->table, tcet->fd);

    memory_region_init_iommu(&tcet->iommu, OBJECT(dev), &spapr_iommu_ops,
                             "iommu-spapr", ram_size);

    QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);

    vmstate_register(DEVICE(tcet), tcet->liobn, &vmstate_spapr_tce_table,
                     tcet);

    return 0;
}

sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn,
                                   uint64_t bus_offset,
                                   uint32_t page_shift,
                                   uint32_t nb_table,
                                   bool vfio_accel)
{
    sPAPRTCETable *tcet;

    if (spapr_tce_find_by_liobn(liobn)) {
        fprintf(stderr, "Attempted to create TCE table with duplicate"
                " LIOBN 0x%x\n", liobn);
        return NULL;
    }

    if (!nb_table) {
        return NULL;
    }

    tcet = SPAPR_TCE_TABLE(object_new(TYPE_SPAPR_TCE_TABLE));
    tcet->liobn = liobn;
    tcet->bus_offset = bus_offset;
    tcet->page_shift = page_shift;
    tcet->nb_table = nb_table;
    tcet->vfio_accel = vfio_accel;

    object_property_add_child(OBJECT(owner), "tce-table", OBJECT(tcet), NULL);

    object_property_set_bool(OBJECT(tcet), true, "realized", NULL);

    return tcet;
}

static void spapr_tce_table_finalize(Object *obj)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(obj);

    QLIST_REMOVE(tcet, list);

    if (!kvm_enabled() ||
        (kvmppc_remove_spapr_tce(tcet->table, tcet->fd,
                                 tcet->nb_table) != 0)) {
        g_free(tcet->table);
    }
}

MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet)
{
    return &tcet->iommu;
}

void spapr_tce_set_bypass(sPAPRTCETable *tcet, bool bypass)
{
    tcet->bypass = bypass;
}

static void spapr_tce_reset(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
    size_t table_size = tcet->nb_table * sizeof(uint64_t);

    tcet->bypass = false;
    memset(tcet->table, 0, table_size);
}

static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong tce)
{
    IOMMUTLBEntry entry;
    hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_vio_put_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    tcet->table[index] = tce;

    entry.target_as = &address_space_memory;
    entry.iova = ioba & page_mask;
    entry.translated_addr = tce & page_mask;
    entry.addr_mask = ~page_mask;
    entry.perm = tce;
    memory_region_notify_iommu(&tcet->iommu, entry);

    return H_SUCCESS;
}

static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
                                       sPAPREnvironment *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong ioba1 = ioba;
    target_ulong tce_list = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    CPUState *cs = CPU(cpu);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if ((npages > 512) || (tce_list & SPAPR_TCE_PAGE_MASK)) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        target_ulong off = (tce_list & ~SPAPR_TCE_RW) +
                                i * sizeof(target_ulong);
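        /* Read the next 64-bit TCE from the guest-supplied list (the low
         * SPAPR_TCE_RW bits of tce_list are masked off above). */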
        target_ulong tce = ldq_phys(cs->as, off);

        ret = put_tce_emu(tcet, ioba, tce);
        if (ret) {
            break;
        }
    }

    /* Trace last successful or the first problematic entry */
    i = i ? (i - 1) : 0;
    trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i,
                               ldq_phys(cs->as,
                                        tce_list + i * sizeof(target_ulong)),
                               ret);

    return ret;
}

static target_ulong h_stuff_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce_value = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if (npages > tcet->nb_table) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        ret = put_tce_emu(tcet, ioba, tce_value);
        if (ret) {
            break;
        }
    }
    trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);

    return ret;
}

static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = args[2];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = put_tce_emu(tcet, ioba, tce);
    }
    trace_spapr_iommu_put(liobn, ioba, tce, ret);

    return ret;
}

static target_ulong get_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong *tce)
{
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_iommu_get_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    *tce = tcet->table[index];

    return H_SUCCESS;
}

static target_ulong h_get_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = 0;
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = get_tce_emu(tcet, ioba, &tce);
        if (!ret) {
            args[0] = tce;
        }
    }
    trace_spapr_iommu_get(liobn, ioba, ret, tce);

    return ret;
}

int spapr_dma_dt(void *fdt, int node_off, const char *propname,
                 uint32_t liobn, uint64_t window, uint32_t size)
{
    uint32_t dma_prop[5];
    int ret;

    dma_prop[0] = cpu_to_be32(liobn);
    dma_prop[1] = cpu_to_be32(window >> 32);
    dma_prop[2] = cpu_to_be32(window & 0xFFFFFFFF);
    dma_prop[3] = 0; /* window size is 32 bits */
    dma_prop[4] = cpu_to_be32(size);

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2);
    if (ret < 0) {
        return ret;
    }
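    /* Window property contents: LIOBN, 64-bit window address, 64-bit size. */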
    ret = fdt_setprop(fdt, node_off, propname, dma_prop, sizeof(dma_prop));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname,
                      sPAPRTCETable *tcet)
{
    if (!tcet) {
        return 0;
    }

    return spapr_dma_dt(fdt, node_off, propname,
                        tcet->liobn, 0, tcet->nb_table << tcet->page_shift);
}

static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->init = spapr_tce_table_realize;
    dc->reset = spapr_tce_reset;

    QLIST_INIT(&spapr_tce_tables);

    /* hcall-tce */
    spapr_register_hypercall(H_PUT_TCE, h_put_tce);
    spapr_register_hypercall(H_GET_TCE, h_get_tce);
    spapr_register_hypercall(H_PUT_TCE_INDIRECT, h_put_tce_indirect);
    spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce);
}

static TypeInfo spapr_tce_table_info = {
    .name = TYPE_SPAPR_TCE_TABLE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(sPAPRTCETable),
    .class_init = spapr_tce_table_class_init,
    .instance_finalize = spapr_tce_table_finalize,
};

static void register_types(void)
{
    type_register_static(&spapr_tce_table_info);
}

type_init(register_types);