/*
 * QEMU sPAPR IOMMU (TCE) code
 *
 * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "hw/hw.h"
#include "sysemu/kvm.h"
#include "hw/qdev.h"
#include "kvm_ppc.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#include "trace.h"

#include "hw/ppc/spapr.h"

#include <libfdt.h>

enum sPAPRTCEAccess {
    SPAPR_TCE_FAULT = 0,
    SPAPR_TCE_RO = 1,
    SPAPR_TCE_WO = 2,
    SPAPR_TCE_RW = 3,
};

#define IOMMU_PAGE_SIZE(shift)      (1ULL << (shift))
#define IOMMU_PAGE_MASK(shift)      (~(IOMMU_PAGE_SIZE(shift) - 1))

static QLIST_HEAD(spapr_tce_tables, sPAPRTCETable) spapr_tce_tables;

/* The parameter must be target_ulong: with a uint32_t the high-bits check
 * below would be dead code and TARGET_FMT_lx would mismatch the argument. */
static sPAPRTCETable *spapr_tce_find_by_liobn(target_ulong liobn)
{
    sPAPRTCETable *tcet;

    if (liobn & 0xFFFFFFFF00000000ULL) {
        hcall_dprintf("Request for out-of-bounds LIOBN 0x" TARGET_FMT_lx "\n",
                      liobn);
        return NULL;
    }

    QLIST_FOREACH(tcet, &spapr_tce_tables, list) {
        if (tcet->liobn == liobn) {
            return tcet;
        }
    }

    return NULL;
}

static IOMMUTLBEntry spapr_tce_translate_iommu(MemoryRegion *iommu, hwaddr addr,
                                               bool is_write)
{
    sPAPRTCETable *tcet = container_of(iommu, sPAPRTCETable, iommu);
    uint64_t tce;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = 0,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    if (tcet->bypass) {
        ret.perm = IOMMU_RW;
    } else if ((addr >> tcet->page_shift) < tcet->nb_table) {
        /* Check if we are in bounds */
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        tce = tcet->table[addr >> tcet->page_shift];
        ret.iova = addr & page_mask;
        ret.translated_addr = tce & page_mask;
        ret.addr_mask = ~page_mask;
        ret.perm = tce & IOMMU_RW;
    }
    trace_spapr_iommu_xlate(tcet->liobn, addr, ret.iova, ret.perm,
                            ret.addr_mask);

    return ret;
}

static const VMStateDescription vmstate_spapr_tce_table = {
    .name = "spapr_iommu",
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField []) {
        /* Sanity check */
        VMSTATE_UINT32_EQUAL(liobn, sPAPRTCETable),
        VMSTATE_UINT32_EQUAL(nb_table, sPAPRTCETable),

        /* IOMMU state */
        VMSTATE_BOOL(bypass, sPAPRTCETable),
        VMSTATE_VARRAY_UINT32(table, sPAPRTCETable, nb_table, 0,
                              vmstate_info_uint64, uint64_t),

        VMSTATE_END_OF_LIST()
    },
};

static MemoryRegionIOMMUOps spapr_iommu_ops = {
    .translate = spapr_tce_translate_iommu,
};

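/*
 * Allocate the backing store for a TCE table.  Under KVM, first try to get
 * a kernel-accelerated table from kvmppc_create_spapr_tce(); if that fails
 * (or KVM is disabled), fall back to a zeroed userspace allocation.  The
 * IOMMU memory region is sized to cover all of guest RAM (ram_size).
 */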
static int spapr_tce_table_realize(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);

    if (kvm_enabled()) {
        tcet->table = kvmppc_create_spapr_tce(tcet->liobn,
                                              tcet->nb_table <<
                                              tcet->page_shift,
                                              &tcet->fd,
                                              tcet->vfio_accel);
    }

    if (!tcet->table) {
        size_t table_size = tcet->nb_table * sizeof(uint64_t);
        tcet->table = g_malloc0(table_size);
    }

    trace_spapr_iommu_new_table(tcet->liobn, tcet, tcet->table, tcet->fd);

    memory_region_init_iommu(&tcet->iommu, OBJECT(dev), &spapr_iommu_ops,
                             "iommu-spapr", ram_size);

    QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);

    vmstate_register(DEVICE(tcet), tcet->liobn, &vmstate_spapr_tce_table,
                     tcet);

    return 0;
}

sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn,
                                   uint64_t bus_offset,
                                   uint32_t page_shift,
                                   uint32_t nb_table,
                                   bool vfio_accel)
{
    sPAPRTCETable *tcet;

    if (spapr_tce_find_by_liobn(liobn)) {
        fprintf(stderr, "Attempted to create TCE table with duplicate"
                " LIOBN 0x%x\n", liobn);
        return NULL;
    }

    if (!nb_table) {
        return NULL;
    }

    tcet = SPAPR_TCE_TABLE(object_new(TYPE_SPAPR_TCE_TABLE));
    tcet->liobn = liobn;
    tcet->bus_offset = bus_offset;
    tcet->page_shift = page_shift;
    tcet->nb_table = nb_table;
    tcet->vfio_accel = vfio_accel;

    object_property_add_child(OBJECT(owner), "tce-table", OBJECT(tcet), NULL);

    object_property_set_bool(OBJECT(tcet), true, "realized", NULL);

    return tcet;
}

static void spapr_tce_table_finalize(Object *obj)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(obj);

    QLIST_REMOVE(tcet, list);

    if (!kvm_enabled() ||
        (kvmppc_remove_spapr_tce(tcet->table, tcet->fd,
                                 tcet->nb_table) != 0)) {
        g_free(tcet->table);
    }
}

MemoryRegion *spapr_tce_get_iommu(sPAPRTCETable *tcet)
{
    return &tcet->iommu;
}

void spapr_tce_set_bypass(sPAPRTCETable *tcet, bool bypass)
{
    tcet->bypass = bypass;
}

static void spapr_tce_reset(DeviceState *dev)
{
    sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
    size_t table_size = tcet->nb_table * sizeof(uint64_t);

    tcet->bypass = false;
    memset(tcet->table, 0, table_size);
}

static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong tce)
{
    IOMMUTLBEntry entry;
    hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_vio_put_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    tcet->table[index] = tce;

    entry.target_as = &address_space_memory;
    entry.iova = ioba & page_mask;
    entry.translated_addr = tce & page_mask;
    entry.addr_mask = ~page_mask;
    entry.perm = tce & IOMMU_RW;
    memory_region_notify_iommu(&tcet->iommu, entry);

    return H_SUCCESS;
}

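/*
 * H_PUT_TCE_INDIRECT: update up to 512 TCEs in a single hypercall.  args[]
 * holds the LIOBN, the starting IOBA, the guest-physical address of a
 * page-aligned list of TCE values, and the number of entries; each TCE is
 * read from guest memory with ldq_phys() and installed via put_tce_emu().
 */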
static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
                                       sPAPREnvironment *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong ioba1 = ioba;
    target_ulong tce_list = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    CPUState *cs = CPU(cpu);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if ((npages > 512) || (tce_list & SPAPR_TCE_PAGE_MASK)) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        target_ulong off = (tce_list & ~SPAPR_TCE_RW) +
            i * sizeof(target_ulong);
        target_ulong tce = ldq_phys(cs->as, off);

        ret = put_tce_emu(tcet, ioba, tce);
        if (ret) {
            break;
        }
    }

    /* Trace the last successful entry, or the first problematic one */
    i = i ? (i - 1) : 0;
    trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i,
                               ldq_phys(cs->as,
                                        tce_list + i * sizeof(target_ulong)),
                               ret);

    return ret;
}

static target_ulong h_stuff_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                                target_ulong opcode, target_ulong *args)
{
    int i;
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce_value = args[2];
    target_ulong npages = args[3];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
    hwaddr page_mask, page_size;

    if (!tcet) {
        return H_PARAMETER;
    }

    if (npages > tcet->nb_table) {
        return H_PARAMETER;
    }

    page_mask = IOMMU_PAGE_MASK(tcet->page_shift);
    page_size = IOMMU_PAGE_SIZE(tcet->page_shift);
    ioba &= page_mask;

    for (i = 0; i < npages; ++i, ioba += page_size) {
        ret = put_tce_emu(tcet, ioba, tce_value);
        if (ret) {
            break;
        }
    }
    trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);

    return ret;
}

static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = args[2];
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = put_tce_emu(tcet, ioba, tce);
    }
    trace_spapr_iommu_put(liobn, ioba, tce, ret);

    return ret;
}

static target_ulong get_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
                                target_ulong *tce)
{
    unsigned long index = (ioba - tcet->bus_offset) >> tcet->page_shift;

    if (index >= tcet->nb_table) {
        hcall_dprintf("spapr_iommu_get_tce on out-of-bounds IOBA 0x"
                      TARGET_FMT_lx "\n", ioba);
        return H_PARAMETER;
    }

    *tce = tcet->table[index];

    return H_SUCCESS;
}

static target_ulong h_get_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
                              target_ulong opcode, target_ulong *args)
{
    target_ulong liobn = args[0];
    target_ulong ioba = args[1];
    target_ulong tce = 0;
    target_ulong ret = H_PARAMETER;
    sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);

    if (tcet) {
        hwaddr page_mask = IOMMU_PAGE_MASK(tcet->page_shift);

        ioba &= page_mask;

        ret = get_tce_emu(tcet, ioba, &tce);
        if (!ret) {
            args[0] = tce;
        }
    }
    trace_spapr_iommu_get(liobn, ioba, ret, tce);

    return ret;
}

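/*
 * Encode a DMA window property (e.g. an "ibm,my-dma-window"-style property;
 * the exact name is supplied by the caller via propname) into the guest
 * device tree: one cell for the LIOBN, two cells for the 64-bit window
 * address and two for the 64-bit window size.  The high size cell is always
 * zero because the size argument is only 32 bits wide.
 */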
int spapr_dma_dt(void *fdt, int node_off, const char *propname,
                 uint32_t liobn, uint64_t window, uint32_t size)
{
    uint32_t dma_prop[5];
    int ret;

    dma_prop[0] = cpu_to_be32(liobn);
    dma_prop[1] = cpu_to_be32(window >> 32);
    dma_prop[2] = cpu_to_be32(window & 0xFFFFFFFF);
    dma_prop[3] = 0; /* window size is 32 bits */
    dma_prop[4] = cpu_to_be32(size);

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2);
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop(fdt, node_off, propname, dma_prop, sizeof(dma_prop));
    if (ret < 0) {
        return ret;
    }

    return 0;
}

int spapr_tcet_dma_dt(void *fdt, int node_off, const char *propname,
                      sPAPRTCETable *tcet)
{
    if (!tcet) {
        return 0;
    }

    return spapr_dma_dt(fdt, node_off, propname,
                        tcet->liobn, 0, tcet->nb_table << tcet->page_shift);
}

static void spapr_tce_table_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    dc->init = spapr_tce_table_realize;
    dc->reset = spapr_tce_reset;

    QLIST_INIT(&spapr_tce_tables);

    /* hcall-tce */
    spapr_register_hypercall(H_PUT_TCE, h_put_tce);
    spapr_register_hypercall(H_GET_TCE, h_get_tce);
    spapr_register_hypercall(H_PUT_TCE_INDIRECT, h_put_tce_indirect);
    spapr_register_hypercall(H_STUFF_TCE, h_stuff_tce);
}

static TypeInfo spapr_tce_table_info = {
    .name = TYPE_SPAPR_TCE_TABLE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(sPAPRTCETable),
    .class_init = spapr_tce_table_class_init,
    .instance_finalize = spapr_tce_table_finalize,
};

static void register_types(void)
{
    type_register_static(&spapr_tce_table_info);
}

type_init(register_types);