/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "exec/address-spaces.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * the GERROR register in case of a GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non-pending errors */
            return;
        }
        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

/*
 * GERROR and GERRORN form a toggle pair: an error is pending whenever the
 * corresponding bits differ. The guest acknowledges an error by writing
 * GERRORN so that the bit toggles back to the GERROR value.
 */
static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non-pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case the guest toggles bits
     * corresponding to non-pending errors (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}
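
/*
 * Circular queue helpers. Per the SMMUv3 spec, PROD and CONS each hold an
 * entry index in bits [LOG2SIZE-1:0] plus a wrap bit just above it: the
 * queue is empty when index and wrap bit are both equal, and full when the
 * indexes are equal but the wrap bits differ. The Q_* accessors and the
 * smmuv3_q_full()/smmuv3_q_empty() predicates implementing this live in
 * smmuv3-internal.h.
 */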

static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
}

static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}

static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}

void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_NONE:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt, info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt, info->u.f_uut.rnw);
        EVT_SET_PNU(&evt, info->u.f_uut.pnu);
        EVT_SET_IND(&evt, info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR2(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}
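
/*
 * Reset-time register values. The IDR fields programmed below advertise
 * exactly the feature set the decode and translate paths implement, so the
 * two must be kept in sync. FIELD_DP32() (hw/registerfields.h) deposits a
 * named field into a 32-bit register value.
 */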

static void smmuv3_init_regs(SMMUv3State *s)
{
    /**
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     * multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    /* 4K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
}

static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch STE at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch CD at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        event->u.f_cd_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;

    if (!STE_VALID(ste)) {
        if (!event->inval_ste_allowed) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
        }
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return 0;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return 0;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
        goto bad_ste;
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}
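
/*
 * A 2-level stream table consists of an L1 table of STE descriptors
 * (STEDesc), each pointing to an L2 array of STEs whose size is encoded in
 * the descriptor's SPAN field. sid_split gives the number of SID bits that
 * index the L2 array; the remaining upper SID bits index the L1 table.
 */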

/**
 * smmu_find_ste - Return the stream table entry associated
 * with the SID
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream tables
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr, strtab_base;
    uint32_t log2size;
    int strtab_size_shift;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    log2size = FIELD_EX32(s->strtab_base_cfg, STRTAB_BASE_CFG, LOG2SIZE);
    /*
     * Check SID range against both guest-configured and implementation limits
     */
    if (sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE))) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
        dma_addr_t l1ptr, l2ptr;
        STEDesc l1std;

        /*
         * Align strtab base address to table size. For this purpose, assume it
         * is not bounded by SMMU_IDR1_SIDSIZE.
         */
        strtab_size_shift = MAX(5, (int)log2size - s->sid_split - 1 + 3);
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr,
                              (uint8_t *)&l1std, sizeof(l1std));
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0x%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            if (!event->inval_ste_allowed) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalid sid=%d (L1STD span=0)\n", sid);
            }
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        strtab_size_shift = log2size + 5;
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        addr = strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}
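
/*
 * Decode the Context Descriptor found through the STE: it provides the
 * stage-1 translation parameters (TTBx, TxSZ, translation granule, ASID)
 * that are copied into the SMMUTransCfg consumed by the page table walk.
 */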

static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz);
    }

    event->record_trans_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}

/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zeroed by the caller
 *
 * Return < 0 in case of config decoding error (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret;
    STE ste;
    CD cd;

    ret = smmu_find_ste(s, sid, &ste, event);
    if (ret) {
        return ret;
    }

    ret = decode_ste(s, cfg, &ste, event);
    if (ret) {
        return ret;
    }

    if (cfg->aborted || cfg->bypassed) {
        return 0;
    }

    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
    if (ret) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}

/**
 * smmuv3_get_config - Look up a cached copy of the configuration data for
 * @sdev; on a cache miss, decode the configuration structures from
 * guest RAM.
 *
 * @sdev: SMMUDevice handle
 * @event: output event info
 *
 * The configuration cache contains data resulting from both STE and CD
 * decoding in the form of an SMMUTransCfg struct. The hash table is indexed
 * by the SMMUDevice handle.
 */
static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;
    SMMUTransCfg *cfg;

    cfg = g_hash_table_lookup(bc->configs, sdev);
    if (cfg) {
        sdev->cfg_cache_hits++;
        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
    } else {
        sdev->cfg_cache_misses++;
        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
        cfg = g_new0(SMMUTransCfg, 1);

        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
            g_hash_table_insert(bc->configs, sdev, cfg);
        } else {
            g_free(cfg);
            cfg = NULL;
        }
    }
    return cfg;
}

static void smmuv3_flush_config(SMMUDevice *sdev)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;

    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
    g_hash_table_remove(bc->configs, sdev);
}
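
/*
 * Translation entry point, called on IOMMU memory accesses: look up (or
 * decode and cache) the config for the device, handle bypass/abort, then
 * try the IOTLB; on a miss, walk the stage-1 page tables and cache the
 * result. Faults are reported through the event queue in the epilogue.
 */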

static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_NONE,
                           .sid = sid,
                           .inval_ste_allowed = false};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTranslationStatus status;
    SMMUState *bs = ARM_SMMU(s);
    uint64_t page_mask, aligned_addr;
    IOMMUTLBEntry *cached_entry = NULL;
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg = NULL;
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };
    SMMUIOTLBKey key, *new_key;

    qemu_mutex_lock(&s->mutex);

    if (!smmu_enabled(s)) {
        status = SMMU_TRANS_DISABLE;
        goto epilogue;
    }

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    if (cfg->aborted) {
        status = SMMU_TRANS_ABORT;
        goto epilogue;
    }

    if (cfg->bypassed) {
        status = SMMU_TRANS_BYPASS;
        goto epilogue;
    }

    tt = select_tt(cfg, addr);
    if (!tt) {
        if (event.record_trans_faults) {
            event.type = SMMU_EVT_F_TRANSLATION;
            event.u.f_translation.addr = addr;
            event.u.f_translation.rnw = flag & 0x1;
        }
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    page_mask = (1ULL << (tt->granule_sz)) - 1;
    aligned_addr = addr & ~page_mask;

    key.asid = cfg->asid;
    key.iova = aligned_addr;

    cached_entry = g_hash_table_lookup(bs->iotlb, &key);
    if (cached_entry) {
        cfg->iotlb_hits++;
        trace_smmu_iotlb_cache_hit(cfg->asid, aligned_addr,
                                   cfg->iotlb_hits, cfg->iotlb_misses,
                                   100 * cfg->iotlb_hits /
                                   (cfg->iotlb_hits + cfg->iotlb_misses));
        if ((flag & IOMMU_WO) && !(cached_entry->perm & IOMMU_WO)) {
            status = SMMU_TRANS_ERROR;
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
        } else {
            status = SMMU_TRANS_SUCCESS;
        }
        goto epilogue;
    }

    cfg->iotlb_misses++;
    trace_smmu_iotlb_cache_miss(cfg->asid, addr & ~page_mask,
                                cfg->iotlb_hits, cfg->iotlb_misses,
                                100 * cfg->iotlb_hits /
                                (cfg->iotlb_hits + cfg->iotlb_misses));

    if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
        smmu_iotlb_inv_all(bs);
    }

    cached_entry = g_new0(IOMMUTLBEntry, 1);

    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
        g_free(cached_entry);
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        status = SMMU_TRANS_ERROR;
    } else {
        new_key = g_new0(SMMUIOTLBKey, 1);
        new_key->asid = cfg->asid;
        new_key->iova = aligned_addr;
        g_hash_table_insert(bs->iotlb, new_key, cached_entry);
        status = SMMU_TRANS_SUCCESS;
    }

epilogue:
    qemu_mutex_unlock(&s->mutex);
    switch (status) {
    case SMMU_TRANS_SUCCESS:
        entry.perm = flag;
        entry.translated_addr = cached_entry->translated_addr +
                                (addr & page_mask);
        entry.addr_mask = cached_entry->addr_mask;
        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
                                       entry.translated_addr, entry.perm);
        break;
    case SMMU_TRANS_DISABLE:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
                                       entry.perm);
        break;
    case SMMU_TRANS_BYPASS:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
                                      entry.perm);
        break;
    case SMMU_TRANS_ABORT:
        /* no event is recorded on abort */
        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
                                     entry.perm);
        break;
    case SMMU_TRANS_ERROR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64" (%s)\n",
                      mr->parent_obj.name, addr, smmu_event_string(event.type));
        smmuv3_record_event(s, &event);
        break;
    }

    return entry;
}
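
/*
 * Unmap notifications, the only notifier flavour this model supports: on
 * TLB invalidation commands we replay an IOMMU_NONE mapping covering one
 * translation granule to each registered notifier, so that downstream
 * caches (e.g. vhost device IOTLBs) drop the stale entry.
 */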

/**
 * smmuv3_notify_iova - call the notifier @n for a given
 * @asid and @iova tuple.
 *
 * @mr: IOMMU memory region handle
 * @n: notifier to be called
 * @asid: address space ID or negative value if we don't care
 * @iova: iova
 */
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
                               IOMMUNotifier *n,
                               int asid,
                               dma_addr_t iova)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUEventInfo event = {.inval_ste_allowed = true};
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg;
    IOMMUTLBEntry entry;

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        return;
    }

    if (asid >= 0 && cfg->asid != asid) {
        return;
    }

    tt = select_tt(cfg, iova);
    if (!tt) {
        return;
    }

    entry.target_as = &address_space_memory;
    entry.iova = iova;
    entry.addr_mask = (1 << tt->granule_sz) - 1;
    entry.perm = IOMMU_NONE;

    memory_region_notify_one(n, &entry);
}

/* invalidate an asid/iova tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova)
{
    SMMUDevice *sdev;

    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        IOMMUMemoryRegion *mr = &sdev->iommu;
        IOMMUNotifier *n;

        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova);

        IOMMU_NOTIFIER_FOREACH(n, mr) {
            smmuv3_notify_iova(mr, n, asid, iova);
        }
    }
}
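
/*
 * Consume commands from the command queue until it is empty or an error
 * occurs. On error, the error code is latched through smmu_write_cmdq_err()
 * and a GERROR.CMDQ_ERR interrupt is raised; processing resumes only once
 * the guest acknowledges the error by writing GERRORN.
 */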

static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * Some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, the spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        qemu_mutex_lock(&s->mutex);
        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
            break;
        case SMMU_CMD_CFGI_STE:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_ste(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);

            break;
        }
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        {
            uint32_t start = CMD_SID(&cmd), end, i;
            uint8_t range = CMD_STE_RANGE(&cmd);

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            end = start + (1 << (range + 1)) - 1;
            trace_smmuv3_cmdq_cfgi_ste_range(start, end);

            for (i = start; i <= end; i++) {
                IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, i);
                SMMUDevice *sdev;

                if (!mr) {
                    continue;
                }
                sdev = container_of(mr, SMMUDevice, iommu);
                smmuv3_flush_config(sdev);
            }
            break;
        }
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_cd(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);
            break;
        }
        case SMMU_CMD_TLBI_NH_ASID:
        {
            uint16_t asid = CMD_ASID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_asid(bs, asid);
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NSNH_ALL:
            trace_smmuv3_cmdq_tlbi_nh();
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_all(bs);
            break;
        case SMMU_CMD_TLBI_NH_VAA:
        {
            dma_addr_t addr = CMD_ADDR(&cmd);
            uint16_t vmid = CMD_VMID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_vaa(vmid, addr);
            smmuv3_inv_notifiers_iova(bs, -1, addr);
            smmu_iotlb_inv_all(bs);
            break;
        }
        case SMMU_CMD_TLBI_NH_VA:
        {
            uint16_t asid = CMD_ASID(&cmd);
            uint16_t vmid = CMD_VMID(&cmd);
            dma_addr_t addr = CMD_ADDR(&cmd);
            bool leaf = CMD_LEAF(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_va(vmid, asid, addr, leaf);
            smmuv3_inv_notifiers_iova(bs, asid, addr);
            smmu_iotlb_inv_iova(bs, asid, addr);
            break;
        }
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        qemu_mutex_unlock(&s->mutex);
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command, because SYNC returns immediately and does not
         * check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}
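
/*
 * MMIO register accessors. The 64-bit registers (STRTAB_BASE, the queue
 * base registers, *_IRQ_CFG0) accept either a single 64-bit access or two
 * 32-bit accesses to their low and high halves; writes to unimplemented
 * offsets are ignored (WI) and only logged.
 */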

static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging CMDQ_ERR, SW notifies that commands can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}
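
/*
 * Read accessors mirror the write side; unimplemented offsets read as zero
 * (RAZ) and are only logged as unimplemented.
 */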

static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x2f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}
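
/*
 * QOM boilerplate: reset and realize chain up to the parent SMMU class
 * (the parent handlers are saved in SMMUv3Class by smmuv3_class_init())
 * before applying the SMMUv3-specific initialization.
 */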

static void smmu_reset(DeviceState *dev)
{
    SMMUv3State *s = ARM_SMMUV3(dev);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    c->parent_reset(dev);

    smmuv3_init_regs(s);
}

static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qemu_mutex_init(&s->mutex);

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
};

static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}
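
/*
 * Called when a notifier is registered on or removed from the IOMMU memory
 * region. MAP notifiers are rejected because this model only replays
 * invalidations (see smmuv3_inv_notifiers_iova() above).
 */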

static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                      IOMMUNotifierFlag old,
                                      IOMMUNotifierFlag new,
                                      Error **errp)
{
    SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
    SMMUv3State *s3 = sdev->smmu;
    SMMUState *s = &(s3->smmu_state);

    if (new & IOMMU_NOTIFIER_MAP) {
        error_setg(errp,
                   "device %02x.%02x.%x requires iommu MAP notifier which is "
                   "not currently supported", pci_bus_num(sdev->bus),
                   PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
        return -EINVAL;
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
        QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
        QLIST_REMOVE(sdev, next);
    }
    return 0;
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}

static const TypeInfo smmuv3_type_info = {
    .name          = TYPE_ARM_SMMUV3,
    .parent        = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size    = sizeof(SMMUv3Class),
    .class_init    = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)