/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "exec/address-spaces.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @s: smmuv3 state
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non-pending errors */
            return;
        }
        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non-pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case the guest toggles bits corresponding
     * to errors that are not currently active (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
}

static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}
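
/*
 * A note on queue indices (informative, not used by the code): the PROD and
 * CONS registers of each queue hold an index in bits [LOG2SIZE-1:0] and a
 * wrap flag in bit [LOG2SIZE], as architected by SMMUv3. Assuming the
 * Q_IDX()/Q_WRAP() helpers in smmuv3-internal.h follow that layout, a queue
 * with log2size = 3 (8 entries) would for instance look like:
 *
 *     prod = 0b1_101, cons = 0b0_101  ->  same index, different wrap: full
 *     prod = 0b1_101, cons = 0b1_101  ->  same index, same wrap:      empty
 *
 * which is what smmuv3_q_full()/smmuv3_q_empty() test before queue_read()
 * and queue_write() access the entry at base + index * entry_size.
 */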

static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}

void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_NONE:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt, info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt, info->u.f_uut.rnw);
        EVT_SET_PNU(&evt, info->u.f_uut.pnu);
        EVT_SET_IND(&evt, info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR2(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}

static void smmuv3_init_regs(SMMUv3State *s)
{
    /**
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     * multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    /* 4K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
}

static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch STE at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch CD at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        event->u.f_cd_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;

    if (!STE_VALID(ste)) {
        if (!event->inval_ste_allowed) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
        }
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return 0;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return 0;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
        goto bad_ste;
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}

/**
 * smmu_find_ste - Return the stream table entry associated
 * with the @sid
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream tables
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr, strtab_base;
    uint32_t log2size;
    int strtab_size_shift;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    log2size = FIELD_EX32(s->strtab_base_cfg, STRTAB_BASE_CFG, LOG2SIZE);
    /*
     * Check SID range against both guest-configured and implementation limits
     */
    if (sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE))) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
        dma_addr_t l1ptr, l2ptr;
        STEDesc l1std;

        /*
         * Align strtab base address to table size. For this purpose, assume it
         * is not bounded by SMMU_IDR1_SIDSIZE.
         */
        strtab_size_shift = MAX(5, (int)log2size - s->sid_split - 1 + 3);
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr, &l1std,
                              sizeof(l1std));
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0x%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            if (!event->inval_ste_allowed) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalid sid=%d (L1STD span=0)\n", sid);
            }
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        strtab_size_shift = log2size + 5;
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        addr = strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}
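
/*
 * Worked example of the 2-level lookup above (illustrative only; the
 * numbers are hypothetical): with sid_split = 8 and sid = 0x1234,
 *
 *     l1_ste_offset = 0x1234 >> 8    = 0x12
 *     l2_ste_offset = 0x1234 & 0xff  = 0x34
 *     l1ptr         = strtab_base + 0x12 * sizeof(STEDesc)
 *     addr          = l2ptr + 0x34 * sizeof(STE)
 *
 * where l2ptr comes from the level-1 descriptor and the L1STD span field
 * bounds how many level-2 entries (1 << span) that descriptor covers.
 */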

static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we only support AArch64 stage-1 translation at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz);
    }

    event->record_trans_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}

/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zeroed by the caller
 *
 * Return < 0 in case of config decoding error (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret;
    STE ste;
    CD cd;

    ret = smmu_find_ste(s, sid, &ste, event);
    if (ret) {
        return ret;
    }

    ret = decode_ste(s, cfg, &ste, event);
    if (ret) {
        return ret;
    }

    if (cfg->aborted || cfg->bypassed) {
        return 0;
    }

    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
    if (ret) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}
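
/*
 * Illustrative example of what decode_cd() above produces (hypothetical
 * values): a CD with TG0 = 0 (4KB granule) and T0SZ = 24 yields
 *
 *     cfg->tt[0].granule_sz = 12
 *     cfg->tt[0].tsz        = 24
 *
 * i.e. TTB0 is used for IOVAs below 1ULL << (64 - 24), which is what
 * select_tt() relies on later to pick a translation table for a given IOVA.
 */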

/**
 * smmuv3_get_config - Look up a cached copy of the configuration data for
 * @sdev; on a cache miss, decode the configuration structures from
 * guest RAM.
 *
 * @sdev: SMMUDevice handle
 * @event: output event info
 *
 * The configuration cache contains data resulting from both STE and CD
 * decoding, in the form of an SMMUTransCfg struct. The hash table is indexed
 * by the SMMUDevice handle.
 */
static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;
    SMMUTransCfg *cfg;

    cfg = g_hash_table_lookup(bc->configs, sdev);
    if (cfg) {
        sdev->cfg_cache_hits++;
        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
    } else {
        sdev->cfg_cache_misses++;
        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
        cfg = g_new0(SMMUTransCfg, 1);

        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
            g_hash_table_insert(bc->configs, sdev, cfg);
        } else {
            g_free(cfg);
            cfg = NULL;
        }
    }
    return cfg;
}

static void smmuv3_flush_config(SMMUDevice *sdev)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;

    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
    g_hash_table_remove(bc->configs, sdev);
}

static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_NONE,
                           .sid = sid,
                           .inval_ste_allowed = false};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTranslationStatus status;
    SMMUState *bs = ARM_SMMU(s);
    uint64_t page_mask, aligned_addr;
    IOMMUTLBEntry *cached_entry = NULL;
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg = NULL;
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };
    SMMUIOTLBKey key, *new_key;

    qemu_mutex_lock(&s->mutex);

    if (!smmu_enabled(s)) {
        status = SMMU_TRANS_DISABLE;
        goto epilogue;
    }

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    if (cfg->aborted) {
        status = SMMU_TRANS_ABORT;
        goto epilogue;
    }

    if (cfg->bypassed) {
        status = SMMU_TRANS_BYPASS;
        goto epilogue;
    }

    tt = select_tt(cfg, addr);
    if (!tt) {
        if (event.record_trans_faults) {
            event.type = SMMU_EVT_F_TRANSLATION;
            event.u.f_translation.addr = addr;
            event.u.f_translation.rnw = flag & 0x1;
        }
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    page_mask = (1ULL << (tt->granule_sz)) - 1;
    aligned_addr = addr & ~page_mask;

    key.asid = cfg->asid;
    key.iova = aligned_addr;

    cached_entry = g_hash_table_lookup(bs->iotlb, &key);
    if (cached_entry) {
        cfg->iotlb_hits++;
        trace_smmu_iotlb_cache_hit(cfg->asid, aligned_addr,
                                   cfg->iotlb_hits, cfg->iotlb_misses,
                                   100 * cfg->iotlb_hits /
                                   (cfg->iotlb_hits + cfg->iotlb_misses));
        if ((flag & IOMMU_WO) && !(cached_entry->perm & IOMMU_WO)) {
            status = SMMU_TRANS_ERROR;
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
        } else {
            status = SMMU_TRANS_SUCCESS;
        }
        goto epilogue;
    }

    cfg->iotlb_misses++;
    trace_smmu_iotlb_cache_miss(cfg->asid, addr & ~page_mask,
                                cfg->iotlb_hits, cfg->iotlb_misses,
                                100 * cfg->iotlb_hits /
                                (cfg->iotlb_hits + cfg->iotlb_misses));

    if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
        smmu_iotlb_inv_all(bs);
    }

    cached_entry = g_new0(IOMMUTLBEntry, 1);

    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
        g_free(cached_entry);
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        status = SMMU_TRANS_ERROR;
    } else {
        new_key = g_new0(SMMUIOTLBKey, 1);
        new_key->asid = cfg->asid;
        new_key->iova = aligned_addr;
        g_hash_table_insert(bs->iotlb, new_key, cached_entry);
        status = SMMU_TRANS_SUCCESS;
    }

epilogue:
    qemu_mutex_unlock(&s->mutex);
    switch (status) {
    case SMMU_TRANS_SUCCESS:
        entry.perm = flag;
        entry.translated_addr = cached_entry->translated_addr +
                                (addr & page_mask);
        entry.addr_mask = cached_entry->addr_mask;
        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
                                       entry.translated_addr, entry.perm);
        break;
    case SMMU_TRANS_DISABLE:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
                                       entry.perm);
        break;
    case SMMU_TRANS_BYPASS:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
                                      entry.perm);
        break;
    case SMMU_TRANS_ABORT:
        /* no event is recorded on abort */
        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
                                     entry.perm);
        break;
    case SMMU_TRANS_ERROR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64" (%s)\n",
                      mr->parent_obj.name, addr, smmu_event_string(event.type));
        smmuv3_record_event(s, &event);
        break;
    }

    return entry;
}
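
/*
 * Note on the returned IOMMUTLBEntry (informative, hypothetical numbers):
 * on SMMU_TRANS_SUCCESS with a 64KB granule, page_mask is 0xffff, so for
 * addr = 0x80012345:
 *
 *     entry.translated_addr = cached_entry->translated_addr + 0x2345
 *     entry.addr_mask       = cached_entry->addr_mask (the page/block mask
 *                             computed by the page table walk)
 *
 * On DISABLE/BYPASS the SMMU is transparent and the entry is an identity
 * mapping with TARGET_PAGE granularity.
 */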

/**
 * smmuv3_notify_iova - call the notifier @n for a given
 * @asid and @iova tuple.
 *
 * @mr: IOMMU memory region handle
 * @n: notifier to be called
 * @asid: address space ID or negative value if we don't care
 * @iova: iova
 */
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
                               IOMMUNotifier *n,
                               int asid,
                               dma_addr_t iova)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUEventInfo event = {.inval_ste_allowed = true};
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg;
    IOMMUTLBEntry entry;

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        return;
    }

    if (asid >= 0 && cfg->asid != asid) {
        return;
    }

    tt = select_tt(cfg, iova);
    if (!tt) {
        return;
    }

    entry.target_as = &address_space_memory;
    entry.iova = iova;
    entry.addr_mask = (1 << tt->granule_sz) - 1;
    entry.perm = IOMMU_NONE;

    memory_region_notify_one(n, &entry);
}

/* invalidate an asid/iova tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova)
{
    SMMUDevice *sdev;

    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        IOMMUMemoryRegion *mr = &sdev->iommu;
        IOMMUNotifier *n;

        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova);

        IOMMU_NOTIFIER_FOREACH(n, mr) {
            smmuv3_notify_iova(mr, n, asid, iova);
        }
    }
}
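
/*
 * Informative sketch of what a notifier receives on invalidation, assuming
 * a 4KB granule (granule_sz = 12): smmuv3_notify_iova() above builds an
 * UNMAP-style entry covering exactly one granule,
 *
 *     entry.iova      = iova
 *     entry.addr_mask = 0xfff
 *     entry.perm      = IOMMU_NONE
 *
 * so a listener registered for UNMAP events (e.g. vhost) would drop any
 * cached translation it holds for that page.
 */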

static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        qemu_mutex_lock(&s->mutex);
        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
            break;
        case SMMU_CMD_CFGI_STE:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_ste(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);

            break;
        }
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        {
            uint32_t start = CMD_SID(&cmd), end, i;
            uint8_t range = CMD_STE_RANGE(&cmd);

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            end = start + (1 << (range + 1)) - 1;
            trace_smmuv3_cmdq_cfgi_ste_range(start, end);

            for (i = start; i <= end; i++) {
                IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, i);
                SMMUDevice *sdev;

                if (!mr) {
                    continue;
                }
                sdev = container_of(mr, SMMUDevice, iommu);
                smmuv3_flush_config(sdev);
            }
            break;
        }
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_cd(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);
            break;
        }
        case SMMU_CMD_TLBI_NH_ASID:
        {
            uint16_t asid = CMD_ASID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_asid(bs, asid);
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NSNH_ALL:
            trace_smmuv3_cmdq_tlbi_nh();
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_all(bs);
            break;
        case SMMU_CMD_TLBI_NH_VAA:
        {
            dma_addr_t addr = CMD_ADDR(&cmd);
            uint16_t vmid = CMD_VMID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_vaa(vmid, addr);
            smmuv3_inv_notifiers_iova(bs, -1, addr);
            smmu_iotlb_inv_all(bs);
            break;
        }
        case SMMU_CMD_TLBI_NH_VA:
        {
            uint16_t asid = CMD_ASID(&cmd);
            uint16_t vmid = CMD_VMID(&cmd);
            dma_addr_t addr = CMD_ADDR(&cmd);
            bool leaf = CMD_LEAF(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_va(vmid, asid, addr, leaf);
            smmuv3_inv_notifiers_iova(bs, asid, addr);
            smmu_iotlb_inv_iova(bs, asid, addr);
            break;
        }
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        qemu_mutex_unlock(&s->mutex);
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}
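
/*
 * Worked example for SMMU_CMD_CFGI_STE_RANGE above (hypothetical values):
 * with CMD_SID = 0x100 and CMD_STE_RANGE = 4, the command covers
 * 1 << (4 + 1) = 32 StreamIDs, i.e.
 *
 *     start = 0x100, end = 0x100 + 32 - 1 = 0x11f
 *
 * and the cached SMMUTransCfg of every device whose SID falls in that range
 * is dropped, forcing a re-decode on its next transaction.
 */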

static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW signals that commands can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}

static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x2f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}
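
/*
 * A short note on the register window (informative): the device exposes a
 * 0x20000 byte region (see smmu_realize() below) and smmu_read_mmio()/
 * smmu_write_mmio() clear bit 16 of the offset, so the second 64KB page is
 * an exact alias of the first. 64-bit registers such as STRTAB_BASE accept
 * either a single 8-byte access or two 4-byte accesses, e.g. hypothetically
 * on the guest side:
 *
 *     writel(base + A_STRTAB_BASE,     lower_32_bits(val));
 *     writel(base + A_STRTAB_BASE + 4, upper_32_bits(val));
 *
 * which smmu_writel() stitches back together with deposit64().
 */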

static void smmu_reset(DeviceState *dev)
{
    SMMUv3State *s = ARM_SMMUV3(dev);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    c->parent_reset(dev);

    smmuv3_init_regs(s);
}

static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qemu_mutex_init(&s->mutex);

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
};

static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}

static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                      IOMMUNotifierFlag old,
                                      IOMMUNotifierFlag new,
                                      Error **errp)
{
    SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
    SMMUv3State *s3 = sdev->smmu;
    SMMUState *s = &(s3->smmu_state);

    if (new & IOMMU_NOTIFIER_MAP) {
        error_setg(errp,
                   "device %02x.%02x.%x requires iommu MAP notifier which is "
                   "not currently supported", pci_bus_num(sdev->bus),
                   PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
        return -EINVAL;
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
        QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
        QLIST_REMOVE(sdev, next);
    }
    return 0;
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}

static const TypeInfo smmuv3_type_info = {
    .name = TYPE_ARM_SMMUV3,
    .parent = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size = sizeof(SMMUv3Class),
    .class_init = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)