/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"
#include "smmu-internal.h"

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * the GERROR register in case of a GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non-pending errors */
            return;
        }
        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non-pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case the guest toggles bits
     * corresponding to inactive IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
}

static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}
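/*
 * Note on queue pointer encoding: as per the SMMUv3 architecture, PROD and
 * CONS each hold a log2size-bit index with one wrap bit immediately above
 * it. The helpers in smmuv3-internal.h (smmuv3_q_empty, smmuv3_q_full,
 * queue_prod_incr, queue_cons_incr) compare and advance both parts: equal
 * index and equal wrap bit means the queue is empty, equal index with
 * opposite wrap bits means it is full.
 */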
static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}

void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_NONE:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt, info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt, info->u.f_uut.rnw);
        EVT_SET_PNU(&evt, info->u.f_uut.pnu);
        EVT_SET_IND(&evt, info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR2(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}

static void smmuv3_init_regs(SMMUv3State *s)
{
    /**
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     * multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transactions will always be aborted/an error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);

    /* 4K, 16K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
    s->aidr = 0x1;
}

static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch ste at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch cd at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        event->u.f_cd_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}
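/*
 * Note: STE.Config is decoded with the helpers used below. The guest can
 * mark a stream as aborted (transactions are terminated), bypassed
 * (transactions pass through untranslated) or stage 1 translated. Stage 2
 * and combined stage 1 + 2 configurations are not implemented by this
 * model and are reported as a bad STE.
 */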
/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;

    if (!STE_VALID(ste)) {
        if (!event->inval_ste_allowed) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
        }
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return 0;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return 0;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
        goto bad_ste;
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}
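/*
 * Note: with a 2-level stream table, SID[sid_split - 1:0] indexes the
 * level-2 array of STEs while the upper SID bits index the level-1 array
 * of descriptors. For instance, with sid_split == 8, SID 0x1234 selects
 * L1 descriptor 0x12 and STE 0x34 within the level-2 table it points to.
 */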
/**
 * smmu_find_ste - Return the stream table entry associated
 * with the SID
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream tables
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr, strtab_base;
    uint32_t log2size;
    int strtab_size_shift;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    log2size = FIELD_EX32(s->strtab_base_cfg, STRTAB_BASE_CFG, LOG2SIZE);
    /*
     * Check SID range against both guest-configured and implementation limits
     */
    if (sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE))) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
        dma_addr_t l1ptr, l2ptr;
        STEDesc l1std;

        /*
         * Align strtab base address to table size. For this purpose, assume it
         * is not bounded by SMMU_IDR1_SIDSIZE.
         */
        strtab_size_shift = MAX(5, (int)log2size - s->sid_split - 1 + 3);
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr, &l1std,
                              sizeof(l1std));
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0x%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            if (!event->inval_ste_allowed) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalid sid=%d (L1STD span=0)\n", sid);
            }
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        strtab_size_shift = log2size + 5;
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        addr = strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}
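/*
 * Note: a stage 1 context describes two translation tables, TT0 and TT1,
 * covering the lower and upper halves of the VA space respectively;
 * select_tt() picks one based on the input address. The decoding below
 * validates each enabled table independently.
 */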
static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* only AArch64 stage 1 is supported at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 14 &&
             tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        tt->had = CD_HAD(cd, i);
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had);
    }

    event->record_trans_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}

/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zero'ed by the caller
 *
 * Return < 0 in case of config decoding error (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret;
    STE ste;
    CD cd;

    ret = smmu_find_ste(s, sid, &ste, event);
    if (ret) {
        return ret;
    }

    ret = decode_ste(s, cfg, &ste, event);
    if (ret) {
        return ret;
    }

    if (cfg->aborted || cfg->bypassed) {
        return 0;
    }

    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
    if (ret) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}

/**
 * smmuv3_get_config - Look up a cached copy of the configuration data for
 * @sdev and, on a cache miss, decode the configuration structures from
 * guest RAM.
 *
 * @sdev: SMMUDevice handle
 * @event: output event info
 *
 * The configuration cache contains data resulting from both STE and CD
 * decoding in the form of an SMMUTransCfg struct. The hash table is indexed
 * by the SMMUDevice handle.
 */
static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;
    SMMUTransCfg *cfg;

    cfg = g_hash_table_lookup(bc->configs, sdev);
    if (cfg) {
        sdev->cfg_cache_hits++;
        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
    } else {
        sdev->cfg_cache_misses++;
        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
        cfg = g_new0(SMMUTransCfg, 1);

        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
            g_hash_table_insert(bc->configs, sdev, cfg);
        } else {
            g_free(cfg);
            cfg = NULL;
        }
    }
    return cfg;
}

static void smmuv3_flush_config(SMMUDevice *sdev)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;

    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
    g_hash_table_remove(bc->configs, sdev);
}
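/*
 * Note: the translate callback below proceeds in three steps: fetch the
 * per-device SMMUTransCfg (or hit in the config cache), look the IOVA up
 * in the shared IOTLB, and only on an IOTLB miss perform the actual page
 * table walk (smmu_ptw). Any fault along the way is recorded in the event
 * queue, except for aborted streams where no event is recorded.
 */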
static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_NONE,
                           .sid = sid,
                           .inval_ste_allowed = false};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTranslationStatus status;
    SMMUState *bs = ARM_SMMU(s);
    uint64_t page_mask, aligned_addr;
    SMMUTLBEntry *cached_entry = NULL;
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg = NULL;
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    qemu_mutex_lock(&s->mutex);

    if (!smmu_enabled(s)) {
        status = SMMU_TRANS_DISABLE;
        goto epilogue;
    }

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    if (cfg->aborted) {
        status = SMMU_TRANS_ABORT;
        goto epilogue;
    }

    if (cfg->bypassed) {
        status = SMMU_TRANS_BYPASS;
        goto epilogue;
    }

    tt = select_tt(cfg, addr);
    if (!tt) {
        if (event.record_trans_faults) {
            event.type = SMMU_EVT_F_TRANSLATION;
            event.u.f_translation.addr = addr;
            event.u.f_translation.rnw = flag & 0x1;
        }
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    page_mask = (1ULL << (tt->granule_sz)) - 1;
    aligned_addr = addr & ~page_mask;

    cached_entry = smmu_iotlb_lookup(bs, cfg, tt, aligned_addr);
    if (cached_entry) {
        if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
            status = SMMU_TRANS_ERROR;
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
        } else {
            status = SMMU_TRANS_SUCCESS;
        }
        goto epilogue;
    }

    cached_entry = g_new0(SMMUTLBEntry, 1);

    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
        g_free(cached_entry);
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        status = SMMU_TRANS_ERROR;
    } else {
        smmu_iotlb_insert(bs, cfg, cached_entry);
        status = SMMU_TRANS_SUCCESS;
    }

epilogue:
    qemu_mutex_unlock(&s->mutex);
    switch (status) {
    case SMMU_TRANS_SUCCESS:
        entry.perm = flag;
        entry.translated_addr = cached_entry->entry.translated_addr +
                                (addr & cached_entry->entry.addr_mask);
        entry.addr_mask = cached_entry->entry.addr_mask;
        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
                                       entry.translated_addr, entry.perm);
        break;
    case SMMU_TRANS_DISABLE:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
                                       entry.perm);
        break;
    case SMMU_TRANS_BYPASS:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
                                      entry.perm);
        break;
    case SMMU_TRANS_ABORT:
        /* no event is recorded on abort */
        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
                                     entry.perm);
        break;
    case SMMU_TRANS_ERROR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64" (%s)\n",
                      mr->parent_obj.name, addr, smmu_event_string(event.type));
        smmuv3_record_event(s, &event);
        break;
    }

    return entry;
}
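/*
 * Note: in TLB invalidation commands, TG encodes the translation granule
 * as granule_sz = 2 * TG + 10: TG=1 -> 4K (12 bits), TG=2 -> 16K (14 bits),
 * TG=3 -> 64K (16 bits). TG=0 means the command carries no range
 * information; the granule is then derived from the cached stream
 * configuration instead.
 */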
/**
 * smmuv3_notify_iova - call the notifier @n for a given
 * @asid and @iova tuple.
 *
 * @mr: IOMMU mr region handle
 * @n: notifier to be called
 * @asid: address space ID or negative value if we don't care
 * @iova: iova
 * @tg: translation granule (if communicated through range invalidation)
 * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1
 */
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
                               IOMMUNotifier *n,
                               int asid, dma_addr_t iova,
                               uint8_t tg, uint64_t num_pages)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    IOMMUTLBEvent event;
    uint8_t granule;

    if (!tg) {
        SMMUEventInfo event = {.inval_ste_allowed = true};
        SMMUTransCfg *cfg = smmuv3_get_config(sdev, &event);
        SMMUTransTableInfo *tt;

        if (!cfg) {
            return;
        }

        if (asid >= 0 && cfg->asid != asid) {
            return;
        }

        tt = select_tt(cfg, iova);
        if (!tt) {
            return;
        }
        granule = tt->granule_sz;
    } else {
        granule = tg * 2 + 10;
    }

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.iova = iova;
    event.entry.addr_mask = num_pages * (1 << granule) - 1;
    event.entry.perm = IOMMU_NONE;

    memory_region_notify_iommu_one(n, &event);
}

/* invalidate an asid/iova range tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
                                      uint8_t tg, uint64_t num_pages)
{
    SMMUDevice *sdev;

    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        IOMMUMemoryRegion *mr = &sdev->iommu;
        IOMMUNotifier *n;

        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova,
                                        tg, num_pages);

        IOMMU_NOTIFIER_FOREACH(n, mr) {
            smmuv3_notify_iova(mr, n, asid, iova, tg, num_pages);
        }
    }
}
static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
{
    uint8_t scale = 0, num = 0, ttl = 0;
    dma_addr_t addr = CMD_ADDR(cmd);
    uint8_t type = CMD_TYPE(cmd);
    uint16_t vmid = CMD_VMID(cmd);
    bool leaf = CMD_LEAF(cmd);
    uint8_t tg = CMD_TG(cmd);
    uint64_t first_page = 0, last_page;
    uint64_t num_pages = 1;
    int asid = -1;

    if (tg) {
        scale = CMD_SCALE(cmd);
        num = CMD_NUM(cmd);
        ttl = CMD_TTL(cmd);
        num_pages = (num + 1) * BIT_ULL(scale);
    }

    if (type == SMMU_CMD_TLBI_NH_VA) {
        asid = CMD_ASID(cmd);
    }

    /* Split invalidations into power-of-2 range invalidations */
    last_page = num_pages - 1;
    while (num_pages) {
        uint8_t granule = tg * 2 + 10;
        uint64_t mask, count;

        mask = dma_aligned_pow2_mask(first_page, last_page, 64 - granule);
        count = mask + 1;

        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, count, ttl, leaf);
        smmuv3_inv_notifiers_iova(s, asid, addr, tg, count);
        smmu_iotlb_inv_iova(s, asid, addr, tg, count, ttl);

        num_pages -= count;
        first_page += count;
        addr += count * BIT_ULL(granule);
    }
}

static gboolean
smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data)
{
    SMMUDevice *sdev = (SMMUDevice *)key;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;

    if (sid < sid_range->start || sid > sid_range->end) {
        return false;
    }
    trace_smmuv3_config_cache_inv(sid);
    return true;
}
static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * Some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, the spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        qemu_mutex_lock(&s->mutex);
        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
            break;
        case SMMU_CMD_CFGI_STE:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_ste(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);

            break;
        }
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        {
            uint32_t sid = CMD_SID(&cmd), mask;
            uint8_t range = CMD_STE_RANGE(&cmd);
            SMMUSIDRange sid_range;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            mask = (1ULL << (range + 1)) - 1;
            sid_range.start = sid & ~mask;
            sid_range.end = sid_range.start + mask;

            trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end);
            g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste,
                                        &sid_range);
            break;
        }
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_cd(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);
            break;
        }
        case SMMU_CMD_TLBI_NH_ASID:
        {
            uint16_t asid = CMD_ASID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_asid(bs, asid);
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NSNH_ALL:
            trace_smmuv3_cmdq_tlbi_nh();
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_all(bs);
            break;
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_NH_VA:
            smmuv3_s1_range_inval(bs, &cmd);
            break;
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        qemu_mutex_unlock(&s->mutex);
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}
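/*
 * Note: the 64-bit registers (GERROR_IRQ_CFG0, STRTAB_BASE, CMDQ_BASE,
 * EVENTQ_BASE, EVENTQ_IRQ_CFG0) are accessible both as a single 64-bit
 * access, handled by smmu_writell/smmu_readll, and as two 32-bit accesses
 * to the low and high halves, handled by the "A_xxx" and "A_xxx + 4" cases
 * in smmu_writel/smmu_readl.
 */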
static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify that commands can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}
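/*
 * Note: accesses to unimplemented register offsets follow the usual
 * convention: reads return zero (RAZ) and writes are ignored (WI), with a
 * LOG_UNIMP message rather than a bus error.
 */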
static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x2f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_AIDR:
        *data = s->aidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}
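/*
 * Note: .valid below restricts guest accesses to 32-bit and 64-bit
 * transactions (anything else is treated as invalid by the memory core),
 * while .impl declares that the handlers themselves operate on 4- or
 * 8-byte quantities.
 */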
static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}

static void smmu_reset(DeviceState *dev)
{
    SMMUv3State *s = ARM_SMMUV3(dev);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    c->parent_reset(dev);

    smmuv3_init_regs(s);
}

static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qemu_mutex_init(&s->mutex);

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}
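/*
 * Note: only architected register state is migrated below. The config and
 * IOTLB caches are not part of the migration stream; being demand-filled,
 * they simply start out empty on the destination and are repopulated as
 * translations come in.
 */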
static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
};

static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}
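/*
 * Note: only UNMAP notifiers are accepted, which is sufficient for
 * vhost-style device IOTLBs. MAP notifiers (as required e.g. by VFIO)
 * would need the model to replay mappings and are rejected below.
 */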
static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                      IOMMUNotifierFlag old,
                                      IOMMUNotifierFlag new,
                                      Error **errp)
{
    SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
    SMMUv3State *s3 = sdev->smmu;
    SMMUState *s = &(s3->smmu_state);

    if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        error_setg(errp, "SMMUv3 does not support dev-iotlb yet");
        return -EINVAL;
    }

    if (new & IOMMU_NOTIFIER_MAP) {
        error_setg(errp,
                   "device %02x.%02x.%x requires iommu MAP notifier which is "
                   "not currently supported", pci_bus_num(sdev->bus),
                   PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
        return -EINVAL;
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
        QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
        QLIST_REMOVE(sdev, next);
    }
    return 0;
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}

static const TypeInfo smmuv3_type_info = {
    .name          = TYPE_ARM_SMMUV3,
    .parent        = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size    = sizeof(SMMUv3Class),
    .class_init    = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)