/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/bitops.h"
#include "hw/irq.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"
#include "smmu-internal.h"

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{

    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }
        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case guest toggles bits corresponding
     * to not active IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size,
                           MEMTXATTRS_UNSPECIFIED);
}

static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size,
                           MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}

static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
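        /*
         * Event queue is full: the event is dropped here and
         * smmuv3_record_event() reports the failed write as an
         * EVENTQ_ABT_ERR through GERROR.
         */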
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}

void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_NONE:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt, info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt, info->u.f_uut.rnw);
        EVT_SET_PNU(&evt, info->u.f_uut.pnu);
        EVT_SET_IND(&evt, info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR2(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}

static void smmuv3_init_regs(SMMUv3State *s)
{
    /**
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     * multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS);

    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);
    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, BBML, 2);

    /* 4K, 16K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
    s->aidr = 0x1;
    s->cr[0] = 0;
    s->cr0ack = 0;
    s->irq_ctrl = 0;
    s->gerror = 0;
    s->gerrorn = 0;
    s->statusr = 0;
}

static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;

}

/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf),
                          MEMTXATTRS_UNSPECIFIED);
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        /* report the fault address in the CD fetch record, not the STE one */
        event->u.f_cd_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;

    if (!STE_VALID(ste)) {
        if (!event->inval_ste_allowed) {
            qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
        }
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return 0;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return 0;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
        goto bad_ste;
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}

/**
 * smmu_find_ste - Return the stream table entry associated
 * to the sid
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream table
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr, strtab_base;
    uint32_t log2size;
    int strtab_size_shift;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    log2size = FIELD_EX32(s->strtab_base_cfg, STRTAB_BASE_CFG, LOG2SIZE);
    /*
     * Check SID range against both guest-configured and implementation limits
     */
    if (sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE))) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
        dma_addr_t l1ptr, l2ptr;
        STEDesc l1std;

        /*
         * Align strtab base address to table size. For this purpose, assume it
         * is not bounded by SMMU_IDR1_SIDSIZE.
         */
        strtab_size_shift = MAX(5, (int)log2size - s->sid_split - 1 + 3);
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr, &l1std,
                              sizeof(l1std), MEMTXATTRS_UNSPECIFIED);
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            if (!event->inval_ste_allowed) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "invalid sid=%d (L1STD span=0)\n", sid);
            }
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        strtab_size_shift = log2size + 5;
        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
        addr = strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}

static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 14 &&
             tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        tt->had = CD_HAD(cd, i);
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had);
    }

    cfg->record_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}

/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zero'ed by the caller
 *
 * return < 0 in case of config decoding error (@event is filled
 * accordingly). Return 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret;
    STE ste;
    CD cd;

    ret = smmu_find_ste(s, sid, &ste, event);
    if (ret) {
        return ret;
    }

    ret = decode_ste(s, cfg, &ste, event);
    if (ret) {
        return ret;
    }

    if (cfg->aborted || cfg->bypassed) {
        return 0;
    }

    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
    if (ret) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}

/**
 * smmuv3_get_config - Look up for a cached copy of configuration data for
 * @sdev and on cache miss performs a configuration structure decoding from
 * guest RAM.
 *
 * @sdev: SMMUDevice handle
 * @event: output event info
 *
 * The configuration cache contains data resulting from both STE and CD
 * decoding under the form of an SMMUTransCfg struct. The hash table is indexed
 * by the SMMUDevice handle.
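 *
 * Return: the cached or freshly decoded SMMUTransCfg, or NULL if the
 * configuration decoding failed (@event is then filled by the decoder).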
 */
static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;
    SMMUTransCfg *cfg;

    cfg = g_hash_table_lookup(bc->configs, sdev);
    if (cfg) {
        sdev->cfg_cache_hits++;
        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
    } else {
        sdev->cfg_cache_misses++;
        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
        cfg = g_new0(SMMUTransCfg, 1);

        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
            g_hash_table_insert(bc->configs, sdev, cfg);
        } else {
            g_free(cfg);
            cfg = NULL;
        }
    }
    return cfg;
}

static void smmuv3_flush_config(SMMUDevice *sdev)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;

    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
    g_hash_table_remove(bc->configs, sdev);
}

static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_NONE,
                           .sid = sid,
                           .inval_ste_allowed = false};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTranslationStatus status;
    SMMUState *bs = ARM_SMMU(s);
    uint64_t page_mask, aligned_addr;
    SMMUTLBEntry *cached_entry = NULL;
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg = NULL;
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    qemu_mutex_lock(&s->mutex);

    if (!smmu_enabled(s)) {
        status = SMMU_TRANS_DISABLE;
        goto epilogue;
    }

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    if (cfg->aborted) {
        status = SMMU_TRANS_ABORT;
        goto epilogue;
    }

    if (cfg->bypassed) {
        status = SMMU_TRANS_BYPASS;
        goto epilogue;
    }

    tt = select_tt(cfg, addr);
    if (!tt) {
        if (cfg->record_faults) {
            event.type = SMMU_EVT_F_TRANSLATION;
            event.u.f_translation.addr = addr;
            event.u.f_translation.rnw = flag & 0x1;
        }
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    page_mask = (1ULL << (tt->granule_sz)) - 1;
    aligned_addr = addr & ~page_mask;

    cached_entry = smmu_iotlb_lookup(bs, cfg, tt, aligned_addr);
    if (cached_entry) {
        if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
            status = SMMU_TRANS_ERROR;
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
        } else {
            status = SMMU_TRANS_SUCCESS;
        }
        goto epilogue;
    }

    cached_entry = g_new0(SMMUTLBEntry, 1);

    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
        g_free(cached_entry);
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (cfg->record_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        status = SMMU_TRANS_ERROR;
    } else {
        smmu_iotlb_insert(bs, cfg, cached_entry);
        status = SMMU_TRANS_SUCCESS;
    }

epilogue:
    qemu_mutex_unlock(&s->mutex);
    switch (status) {
    case SMMU_TRANS_SUCCESS:
        entry.perm = cached_entry->entry.perm;
        entry.translated_addr = cached_entry->entry.translated_addr +
                                (addr & cached_entry->entry.addr_mask);
        entry.addr_mask = cached_entry->entry.addr_mask;
        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
                                       entry.translated_addr, entry.perm);
        break;
    case SMMU_TRANS_DISABLE:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
                                       entry.perm);
        break;
    case SMMU_TRANS_BYPASS:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
                                      entry.perm);
        break;
    case SMMU_TRANS_ABORT:
        /* no event is recorded on abort */
        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
                                     entry.perm);
        break;
    case SMMU_TRANS_ERROR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64" (%s)\n",
                      mr->parent_obj.name, addr, smmu_event_string(event.type));
        smmuv3_record_event(s, &event);
        break;
    }

    return entry;
}

/**
 * smmuv3_notify_iova - call the notifier @n for a given
 * @asid and @iova tuple.
 *
 * @mr: IOMMU mr region handle
 * @n: notifier to be called
 * @asid: address space ID or negative value if we don't care
 * @iova: iova
 * @tg: translation granule (if communicated through range invalidation)
 * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1
 */
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
                               IOMMUNotifier *n,
                               int asid, dma_addr_t iova,
                               uint8_t tg, uint64_t num_pages)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    IOMMUTLBEvent event;
    uint8_t granule;

    if (!tg) {
        SMMUEventInfo event = {.inval_ste_allowed = true};
        SMMUTransCfg *cfg = smmuv3_get_config(sdev, &event);
        SMMUTransTableInfo *tt;

        if (!cfg) {
            return;
        }

        if (asid >= 0 && cfg->asid != asid) {
            return;
        }

        tt = select_tt(cfg, iova);
        if (!tt) {
            return;
        }
        granule = tt->granule_sz;
    } else {
        granule = tg * 2 + 10;
    }

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.iova = iova;
    event.entry.addr_mask = num_pages * (1 << granule) - 1;
    event.entry.perm = IOMMU_NONE;

    memory_region_notify_iommu_one(n, &event);
}

/* invalidate an asid/iova range tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
                                      uint8_t tg, uint64_t num_pages)
{
    SMMUDevice *sdev;

    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
        IOMMUMemoryRegion *mr = &sdev->iommu;
        IOMMUNotifier *n;

        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova,
                                        tg, num_pages);

        IOMMU_NOTIFIER_FOREACH(n, mr) {
            smmuv3_notify_iova(mr, n, asid, iova, tg, num_pages);
        }
    }
}

static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
{
    dma_addr_t end, addr = CMD_ADDR(cmd);
    uint8_t type = CMD_TYPE(cmd);
    uint16_t vmid = CMD_VMID(cmd);
    uint8_t scale = CMD_SCALE(cmd);
    uint8_t num = CMD_NUM(cmd);
    uint8_t ttl = CMD_TTL(cmd);
    bool leaf = CMD_LEAF(cmd);
    uint8_t tg = CMD_TG(cmd);
    uint64_t num_pages;
    uint8_t granule;
    int asid = -1;

    if (type == SMMU_CMD_TLBI_NH_VA) {
        asid = CMD_ASID(cmd);
    }

    if (!tg) {
        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
        smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1);
        smmu_iotlb_inv_iova(s, asid, addr, tg, 1, ttl);
        return;
    }

    /* RIL in use */

    num_pages = (num + 1) * BIT_ULL(scale);
    granule = tg * 2 + 10;

    /* Split invalidations into ^2 range invalidations */
    end = addr + (num_pages << granule) - 1;

    while (addr != end + 1) {
        uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);

        num_pages = (mask + 1) >> granule;
        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
        smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
        smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
        addr += mask + 1;
    }
}

static gboolean
smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data)
{
    SMMUDevice *sdev = (SMMUDevice *)key;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;

    if (sid < sid_range->start || sid > sid_range->end) {
        return false;
    }
    trace_smmuv3_config_cache_inv(sid);
    return true;
}

static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        qemu_mutex_lock(&s->mutex);
        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
            break;
        case SMMU_CMD_CFGI_STE:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_ste(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);

            break;
        }
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        {
            uint32_t sid = CMD_SID(&cmd), mask;
            uint8_t range = CMD_STE_RANGE(&cmd);
            SMMUSIDRange sid_range;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            mask = (1ULL << (range + 1)) - 1;
            sid_range.start = sid & ~mask;
            sid_range.end = sid_range.start + mask;

            trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end);
            g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste,
                                        &sid_range);
            break;
        }
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_cd(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);
            break;
        }
        case SMMU_CMD_TLBI_NH_ASID:
        {
            uint16_t asid = CMD_ASID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_asid(bs, asid);
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NSNH_ALL:
            trace_smmuv3_cmdq_tlbi_nh();
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_all(bs);
            break;
        case SMMU_CMD_TLBI_NH_VAA:
        case SMMU_CMD_TLBI_NH_VA:
            smmuv3_s1_range_inval(bs, &cmd);
            break;
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        qemu_mutex_unlock(&s->mutex);
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}

static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW may notify cmds can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}

static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x2f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_AIDR:
        *data = s->aidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}

static void smmu_reset_hold(Object *obj)
{
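    /*
     * Resettable "hold" phase handler: run the parent class reset first,
     * then bring the register state back to its init-time defaults.
     */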
    SMMUv3State *s = ARM_SMMUV3(obj);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    if (c->parent_phases.hold) {
        c->parent_phases.hold(obj);
    }

    smmuv3_init_regs(s);
}

static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qemu_mutex_init(&s->mutex);

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
};

static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ResettableClass *rc = RESETTABLE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    resettable_class_set_parent_phases(rc, NULL, smmu_reset_hold, NULL,
                                       &c->parent_phases);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}

static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                      IOMMUNotifierFlag old,
                                      IOMMUNotifierFlag new,
                                      Error **errp)
{
    SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
    SMMUv3State *s3 = sdev->smmu;
    SMMUState *s = &(s3->smmu_state);

    if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        error_setg(errp, "SMMUv3 does not support dev-iotlb yet");
        return -EINVAL;
    }

    if (new & IOMMU_NOTIFIER_MAP) {
        error_setg(errp,
                   "device %02x.%02x.%x requires iommu MAP notifier which is "
                   "not currently supported", pci_bus_num(sdev->bus),
                   PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
        return -EINVAL;
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
        QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
        QLIST_REMOVE(sdev, next);
    }
    return 0;
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}

static const TypeInfo smmuv3_type_info = {
    .name = TYPE_ARM_SMMUV3,
    .parent = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size = sizeof(SMMUv3Class),
    .class_init = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)