// SPDX-License-Identifier: GPL-2.0
/*
 * intel-pasid.c - PASID idr, table and entry manipulation
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/bitops.h>
#include <linux/cpufeature.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>

#include "iommu.h"
#include "pasid.h"

/*
 * Intel IOMMU system wide PASID name space:
 */
u32 intel_pasid_max_id = PASID_MAX;

int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
{
	unsigned long flags;
	u8 status_code;
	int ret = 0;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		*pasid = VCMD_VRSP_RESULT_PASID(res);
		break;
	case VCMD_VRSP_SC_NO_PASID_AVAIL:
		pr_info("IOMMU: %s: No PASID available\n", iommu->name);
		ret = -ENOSPC;
		break;
	default:
		ret = -ENODEV;
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}

	return ret;
}

void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
{
	unsigned long flags;
	u8 status_code;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG,
		    VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		break;
	case VCMD_VRSP_SC_INVALID_PASID:
		pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
		break;
	default:
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}
}

/*
 * Per device pasid table management:
 */

/*
 * Allocate a pasid table for @dev. It should be called in a
 * single-thread context.
 */
int intel_pasid_alloc_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct page *pages;
	u32 max_pasid = 0;
	int order, size;

	might_sleep();
	info = dev_iommu_priv_get(dev);
	if (WARN_ON(!info || !dev_is_pci(dev) || info->pasid_table))
		return -EINVAL;

	pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL);
	if (!pasid_table)
		return -ENOMEM;

	if (info->pasid_supported)
		max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)),
				  intel_pasid_max_id);

	size = max_pasid >> (PASID_PDE_SHIFT - 3);
	order = size ? get_order(size) : 0;
	pages = alloc_pages_node(info->iommu->node,
				 GFP_KERNEL | __GFP_ZERO, order);
	if (!pages) {
		kfree(pasid_table);
		return -ENOMEM;
	}

	pasid_table->table = page_address(pages);
	pasid_table->order = order;
	pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
	info->pasid_table = pasid_table;

	return 0;
}
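
/*
 * Worked sizing example (illustrative only, assuming PASID_PDE_SHIFT == 6,
 * 8-byte directory entries and 4 KiB pages): each directory entry covers
 * 64 PASIDs, so the directory needs max_pasid / 64 * 8 = max_pasid >> 3
 * bytes. A device exposing the full 20-bit PASID space (0x100000 PASIDs)
 * therefore needs 128 KiB, i.e. order 5 (32 contiguous pages), and the
 * round-up 1 << (order + PAGE_SHIFT + 3) yields 0x100000 again.
 */
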
void intel_pasid_free_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *table;
	int i, max_pde;

	info = dev_iommu_priv_get(dev);
	if (!info || !dev_is_pci(dev) || !info->pasid_table)
		return;

	pasid_table = info->pasid_table;
	info->pasid_table = NULL;

	/* Free scalable mode PASID directory tables: */
	dir = pasid_table->table;
	max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
	for (i = 0; i < max_pde; i++) {
		table = get_pasid_table_from_pde(&dir[i]);
		free_pgtable_page(table);
	}

	free_pages((unsigned long)pasid_table->table, pasid_table->order);
	kfree(pasid_table);
}

struct pasid_table *intel_pasid_get_table(struct device *dev)
{
	struct device_domain_info *info;

	info = dev_iommu_priv_get(dev);
	if (!info)
		return NULL;

	return info->pasid_table;
}

static int intel_pasid_get_dev_max_id(struct device *dev)
{
	struct device_domain_info *info;

	info = dev_iommu_priv_get(dev);
	if (!info || !info->pasid_table)
		return 0;

	return info->pasid_table->max_pasid;
}

static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *entries;
	int dir_index, index;

	pasid_table = intel_pasid_get_table(dev);
	if (WARN_ON(!pasid_table || pasid >= intel_pasid_get_dev_max_id(dev)))
		return NULL;

	dir = pasid_table->table;
	info = dev_iommu_priv_get(dev);
	dir_index = pasid >> PASID_PDE_SHIFT;
	index = pasid & PASID_PTE_MASK;

retry:
	entries = get_pasid_table_from_pde(&dir[dir_index]);
	if (!entries) {
		entries = alloc_pgtable_page(info->iommu->node);
		if (!entries)
			return NULL;

		/*
		 * The pasid directory table entry won't be freed after
		 * allocation. No worry about the race with free and
		 * clear. However, this entry might be populated by others
		 * while we are preparing it. Use theirs with a retry.
		 */
		if (cmpxchg64(&dir[dir_index].val, 0ULL,
			      (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
			free_pgtable_page(entries);
			goto retry;
		}
	}

	return &entries[index];
}
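
/*
 * Example walk (illustrative, assuming PASID_PDE_SHIFT == 6 and
 * PASID_PTE_MASK == 0x3f): for pasid 0x1234, dir_index = 0x1234 >> 6 = 0x48
 * and index = 0x1234 & 0x3f = 0x34, so the entry lives in slot 0x34 of the
 * leaf table referenced by directory slot 0x48. If two threads race to
 * populate the same directory slot, the loser of the cmpxchg64() above
 * frees its freshly allocated page and retries with the winner's table.
 */
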
/*
 * Interfaces for PASID table entry manipulation:
 */
static inline void pasid_clear_entry(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], 0);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static void
intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pe;

	pe = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pe))
		return;

	if (fault_ignore && pasid_pte_is_present(pe))
		pasid_clear_entry_with_fpd(pe);
	else
		pasid_clear_entry(pe);
}

static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
	u64 old;

	old = READ_ONCE(*ptr);
	WRITE_ONCE(*ptr, (old & ~mask) | bits);
}

/*
 * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode
 * PASID entry.
 */
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}

/*
 * Get domain ID value of a scalable mode PASID entry.
 */
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
	return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}

/*
 * Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}

/*
 * Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
 * entry.
 */
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}

/*
 * Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}

/*
 * Enable fault processing by clearing the FPD(Fault Processing
 * Disable) field (Bit 1) of a scalable mode PASID entry.
 */
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 1, 0);
}

/*
 * Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_sre(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 0, 1);
}
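
/*
 * Note on bit numbering: the field helpers above and below use the bit
 * positions of the 512-bit scalable-mode PASID table entry, where bit N
 * of the entry lives in val[N / 64] at bit position N % 64. For example,
 * DID bits 79:64 map to val[1] bits 15:0, SRE (bit 128) to val[2] bit 0,
 * and WPE (bit 132) to val[2] bit 4.
 */
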
/*
 * Setup the WPE(Write Protect Enable) field (Bit 132) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_wpe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
}

/*
 * Setup the P(Present) field (Bit 0) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_present(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 0, 1);
}

/*
 * Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}

/*
 * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
 * PASID entry.
 */
static inline void
pasid_set_pgsnp(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
}

/*
 * Setup the First Level Page table Pointer field (Bit 140~191)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}

/*
 * Setup the First Level Paging Mode field (Bit 130~131) of a
 * scalable mode PASID entry.
 */
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}

static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
				    u16 did, u32 pasid)
{
	struct qi_desc desc;

	desc.qw0 = QI_PC_DID(did) | QI_PC_GRAN(QI_PC_PASID_SEL) |
		QI_PC_PASID(pasid) | QI_PC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}

static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
			       struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	u16 sid, qdep, pfsid;

	info = dev_iommu_priv_get(dev);
	if (!info || !info->ats_enabled)
		return;

	sid = info->bus << 8 | info->devfn;
	qdep = info->ats_qdep;
	pfsid = info->pfsid;

	/*
	 * When PASID 0 is used, it indicates RID2PASID(DMA request w/o PASID),
	 * devTLB flush w/o PASID should be used. For non-zero PASID under
	 * SVA usage, device could do DMA with multiple PASIDs. It is more
	 * efficient to flush devTLB specific to the PASID.
	 */
	if (pasid == PASID_RID2PASID)
		qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
	else
		qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}
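
/*
 * Illustrative example (not from the original source): for a PCI device at
 * bus 0x3a, device 0, function 1 (devfn 0x01), the source-id computed above
 * is sid = 0x3a << 8 | 0x01 = 0x3a01. The (0, 64 - VTD_PAGE_SHIFT)
 * address/size-order pair passed to the flush helpers requests an
 * invalidation that effectively covers the whole address range.
 */
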
void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
				 u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pte;
	u16 did, pgtt;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte) || !pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return;
	}

	did = pasid_get_domain_id(pte);
	pgtt = pasid_pte_get_pgtt(pte);
	intel_pasid_clear_entry(dev, pasid, fault_ignore);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	pasid_cache_invalidation_with_pasid(iommu, did, pasid);

	if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	else
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}

/*
 * This function flushes cache for a newly setup pasid table entry.
 * Caller of it should not modify the in-use pasid table entries.
 */
static void pasid_flush_caches(struct intel_iommu *iommu,
			       struct pasid_entry *pte,
			       u32 pasid, u16 did)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	if (cap_caching_mode(iommu->cap)) {
		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	} else {
		iommu_flush_write_buffer(iommu);
	}
}

/*
 * Set up the scalable mode pasid table entry for first level only
 * translation type.
 */
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
				  struct device *dev, pgd_t *pgd,
				  u32 pasid, u16 did, int flags)
{
	struct pasid_entry *pte;

	if (!ecap_flts(iommu->ecap)) {
		pr_err("No first level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	if (flags & PASID_FLAG_SUPERVISOR_MODE) {
#ifdef CONFIG_X86
		unsigned long cr0 = read_cr0();

		/* CR0.WP is normally set but just to be sure */
		if (unlikely(!(cr0 & X86_CR0_WP))) {
			pr_err("No CPU write protect!\n");
			return -EINVAL;
		}
#endif
		if (!ecap_srs(iommu->ecap)) {
			pr_err("No supervisor request support on %s\n",
			       iommu->name);
			return -EINVAL;
		}
	}

	if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) {
		pr_err("No 5-level paging support for first-level on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);

	/* Setup the first level page table pointer: */
	pasid_set_flptr(pte, (u64)__pa(pgd));
	if (flags & PASID_FLAG_SUPERVISOR_MODE) {
		pasid_set_sre(pte);
		pasid_set_wpe(pte);
	}

	if (flags & PASID_FLAG_FL5LP)
		pasid_set_flpm(pte, 1);

	if (flags & PASID_FLAG_PAGE_SNOOP)
		pasid_set_pgsnp(pte);

	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/* Setup Present and PASID Granular Translation Type: */
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}
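
/*
 * Rough usage sketch (illustrative, not taken from a specific caller): for
 * SVA-style first-level translation the caller would typically pass the
 * process page table root (e.g. mm->pgd) as @pgd and set PASID_FLAG_FL5LP
 * when the CPU uses 5-level paging so the first-level paging mode matches.
 * PASID_FLAG_SUPERVISOR_MODE additionally programs SRE/WPE so supervisor
 * requests are allowed but still honor write protection.
 */
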
/*
 * Skip top levels of page tables for iommu which has less agaw
 * than default. Unnecessary for PT mode.
 */
static inline int iommu_skip_agaw(struct dmar_domain *domain,
				  struct intel_iommu *iommu,
				  struct dma_pte **pgd)
{
	int agaw;

	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
		*pgd = phys_to_virt(dma_pte_addr(*pgd));
		if (!dma_pte_present(*pgd))
			return -EINVAL;
	}

	return agaw;
}

/*
 * Set up the scalable mode pasid entry for second only translation type.
 */
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	struct dma_pte *pgd;
	u64 pgd_val;
	int agaw;
	u16 did;

	/*
	 * If hardware advertises no support for second level
	 * translation, return directly.
	 */
	if (!ecap_slts(iommu->ecap)) {
		pr_err("No second level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pgd = domain->pgd;
	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0) {
		dev_err(dev, "Invalid domain page table\n");
		return -EINVAL;
	}

	pgd_val = virt_to_phys(pgd);
	did = domain_id_iommu(domain, iommu);

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_slptr(pte, pgd_val);
	pasid_set_address_width(pte, agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/*
	 * Since it is a second level only translation setup, we should
	 * set SRE bit as well (addresses are expected to be GPAs).
	 */
	if (pasid != PASID_RID2PASID)
		pasid_set_sre(pte);
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}
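
/*
 * Illustrative example of the agaw adjustment above (assuming the driver's
 * agaw encoding, where agaw 1/2/3 correspond to 3/4/5-level tables covering
 * 39/48/57 bits): a domain built with agaw 2 (4-level) attached through an
 * iommu that only supports agaw 1 (3-level) has iommu_skip_agaw() descend
 * past the top level once, so SLPTPTR points at a 3-level table and the AW
 * field is programmed as 1.
 */
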
/*
 * Set up the scalable mode pasid entry for passthrough translation type.
 */
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	u16 did = FLPT_DEFAULT_DID;
	struct pasid_entry *pte;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/*
	 * We should set SRE bit as well since the addresses are expected
	 * to be GPAs.
	 */
	pasid_set_sre(pte);
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Set the page snoop control for a pasid entry which has been set up.
 */
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
					  struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	u16 did;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte || !pasid_pte_is_present(pte))) {
		spin_unlock(&iommu->lock);
		return;
	}

	pasid_set_pgsnp(pte);
	did = pasid_get_domain_id(pte);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	/*
	 * VT-d spec 3.4, table 23 gives the following guidance for cache
	 * invalidation:
	 *
	 * - PASID-selective-within-Domain PASID-cache invalidation
	 * - PASID-selective PASID-based IOTLB invalidation
	 * - If (pasid is RID_PASID)
	 *    - Global Device-TLB invalidation to affected functions
	 *   Else
	 *    - PASID-based Device-TLB invalidation (with S=1 and
	 *      Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
	 */
	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
	qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}
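
/*
 * Rough lifecycle sketch of how the interfaces in this file fit together
 * (illustrative only): intel_pasid_alloc_table() is called once when a
 * device is probed; one of intel_pasid_setup_first_level(),
 * intel_pasid_setup_second_level() or intel_pasid_setup_pass_through()
 * installs an entry when a domain is attached for a given PASID;
 * intel_pasid_tear_down_entry() clears the entry and flushes the relevant
 * caches on detach; and intel_pasid_free_table() releases the directory
 * and leaf tables when the device goes away.
 */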