// SPDX-License-Identifier: GPL-2.0
/*
 * intel-pasid.c - PASID idr, table and entry manipulation
 *
 * Copyright (C) 2018 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */

#define pr_fmt(fmt)	"DMAR: " fmt

#include <linux/bitops.h>
#include <linux/cpufeature.h>
#include <linux/dmar.h>
#include <linux/iommu.h>
#include <linux/memory.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>
#include <linux/spinlock.h>

#include "iommu.h"
#include "pasid.h"

/*
 * Intel IOMMU system wide PASID name space:
 */
u32 intel_pasid_max_id = PASID_MAX;

int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid)
{
	unsigned long flags;
	u8 status_code;
	int ret = 0;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG, VCMD_CMD_ALLOC);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		*pasid = VCMD_VRSP_RESULT_PASID(res);
		break;
	case VCMD_VRSP_SC_NO_PASID_AVAIL:
		pr_info("IOMMU: %s: No PASID available\n", iommu->name);
		ret = -ENOSPC;
		break;
	default:
		ret = -ENODEV;
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}

	return ret;
}

void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid)
{
	unsigned long flags;
	u8 status_code;
	u64 res;

	raw_spin_lock_irqsave(&iommu->register_lock, flags);
	dmar_writeq(iommu->reg + DMAR_VCMD_REG,
		    VCMD_CMD_OPERAND(pasid) | VCMD_CMD_FREE);
	IOMMU_WAIT_OP(iommu, DMAR_VCRSP_REG, dmar_readq,
		      !(res & VCMD_VRSP_IP), res);
	raw_spin_unlock_irqrestore(&iommu->register_lock, flags);

	status_code = VCMD_VRSP_SC(res);
	switch (status_code) {
	case VCMD_VRSP_SC_SUCCESS:
		break;
	case VCMD_VRSP_SC_INVALID_PASID:
		pr_info("IOMMU: %s: Invalid PASID\n", iommu->name);
		break;
	default:
		pr_warn("IOMMU: %s: Unexpected error code %d\n",
			iommu->name, status_code);
	}
}

/*
 * Per device pasid table management:
 */

/*
 * Allocate a pasid table for @dev. It should be called in a
 * single-thread context.
 */
int intel_pasid_alloc_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct page *pages;
	u32 max_pasid = 0;
	int order, size;

	might_sleep();
	info = dev_iommu_priv_get(dev);
	if (WARN_ON(!info || !dev_is_pci(dev)))
		return -ENODEV;
	if (WARN_ON(info->pasid_table))
		return -EEXIST;

	pasid_table = kzalloc(sizeof(*pasid_table), GFP_KERNEL);
	if (!pasid_table)
		return -ENOMEM;

	if (info->pasid_supported)
		max_pasid = min_t(u32, pci_max_pasids(to_pci_dev(dev)),
				  intel_pasid_max_id);

	size = max_pasid >> (PASID_PDE_SHIFT - 3);
	order = size ? get_order(size) : 0;
	pages = alloc_pages_node(info->iommu->node,
				 GFP_KERNEL | __GFP_ZERO, order);
	if (!pages) {
		kfree(pasid_table);
		return -ENOMEM;
	}

	pasid_table->table = page_address(pages);
	pasid_table->order = order;
	pasid_table->max_pasid = 1 << (order + PAGE_SHIFT + 3);
	info->pasid_table = pasid_table;

	return 0;
}

void intel_pasid_free_table(struct device *dev)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *table;
	int i, max_pde;

	info = dev_iommu_priv_get(dev);
	if (!info || !dev_is_pci(dev) || !info->pasid_table)
		return;

	pasid_table = info->pasid_table;
	info->pasid_table = NULL;

	/* Free scalable mode PASID directory tables: */
	dir = pasid_table->table;
	max_pde = pasid_table->max_pasid >> PASID_PDE_SHIFT;
	for (i = 0; i < max_pde; i++) {
		table = get_pasid_table_from_pde(&dir[i]);
		free_pgtable_page(table);
	}

	free_pages((unsigned long)pasid_table->table, pasid_table->order);
	kfree(pasid_table);
}

struct pasid_table *intel_pasid_get_table(struct device *dev)
{
	struct device_domain_info *info;

	info = dev_iommu_priv_get(dev);
	if (!info)
		return NULL;

	return info->pasid_table;
}

static int intel_pasid_get_dev_max_id(struct device *dev)
{
	struct device_domain_info *info;

	info = dev_iommu_priv_get(dev);
	if (!info || !info->pasid_table)
		return 0;

	return info->pasid_table->max_pasid;
}

static struct pasid_entry *intel_pasid_get_entry(struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	struct pasid_table *pasid_table;
	struct pasid_dir_entry *dir;
	struct pasid_entry *entries;
	int dir_index, index;

	pasid_table = intel_pasid_get_table(dev);
	if (WARN_ON(!pasid_table || pasid >= intel_pasid_get_dev_max_id(dev)))
		return NULL;

	dir = pasid_table->table;
	info = dev_iommu_priv_get(dev);
	dir_index = pasid >> PASID_PDE_SHIFT;
	index = pasid & PASID_PTE_MASK;

retry:
	entries = get_pasid_table_from_pde(&dir[dir_index]);
	if (!entries) {
		entries = alloc_pgtable_page(info->iommu->node);
		if (!entries)
			return NULL;

		/*
		 * The pasid directory table entry won't be freed after
		 * allocation. No worry about the race with free and
		 * clear. However, this entry might be populated by others
		 * while we are preparing it. Use theirs with a retry.
		 */
		if (cmpxchg64(&dir[dir_index].val, 0ULL,
			      (u64)virt_to_phys(entries) | PASID_PTE_PRESENT)) {
			free_pgtable_page(entries);
			goto retry;
		}
	}

	return &entries[index];
}

/*
 * Interfaces for PASID table entry manipulation:
 */
static inline void pasid_clear_entry(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], 0);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static inline void pasid_clear_entry_with_fpd(struct pasid_entry *pe)
{
	WRITE_ONCE(pe->val[0], PASID_PTE_FPD);
	WRITE_ONCE(pe->val[1], 0);
	WRITE_ONCE(pe->val[2], 0);
	WRITE_ONCE(pe->val[3], 0);
	WRITE_ONCE(pe->val[4], 0);
	WRITE_ONCE(pe->val[5], 0);
	WRITE_ONCE(pe->val[6], 0);
	WRITE_ONCE(pe->val[7], 0);
}

static void
intel_pasid_clear_entry(struct device *dev, u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pe;

	pe = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pe))
		return;

	if (fault_ignore && pasid_pte_is_present(pe))
		pasid_clear_entry_with_fpd(pe);
	else
		pasid_clear_entry(pe);
}

static inline void pasid_set_bits(u64 *ptr, u64 mask, u64 bits)
{
	u64 old;

	old = READ_ONCE(*ptr);
	WRITE_ONCE(*ptr, (old & ~mask) | bits);
}

/*
 * Setup the DID(Domain Identifier) field (Bit 64~79) of scalable mode
 * PASID entry.
 */
static inline void
pasid_set_domain_id(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[1], GENMASK_ULL(15, 0), value);
}

/*
 * Get domain ID value of a scalable mode PASID entry.
 */
static inline u16
pasid_get_domain_id(struct pasid_entry *pe)
{
	return (u16)(READ_ONCE(pe->val[1]) & GENMASK_ULL(15, 0));
}

/*
 * Setup the SLPTPTR(Second Level Page Table Pointer) field (Bit 12~63)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_slptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], VTD_PAGE_MASK, value);
}

/*
 * Setup the AW(Address Width) field (Bit 2~4) of a scalable mode PASID
 * entry.
 */
static inline void
pasid_set_address_width(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(4, 2), value << 2);
}

/*
 * Setup the PGTT(PASID Granular Translation Type) field (Bit 6~8)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_translation_type(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[0], GENMASK_ULL(8, 6), value << 6);
}

/*
 * Enable fault processing by clearing the FPD(Fault Processing
 * Disable) field (Bit 1) of a scalable mode PASID entry.
 */
static inline void pasid_set_fault_enable(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 1, 0);
}

/*
 * Setup the SRE(Supervisor Request Enable) field (Bit 128) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_sre(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 0, 1);
}

/*
 * Setup the WPE(Write Protect Enable) field (Bit 132) of a
 * scalable mode PASID entry.
 */
static inline void pasid_set_wpe(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[2], 1 << 4, 1 << 4);
}

/*
 * Setup the P(Present) field (Bit 0) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_present(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[0], 1 << 0, 1);
}

/*
 * Setup Page Walk Snoop bit (Bit 87) of a scalable mode PASID
 * entry.
 */
static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
{
	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
}

/*
 * Setup the Page Snoop (PGSNP) field (Bit 88) of a scalable mode
 * PASID entry.
 */
static inline void
pasid_set_pgsnp(struct pasid_entry *pe)
{
	pasid_set_bits(&pe->val[1], 1ULL << 24, 1ULL << 24);
}

/*
 * Setup the First Level Page table Pointer field (Bit 140~191)
 * of a scalable mode PASID entry.
 */
static inline void
pasid_set_flptr(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], VTD_PAGE_MASK, value);
}

/*
 * Setup the First Level Paging Mode field (Bit 130~131) of a
 * scalable mode PASID entry.
 */
static inline void
pasid_set_flpm(struct pasid_entry *pe, u64 value)
{
	pasid_set_bits(&pe->val[2], GENMASK_ULL(3, 2), value << 2);
}

static void
pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
				    u16 did, u32 pasid)
{
	struct qi_desc desc;

	desc.qw0 = QI_PC_DID(did) | QI_PC_GRAN(QI_PC_PASID_SEL) |
		QI_PC_PASID(pasid) | QI_PC_TYPE;
	desc.qw1 = 0;
	desc.qw2 = 0;
	desc.qw3 = 0;

	qi_submit_sync(iommu, &desc, 1, 0);
}

static void
devtlb_invalidation_with_pasid(struct intel_iommu *iommu,
			       struct device *dev, u32 pasid)
{
	struct device_domain_info *info;
	u16 sid, qdep, pfsid;

	info = dev_iommu_priv_get(dev);
	if (!info || !info->ats_enabled)
		return;

	sid = info->bus << 8 | info->devfn;
	qdep = info->ats_qdep;
	pfsid = info->pfsid;

	/*
	 * When PASID 0 is used, it indicates RID2PASID(DMA request w/o PASID),
	 * devTLB flush w/o PASID should be used. For non-zero PASID under
	 * SVA usage, device could do DMA with multiple PASIDs. It is more
	 * efficient to flush devTLB specific to the PASID.
	 */
	if (pasid == PASID_RID2PASID)
		qi_flush_dev_iotlb(iommu, sid, pfsid, qdep, 0, 64 - VTD_PAGE_SHIFT);
	else
		qi_flush_dev_iotlb_pasid(iommu, sid, pfsid, pasid, qdep, 0, 64 - VTD_PAGE_SHIFT);
}

void intel_pasid_tear_down_entry(struct intel_iommu *iommu, struct device *dev,
				 u32 pasid, bool fault_ignore)
{
	struct pasid_entry *pte;
	u16 did, pgtt;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte) || !pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return;
	}

	did = pasid_get_domain_id(pte);
	pgtt = pasid_pte_get_pgtt(pte);
	intel_pasid_clear_entry(dev, pasid, fault_ignore);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	pasid_cache_invalidation_with_pasid(iommu, did, pasid);

	if (pgtt == PASID_ENTRY_PGTT_PT || pgtt == PASID_ENTRY_PGTT_FL_ONLY)
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	else
		iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}

/*
 * This function flushes cache for a newly setup pasid table entry.
 * Caller of it should not modify the in-use pasid table entries.
 */
static void pasid_flush_caches(struct intel_iommu *iommu,
			       struct pasid_entry *pte,
			       u32 pasid, u16 did)
{
	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	if (cap_caching_mode(iommu->cap)) {
		pasid_cache_invalidation_with_pasid(iommu, did, pasid);
		qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
	} else {
		iommu_flush_write_buffer(iommu);
	}
}

/*
 * Set up the scalable mode pasid table entry for first only
 * translation type.
 */
int intel_pasid_setup_first_level(struct intel_iommu *iommu,
				  struct device *dev, pgd_t *pgd,
				  u32 pasid, u16 did, int flags)
{
	struct pasid_entry *pte;

	if (!ecap_flts(iommu->ecap)) {
		pr_err("No first level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	if (flags & PASID_FLAG_SUPERVISOR_MODE) {
#ifdef CONFIG_X86
		unsigned long cr0 = read_cr0();

		/* CR0.WP is normally set but just to be sure */
		if (unlikely(!(cr0 & X86_CR0_WP))) {
			pr_err("No CPU write protect!\n");
			return -EINVAL;
		}
#endif
		if (!ecap_srs(iommu->ecap)) {
			pr_err("No supervisor request support on %s\n",
			       iommu->name);
			return -EINVAL;
		}
	}

	if ((flags & PASID_FLAG_FL5LP) && !cap_fl5lp_support(iommu->cap)) {
		pr_err("No 5-level paging support for first-level on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);

	/* Setup the first level page table pointer: */
	pasid_set_flptr(pte, (u64)__pa(pgd));
	if (flags & PASID_FLAG_SUPERVISOR_MODE) {
		pasid_set_sre(pte);
		pasid_set_wpe(pte);
	}

	if (flags & PASID_FLAG_FL5LP)
		pasid_set_flpm(pte, 1);

	if (flags & PASID_FLAG_PAGE_SNOOP)
		pasid_set_pgsnp(pte);

	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/* Setup Present and PASID Granular Transfer Type: */
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_FL_ONLY);
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Skip top levels of page tables for iommu which has less agaw
 * than default. Unnecessary for PT mode.
 */
static inline int iommu_skip_agaw(struct dmar_domain *domain,
				  struct intel_iommu *iommu,
				  struct dma_pte **pgd)
{
	int agaw;

	for (agaw = domain->agaw; agaw > iommu->agaw; agaw--) {
		*pgd = phys_to_virt(dma_pte_addr(*pgd));
		if (!dma_pte_present(*pgd))
			return -EINVAL;
	}

	return agaw;
}

/*
 * Set up the scalable mode pasid entry for second only translation type.
 */
int intel_pasid_setup_second_level(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	struct dma_pte *pgd;
	u64 pgd_val;
	int agaw;
	u16 did;

	/*
	 * If hardware advertises no support for second level
	 * translation, return directly.
	 */
	if (!ecap_slts(iommu->ecap)) {
		pr_err("No second level translation support on %s\n",
		       iommu->name);
		return -EINVAL;
	}

	pgd = domain->pgd;
	agaw = iommu_skip_agaw(domain, iommu, &pgd);
	if (agaw < 0) {
		dev_err(dev, "Invalid domain page table\n");
		return -EINVAL;
	}

	pgd_val = virt_to_phys(pgd);
	did = domain_id_iommu(domain, iommu);

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_slptr(pte, pgd_val);
	pasid_set_address_width(pte, agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_SL_ONLY);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/*
	 * Since it is a second level only translation setup, we should
	 * set SRE bit as well (addresses are expected to be GPAs).
	 */
	if (pasid != PASID_RID2PASID && ecap_srs(iommu->ecap))
		pasid_set_sre(pte);
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Set up the scalable mode pasid entry for passthrough translation type.
 */
int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
				   struct dmar_domain *domain,
				   struct device *dev, u32 pasid)
{
	u16 did = FLPT_DEFAULT_DID;
	struct pasid_entry *pte;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (!pte) {
		spin_unlock(&iommu->lock);
		return -ENODEV;
	}

	if (pasid_pte_is_present(pte)) {
		spin_unlock(&iommu->lock);
		return -EBUSY;
	}

	pasid_clear_entry(pte);
	pasid_set_domain_id(pte, did);
	pasid_set_address_width(pte, iommu->agaw);
	pasid_set_translation_type(pte, PASID_ENTRY_PGTT_PT);
	pasid_set_fault_enable(pte);
	pasid_set_page_snoop(pte, !!ecap_smpwc(iommu->ecap));

	/*
	 * We should set SRE bit as well since the addresses are expected
	 * to be GPAs.
	 */
	if (ecap_srs(iommu->ecap))
		pasid_set_sre(pte);
	pasid_set_present(pte);
	spin_unlock(&iommu->lock);

	pasid_flush_caches(iommu, pte, pasid, did);

	return 0;
}

/*
 * Set the page snoop control for a pasid entry which has been set up.
 */
void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
					  struct device *dev, u32 pasid)
{
	struct pasid_entry *pte;
	u16 did;

	spin_lock(&iommu->lock);
	pte = intel_pasid_get_entry(dev, pasid);
	if (WARN_ON(!pte || !pasid_pte_is_present(pte))) {
		spin_unlock(&iommu->lock);
		return;
	}

	pasid_set_pgsnp(pte);
	did = pasid_get_domain_id(pte);
	spin_unlock(&iommu->lock);

	if (!ecap_coherent(iommu->ecap))
		clflush_cache_range(pte, sizeof(*pte));

	/*
	 * VT-d spec 3.4 table23 states guides for cache invalidation:
	 *
	 * - PASID-selective-within-Domain PASID-cache invalidation
	 * - PASID-selective PASID-based IOTLB invalidation
	 * - If (pasid is RID_PASID)
	 *    - Global Device-TLB invalidation to affected functions
	 *   Else
	 *    - PASID-based Device-TLB invalidation (with S=1 and
	 *      Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
	 */
	pasid_cache_invalidation_with_pasid(iommu, did, pasid);
	qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);

	/* Device IOTLB doesn't need to be flushed in caching mode. */
	if (!cap_caching_mode(iommu->cap))
		devtlb_invalidation_with_pasid(iommu, dev, pasid);
}