xref: /openbmc/linux/drivers/iommu/amd/io_pgtable.c (revision 354440a7)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic AMD IO page table allocator.
 *
 * Copyright (C) 2020 Advanced Micro Devices, Inc.
 * Author: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 */

#define pr_fmt(fmt)     "AMD-Vi: " fmt
#define dev_fmt(fmt)    pr_fmt(fmt)

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "amd_iommu_types.h"
#include "amd_iommu.h"

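/*
 * The v1 page table does not use the io-pgtable TLB callbacks;
 * invalidation is issued explicitly by the AMD IOMMU driver (see the
 * flush calls at the end of iommu_v1_map_pages() below), so these
 * hooks are intentionally empty.
 */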
static void v1_tlb_flush_all(void *cookie)
{
}

static void v1_tlb_flush_walk(unsigned long iova, size_t size,
				  size_t granule, void *cookie)
{
}

static void v1_tlb_add_page(struct iommu_iotlb_gather *gather,
					 unsigned long iova, size_t granule,
					 void *cookie)
{
}

static const struct iommu_flush_ops v1_flush_ops = {
	.tlb_flush_all	= v1_tlb_flush_all,
	.tlb_flush_walk = v1_tlb_flush_walk,
	.tlb_add_page	= v1_tlb_add_page,
};

/*
 * Helper function to get the first pte of a large mapping
 */
static u64 *first_pte_l7(u64 *pte, unsigned long *page_size,
			 unsigned long *count)
{
	unsigned long pte_mask, pg_size, cnt;
	u64 *fpte;

	pg_size  = PTE_PAGE_SIZE(*pte);
	cnt      = PAGE_SIZE_PTE_COUNT(pg_size);
	pte_mask = ~((cnt << 3) - 1);
	fpte     = (u64 *)(((unsigned long)pte) & pte_mask);

	if (page_size)
		*page_size = pg_size;

	if (count)
		*count = cnt;

	return fpte;
}
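
/*
 * For example, a 32KiB mapping is stored as 8 identical level-7 PTEs in
 * the leaf table: PAGE_SIZE_PTE_COUNT() returns 8, the series spans
 * 8 * 8 = 64 bytes, and masking the PTE address with ~(64 - 1) yields a
 * pointer to the first entry of the naturally aligned series.
 */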

/****************************************************************************
 *
 * The functions below are used to create the page table mappings for
 * unity mapped regions.
 *
 ****************************************************************************/

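/*
 * Page-table pages that are unlinked from the tree are collected on a
 * freelist and only returned to the allocator after the IOTLB has been
 * flushed, so the IOMMU never walks a page that has already been freed.
 */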
static void free_pt_page(u64 *pt, struct list_head *freelist)
{
	struct page *p = virt_to_page(pt);

	list_add_tail(&p->lru, freelist);
}

static void free_pt_lvl(u64 *pt, struct list_head *freelist, int lvl)
{
	u64 *p;
	int i;

	for (i = 0; i < 512; ++i) {
		/* PTE present? */
		if (!IOMMU_PTE_PRESENT(pt[i]))
			continue;

		/* Large PTE? */
		if (PM_PTE_LEVEL(pt[i]) == 0 ||
		    PM_PTE_LEVEL(pt[i]) == 7)
			continue;

		/*
		 * Free the next level. No need to look at l1 tables here since
		 * they can only contain leaf PTEs; just free them directly.
		 */
		p = IOMMU_PTE_PAGE(pt[i]);
		if (lvl > 2)
			free_pt_lvl(p, freelist, lvl - 1);
		else
			free_pt_page(p, freelist);
	}

	free_pt_page(pt, freelist);
}

static void free_sub_pt(u64 *root, int mode, struct list_head *freelist)
{
	switch (mode) {
	case PAGE_MODE_NONE:
	case PAGE_MODE_7_LEVEL:
		break;
	case PAGE_MODE_1_LEVEL:
		free_pt_page(root, freelist);
		break;
	case PAGE_MODE_2_LEVEL:
	case PAGE_MODE_3_LEVEL:
	case PAGE_MODE_4_LEVEL:
	case PAGE_MODE_5_LEVEL:
	case PAGE_MODE_6_LEVEL:
		free_pt_lvl(root, freelist, mode);
		break;
	default:
		BUG();
	}
}

void amd_iommu_domain_set_pgtable(struct protection_domain *domain,
				  u64 *root, int mode)
{
	u64 pt_root;

	/* lowest 3 bits encode pgtable mode */
	pt_root = mode & 7;
	pt_root |= (u64)root;

	amd_iommu_domain_set_pt_root(domain, pt_root);
}

/*
 * This function is used to add another level to an IO page table. Adding
 * another level increases the size of the address space by 9 bits, up to
 * a maximum of 64 bits.
 */
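/*
 * For example, growing from PAGE_MODE_3_LEVEL (a 39-bit IOVA space) to
 * PAGE_MODE_4_LEVEL extends the usable range to 48 bits; the old root
 * table becomes the first entry of the newly allocated top level.
 */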
static bool increase_address_space(struct protection_domain *domain,
				   unsigned long address,
				   gfp_t gfp)
{
	unsigned long flags;
	bool ret = true;
	u64 *pte;

	pte = alloc_pgtable_page(domain->nid, gfp);
	if (!pte)
		return false;

	spin_lock_irqsave(&domain->lock, flags);

	if (address <= PM_LEVEL_SIZE(domain->iop.mode))
		goto out;

	ret = false;
	if (WARN_ON_ONCE(domain->iop.mode == PAGE_MODE_6_LEVEL))
		goto out;

	*pte = PM_LEVEL_PDE(domain->iop.mode, iommu_virt_to_phys(domain->iop.root));

	domain->iop.root  = pte;
	domain->iop.mode += 1;
	amd_iommu_update_and_flush_device_table(domain);
	amd_iommu_domain_flush_complete(domain);

	/*
	 * Device Table needs to be updated and flushed before the new root can
	 * be published.
	 */
	amd_iommu_domain_set_pgtable(domain, pte, domain->iop.mode);

	pte = NULL;
	ret = true;

out:
	spin_unlock_irqrestore(&domain->lock, flags);
	free_page((unsigned long)pte);

	return ret;
}

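/*
 * Walk the page table from the root down to the level required for
 * @page_size, allocating intermediate page-table pages as needed.
 * Returns a pointer to the (first) PTE for @address, or NULL on failure.
 * *updated is set whenever an existing entry had to be torn down, so
 * the caller knows a TLB flush is required.
 */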
static u64 *alloc_pte(struct protection_domain *domain,
		      unsigned long address,
		      unsigned long page_size,
		      u64 **pte_page,
		      gfp_t gfp,
		      bool *updated)
{
	int level, end_lvl;
	u64 *pte, *page;

	BUG_ON(!is_power_of_2(page_size));

	while (address > PM_LEVEL_SIZE(domain->iop.mode)) {
		/*
		 * Return an error if there is no memory to update the
		 * page-table.
		 */
		if (!increase_address_space(domain, address, gfp))
			return NULL;
	}

	level   = domain->iop.mode - 1;
	pte     = &domain->iop.root[PM_LEVEL_INDEX(level, address)];
	address = PAGE_SIZE_ALIGN(address, page_size);
	end_lvl = PAGE_SIZE_LEVEL(page_size);

	while (level > end_lvl) {
		u64 __pte, __npte;
		int pte_level;

		__pte     = *pte;
		pte_level = PM_PTE_LEVEL(__pte);

		/*
		 * If we replace a series of large PTEs, we need
		 * to tear down all of them.
		 */
		if (IOMMU_PTE_PRESENT(__pte) &&
		    pte_level == PAGE_MODE_7_LEVEL) {
			unsigned long count, i;
			u64 *lpte;

			lpte = first_pte_l7(pte, NULL, &count);

			/*
			 * Unmap the replicated PTEs that still match the
			 * original large mapping
			 */
			for (i = 0; i < count; ++i)
				cmpxchg64(&lpte[i], __pte, 0ULL);

			*updated = true;
			continue;
		}

		if (!IOMMU_PTE_PRESENT(__pte) ||
		    pte_level == PAGE_MODE_NONE) {
			page = alloc_pgtable_page(domain->nid, gfp);

			if (!page)
				return NULL;

			__npte = PM_LEVEL_PDE(level, iommu_virt_to_phys(page));

			/* pte could have been changed somewhere. */
			if (!try_cmpxchg64(pte, &__pte, __npte))
				free_page((unsigned long)page);
			else if (IOMMU_PTE_PRESENT(__pte))
				*updated = true;

			continue;
		}

		/* No level skipping support yet */
		if (pte_level != level)
			return NULL;

		level -= 1;

		pte = IOMMU_PTE_PAGE(__pte);

		if (pte_page && level == end_lvl)
			*pte_page = pte;

		pte = &pte[PM_LEVEL_INDEX(level, address)];
	}

	return pte;
}

/*
 * This function checks if there is a PTE for a given dma address. If
 * there is one, it returns the pointer to it.
 */
static u64 *fetch_pte(struct amd_io_pgtable *pgtable,
		      unsigned long address,
		      unsigned long *page_size)
{
	int level;
	u64 *pte;

	*page_size = 0;

	if (address > PM_LEVEL_SIZE(pgtable->mode))
		return NULL;

	level	   =  pgtable->mode - 1;
	pte	   = &pgtable->root[PM_LEVEL_INDEX(level, address)];
	*page_size =  PTE_LEVEL_PAGE_SIZE(level);

	while (level > 0) {

		/* Not Present */
		if (!IOMMU_PTE_PRESENT(*pte))
			return NULL;

		/* Large PTE */
		if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL ||
		    PM_PTE_LEVEL(*pte) == PAGE_MODE_NONE)
			break;

		/* No level skipping support yet */
		if (PM_PTE_LEVEL(*pte) != level)
			return NULL;

		level -= 1;

		/* Walk to the next level */
		pte	   = IOMMU_PTE_PAGE(*pte);
		pte	   = &pte[PM_LEVEL_INDEX(level, address)];
		*page_size = PTE_LEVEL_PAGE_SIZE(level);
	}

	/*
	 * If we have a series of large PTEs, make
	 * sure to return a pointer to the first one.
	 */
	if (PM_PTE_LEVEL(*pte) == PAGE_MODE_7_LEVEL)
		pte = first_pte_l7(pte, page_size, NULL);

	return pte;
}

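/*
 * Atomically clear *pte. If the old entry pointed to a lower-level
 * page table, queue that sub-tree on @freelist so it can be freed once
 * the IOTLB has been flushed.
 */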
static void free_clear_pte(u64 *pte, u64 pteval, struct list_head *freelist)
{
	u64 *pt;
	int mode;

	while (!try_cmpxchg64(pte, &pteval, 0))
		pr_warn("AMD-Vi: IOMMU pte changed since we read it\n");

	if (!IOMMU_PTE_PRESENT(pteval))
		return;

	pt   = IOMMU_PTE_PAGE(pteval);
	mode = IOMMU_PTE_MODE(pteval);

	free_sub_pt(pt, mode, freelist);
}

/*
 * Generic mapping function. It maps a physical address into a DMA
 * address space and allocates the page table pages if necessary.
 * In the future it can be extended to a generic mapping function
 * supporting all features of AMD IOMMU page tables like level skipping
 * and full 64 bit address spaces.
 */
static int iommu_v1_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int prot, gfp_t gfp, size_t *mapped)
{
	struct protection_domain *dom = io_pgtable_ops_to_domain(ops);
	LIST_HEAD(freelist);
	bool updated = false;
	u64 __pte, *pte;
	int ret, i, count;

	BUG_ON(!IS_ALIGNED(iova, pgsize));
	BUG_ON(!IS_ALIGNED(paddr, pgsize));

	ret = -EINVAL;
	if (!(prot & IOMMU_PROT_MASK))
		goto out;

	while (pgcount > 0) {
		count = PAGE_SIZE_PTE_COUNT(pgsize);
		pte   = alloc_pte(dom, iova, pgsize, NULL, gfp, &updated);

		ret = -ENOMEM;
		if (!pte)
			goto out;

		for (i = 0; i < count; ++i)
			free_clear_pte(&pte[i], pte[i], &freelist);

		if (!list_empty(&freelist))
			updated = true;

		if (count > 1) {
			__pte = PAGE_SIZE_PTE(__sme_set(paddr), pgsize);
			__pte |= PM_LEVEL_ENC(7) | IOMMU_PTE_PR | IOMMU_PTE_FC;
		} else
			__pte = __sme_set(paddr) | IOMMU_PTE_PR | IOMMU_PTE_FC;

		if (prot & IOMMU_PROT_IR)
			__pte |= IOMMU_PTE_IR;
		if (prot & IOMMU_PROT_IW)
			__pte |= IOMMU_PTE_IW;

		for (i = 0; i < count; ++i)
			pte[i] = __pte;

		iova  += pgsize;
		paddr += pgsize;
		pgcount--;
		if (mapped)
			*mapped += pgsize;
	}

	ret = 0;

out:
	if (updated) {
		unsigned long flags;

		spin_lock_irqsave(&dom->lock, flags);
		/*
		 * Flush domain TLB(s) and wait for completion. Any Device-Table
		 * Updates and flushing already happened in
		 * increase_address_space().
		 */
		amd_iommu_domain_flush_tlb_pde(dom);
		amd_iommu_domain_flush_complete(dom);
		spin_unlock_irqrestore(&dom->lock, flags);
	}

	/* Everything flushed out, free pages now */
	put_pages_list(&freelist);

	return ret;
}

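/*
 * Unmap up to @pgcount pages of size @pgsize starting at @iova. Returns
 * the number of bytes actually unmapped, which may be less than
 * requested if an unmapped hole is encountered. IOTLB invalidation is
 * left to the caller via the iotlb_gather/flush path.
 */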
static unsigned long iommu_v1_unmap_pages(struct io_pgtable_ops *ops,
					  unsigned long iova,
					  size_t pgsize, size_t pgcount,
					  struct iommu_iotlb_gather *gather)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long long unmapped;
	unsigned long unmap_size;
	u64 *pte;
	size_t size = pgcount << __ffs(pgsize);

	BUG_ON(!is_power_of_2(pgsize));

	unmapped = 0;

	while (unmapped < size) {
		pte = fetch_pte(pgtable, iova, &unmap_size);
		if (pte) {
			int i, count;

			count = PAGE_SIZE_PTE_COUNT(unmap_size);
			for (i = 0; i < count; i++)
				pte[i] = 0ULL;
		} else {
			return unmapped;
		}

		iova = (iova & ~(unmap_size - 1)) + unmap_size;
		unmapped += unmap_size;
	}

	return unmapped;
}

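/*
 * Translate an IOVA back to the physical address it maps to by walking
 * the page table. Returns 0 if no valid mapping exists.
 */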
static phys_addr_t iommu_v1_iova_to_phys(struct io_pgtable_ops *ops, unsigned long iova)
{
	struct amd_io_pgtable *pgtable = io_pgtable_ops_to_data(ops);
	unsigned long offset_mask, pte_pgsize;
	u64 *pte, __pte;

	pte = fetch_pte(pgtable, iova, &pte_pgsize);

	if (!pte || !IOMMU_PTE_PRESENT(*pte))
		return 0;

	offset_mask = pte_pgsize - 1;
	__pte	    = __sme_clr(*pte & PM_ADDR_MASK);

	return (__pte & ~offset_mask) | (iova & offset_mask);
}

/*
 * ----------------------------------------------------
 */
static void v1_free_pgtable(struct io_pgtable *iop)
{
	struct amd_io_pgtable *pgtable = container_of(iop, struct amd_io_pgtable, iop);
	struct protection_domain *dom;
	LIST_HEAD(freelist);

	if (pgtable->mode == PAGE_MODE_NONE)
		return;

	dom = container_of(pgtable, struct protection_domain, iop);

	/* Page-table is not visible to IOMMU anymore, so free it */
	BUG_ON(pgtable->mode < PAGE_MODE_NONE ||
	       pgtable->mode > PAGE_MODE_6_LEVEL);

	free_sub_pt(pgtable->root, pgtable->mode, &freelist);

	/* Update data structure */
	amd_iommu_domain_clr_pt_root(dom);

	/* Make changes visible to IOMMUs */
	amd_iommu_domain_update(dom);

	put_pages_list(&freelist);
}

static struct io_pgtable *v1_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct amd_io_pgtable *pgtable = io_pgtable_cfg_to_data(cfg);

	cfg->pgsize_bitmap  = AMD_IOMMU_PGSIZES,
	cfg->ias            = IOMMU_IN_ADDR_BIT_SIZE,
	cfg->oas            = IOMMU_OUT_ADDR_BIT_SIZE,
	cfg->tlb            = &v1_flush_ops;

	pgtable->iop.ops.map_pages    = iommu_v1_map_pages;
	pgtable->iop.ops.unmap_pages  = iommu_v1_unmap_pages;
	pgtable->iop.ops.iova_to_phys = iommu_v1_iova_to_phys;

	return &pgtable->iop;
}

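/*
 * Init functions registered with the generic io-pgtable framework; the
 * AMD IOMMU driver selects this implementation (via alloc_io_pgtable_ops())
 * when it sets up a v1 page table for a protection domain.
 */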
struct io_pgtable_init_fns io_pgtable_amd_iommu_v1_init_fns = {
	.alloc	= v1_alloc_pgtable,
	.free	= v1_free_pgtable,
};