/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
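
/*
 * With the orders above a section maps 1MiB, a large page 64KiB and a
 * small page 4KiB. For example, SECT_SIZE = 1 << 20 = 0x100000 and
 * SECT_MASK = 0xFFF00000, so (addr & SECT_MASK) keeps only the
 * 1MiB-aligned base of an address.
 */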

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
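
/*
 * The low two bits of an entry encode its type. In a first-level entry,
 * 01 links to a second-level table, 10 is a 1MiB section and 00/11 are
 * faults; a link to zero_lv2_table also counts as a fault (see
 * ZERO_LV2LINK below). In a second-level entry, 00 is a fault, 01 is a
 * 64KiB large page and 1x is a 4KiB small page, matching the
 * mk_*ent_*() macros further down.
 */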

static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
	return iova & (size - 1);
}

#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
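
/*
 * Worked example of the address split: for iova = 0x12345678,
 * lv1ent_offset() = 0x12345678 >> 20 = 0x123 (index into the 4096
 * first-level entries) and lv2ent_offset() = (0x12345678 >> 12) & 0xFF
 * = 0x45 (index into the 256-entry second-level table), leaving 0x678
 * as the offset inside the 4KiB page.
 */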

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_QOS(n)	(((n) & 0xF) << 7)
#define CFG_MASK	0x0150FFFF /* Selecting bits 0-15, 20, 22 and 24 */
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */

#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))
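
/*
 * The raw version is kept in bits 31:21 of REG_MMU_VERSION as a 4-bit
 * major and a 7-bit minor part. For example, System MMU v3.3 reads
 * back as MAKE_MMU_VER(3, 3) = (3 << 7) | 3 = 0x183, for which
 * MMU_MAJ_VER() and MMU_MIN_VER() both return 3.
 */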

#define REG_PB0_SADDR		0x04C
#define REG_PB0_EADDR		0x050
#define REG_PB1_SADDR		0x054
#define REG_PB1_EADDR		0x058

#define has_sysmmu(dev)		((dev)->archdata.iommu != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
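
/*
 * Instead of marking unused first-level entries as plain faults, every
 * such entry is pointed at the shared all-zero second-level table via
 * ZERO_LV2LINK. A System MMU v3.3 that speculatively prefetches
 * first-level entries then caches a link to an empty table instead of
 * a fault entry; a cached fault entry would keep faulting even after
 * the mapping becomes valid (see the workaround notes in
 * alloc_lv2entry() below).
 */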

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};

static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};

/*
 * This structure is attached to dev->archdata.iommu of the master device
 * on device add. It contains a list of the SYSMMU controllers defined by
 * the device tree, which are bound to the given master device. It is
 * usually referenced through an 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
};

/*
 * This structure is an Exynos-specific generalization of struct
 * iommu_domain. It contains the list of SYSMMU controllers of all master
 * devices that have been attached to this domain, and the page tables of
 * the I/O address space the domain defines. It is usually referenced
 * through a 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller. This
 * includes hw resources like registers and clocks, pointers and list
 * nodes connecting it to all other structures, internal state and
 * parameters read from the device tree. It is usually referenced through
 * a 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *clk_master;		/* master's device clock */
	int activations;		/* number of calls to sysmmu_enable */
	spinlock_t lock;		/* lock for modifying state */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}
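
/*
 * The three helpers above implement a simple enable refcount: only the
 * 0 -> 1 transition actually programs the hardware and only the 1 -> 0
 * transition shuts it down. Callers serialize them with data->lock, as
 * __sysmmu_enable() and __sysmmu_disable() below do.
 */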

static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}
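
/*
 * Blocking stalls address translation so the TLB can be flushed
 * consistently: sysmmu_block() requests the blocked state through
 * REG_MMU_CTRL and polls bit 0 of REG_MMU_STATUS up to 120 times for
 * the hardware to acknowledge. On timeout the MMU is unblocked again
 * and false is returned, in which case callers skip the flush.
 */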

static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}

static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				       phys_addr_t pgd)
{
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}

static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: %#x\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: %#x\n", *ent);
	}
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU enters the blocked state when an interrupt occurs. */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault was not caused by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if IRQ is correctly configured.\n",
			__func__);
		BUG();
	} else {
		unsigned int base =
				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(&data->domain->domain,
					data->master, addr, itype);
	}

	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = CFG_LRU | CFG_QOS(15);
	unsigned int ver;

	ver = MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
	if (MMU_MAJ_VER(ver) == 3) {
		if (MMU_MIN_VER(ver) >= 2) {
			cfg |= CFG_FLPDCACHE;
			if (MMU_MIN_VER(ver) == 3) {
				cfg |= CFG_ACGEN;
				cfg &= ~CFG_LRU;
			} else {
				cfg |= CFG_SYSSEL;
			}
		}
	}

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
	data->version = ver;
}

static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	clk_disable(data->clk_master);
}

static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
			   struct exynos_iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

static void __sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					      sysmmu_iova_t iova)
{
	if (data->version == MAKE_MMU_VER(3, 3))
		__raw_writel(iova | 0x1, data->sfrbase + REG_MMU_FLUSH_ENTRY);
}
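
/*
 * Only System MMU v3.3 has the first-level page table (FLPD) cache, so
 * the helper above is a no-op on every other version. The flush reuses
 * REG_MMU_FLUSH_ENTRY and is issued without blocking the MMU; the
 * comment in alloc_lv2entry() explains why that is safe.
 */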

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data))
		__sysmmu_tlb_invalidate_flpdcache(data, iova);
	spin_unlock_irqrestore(&data->lock, flags);

	clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because the TLB is 8-way set-associative with 64 sets.
		 * A 1MB page can be cached in any of the 64 sets.
		 * A 64KB page can be in one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		clk_disable(data->clk_master);
	} else {
		dev_dbg(data->master,
			"disabled. Skipping TLB invalidation @ %#x\n", iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
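
/*
 * Example of the v2 invalidation count computed above: unmapping a
 * 64KiB large page gives num_inv = min(0x10000 / 0x1000, 64) = 16, so
 * sixteen REG_MMU_FLUSH_ENTRY writes, one per 4KiB step, cover every
 * TLB set the large page may occupy.
 */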

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	} else {
		ret = clk_prepare(data->clk);
		if (ret) {
			dev_err(dev, "Failed to prepare clk\n");
			return ret;
		}
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	} else {
		data->clk_master = NULL;
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}
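
/*
 * A hypothetical sketch of the device tree node this probe consumes,
 * assuming the usual single MMIO region, one interrupt and the two
 * clock names requested above ("sysmmu" and "master"). The unit
 * address, interrupt specifier and clock phandles below are made up;
 * see the samsung,exynos-sysmmu binding for the authoritative format:
 *
 *	sysmmu@13e80000 {
 *		compatible = "samsung,exynos-sysmmu";
 *		reg = <0x13e80000 0x1000>;
 *		interrupts = <0 86 0>;
 *		clock-names = "sysmmu", "master";
 *		clocks = <&clock 353>, <&clock 351>;
 *	};
 */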

#ifdef CONFIG_PM_SLEEP
static int exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);

	dev_dbg(dev, "suspend\n");
	if (is_sysmmu_active(data)) {
		__sysmmu_disable_nocount(data);
		pm_runtime_put(dev);
	}
	return 0;
}

static int exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);

	dev_dbg(dev, "resume\n");
	if (is_sysmmu_active(data)) {
		pm_runtime_get_sync(dev);
		__sysmmu_enable_nocount(data);
	}
	return 0;
}
#endif

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
};

static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
	}
};

static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = val;
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}
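
/*
 * The page tables live in ordinary kernel memory mapped DMA_TO_DEVICE
 * for the (cache-incoherent) MMU hardware, so every PTE store is
 * bracketed with dma_sync_single_for_cpu()/_for_device() to make sure
 * the new value is written back from the CPU cache before the table
 * walker can fetch it. The BUG_ON() in exynos_iommu_domain_alloc()
 * checks the assumption this shortcut relies on: the DMA address of a
 * table equals its physical address.
 */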

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA) {
		if (iommu_get_dma_cookie(&domain->domain) != 0)
			goto err_pgtable;
	} else if (type != IOMMU_DOMAIN_UNMANAGED) {
		goto err_pgtable;
	}

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_dma_cookie;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		domain->pgtable[i + 0] = ZERO_LV2LINK;
		domain->pgtable[i + 1] = ZERO_LV2LINK;
		domain->pgtable[i + 2] = ZERO_LV2LINK;
		domain->pgtable[i + 3] = ZERO_LV2LINK;
		domain->pgtable[i + 4] = ZERO_LV2LINK;
		domain->pgtable[i + 5] = ZERO_LV2LINK;
		domain->pgtable[i + 6] = ZERO_LV2LINK;
		domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&domain->domain);
err_pgtable:
	kfree(domain);
	return NULL;
}

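/*
 * Sizes implied above: the first-level table is 4096 four-byte entries
 * = 16KiB, hence the order-2 (four page) allocation, and lv2entcnt is
 * 4096 shorts = 8KiB, hence order 1. Each counter tracks how many of
 * the 256 entries of the corresponding second-level table are still
 * free; lv2set_page() consumes it and exynos_iommu_unmap() gives it
 * back.
 */
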
static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (__sysmmu_disable(data))
			data->master = NULL;
		list_del_init(&data->domain_node);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(iommu_domain);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				   struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;
	int ret = -ENODEV;

	if (!has_sysmmu(dev))
		return -ENODEV;

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_sync(data->sysmmu);
		ret = __sysmmu_enable(data, pagetable, domain);
		if (ret >= 0) {
			data->master = dev;

			spin_lock_irqsave(&domain->lock, flags);
			list_add_tail(&data->domain_node, &domain->clients);
			spin_unlock_irqrestore(&domain->lock, flags);
		}
	}

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
					__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				    struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	bool found = false;

	if (!has_sysmmu(dev))
		return;

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (data->master == dev) {
			if (__sysmmu_disable(data)) {
				data->master = NULL;
				list_del_init(&data->domain_node);
			}
			pm_runtime_put(data->sysmmu);
			found = true;
		}
	}
	spin_unlock_irqrestore(&domain->lock, flags);

	if (found)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);

		/*
		 * If the pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * the FLPD cache may cache the address of zero_l2_table.
		 * This function replaces the zero_l2_table with a new L2
		 * page table to write valid mappings.
		 * Accessing the valid area may cause a page fault since the
		 * FLPD cache may still cache zero_l2_table for the valid
		 * area instead of the new L2 page table that holds the
		 * mapping information of the valid area.
		 * Thus any replacement of zero_l2_table with another valid
		 * L2 page table must involve FLPD cache invalidation for
		 * System MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate the TLB
		 * without blocking because the target address of the TLB
		 * invalidation is not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	update_pte(sent, mk_lv1ent_sect(paddr));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing the FLPD cache in System MMU v3.3, which may
		 * cache an FLPD entry by speculative prefetch of an SLPD
		 * that has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
								short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		update_pte(pent, mk_lv2ent_spage(paddr));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}
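
/*
 * A 64KiB large page is recorded as 16 consecutive second-level
 * entries that all carry the same 64KiB-aligned physical address plus
 * the large page type bits, which is why the loop above does not
 * advance paddr. On a partial collision the entries already written
 * are wiped with memset() before returning -EADDRINUSE.
 */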

/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance by caching more page table entries from a page table walk.
 * However, the logic has a bug: once a faulty page table entry has been
 * cached, System MMU keeps reporting a page fault on hits to that cached
 * entry even after the entry in memory has been updated to a valid one.
 * To prevent caching faulty page table entries which may be updated to
 * valid entries later, the virtual memory manager must apply the
 * workaround described below.
 *
 * Any two consecutive I/O virtual address regions must have a hole of
 * 128KiB at maximum to prevent misbehavior of System MMU 3.x
 * (workaround for h/w bug).
 *
 * Precisely, the start address of any I/O virtual region must be aligned
 * to the following sizes for System MMU v3.1 and v3.2.
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively,
 * it needs more workarounds.
 * - Any two consecutive I/O virtual regions must have a hole of size
 *   larger than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned by 128KiB.
 */
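
/*
 * For illustration only (not from a datasheet): under the v3.3 rules
 * an allocator whose previous region ends at 0x1FFFF may start the
 * next region at 0x40000 (leaving the 128KiB hole 0x20000-0x3FFFF and
 * keeping the start 128KiB-aligned), but not at 0x20000 or 0x30000.
 */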
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
				       &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

1016bfa00489SMarek Szyprowski static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
101766a7ed84SCho KyongHo 					      sysmmu_iova_t iova, size_t size)
101866a7ed84SCho KyongHo {
1019469acebeSMarek Szyprowski 	struct sysmmu_drvdata *data;
102066a7ed84SCho KyongHo 	unsigned long flags;
102166a7ed84SCho KyongHo 
1022bfa00489SMarek Szyprowski 	spin_lock_irqsave(&domain->lock, flags);
102366a7ed84SCho KyongHo 
1024bfa00489SMarek Szyprowski 	list_for_each_entry(data, &domain->clients, domain_node)
1025469acebeSMarek Szyprowski 		sysmmu_tlb_invalidate_entry(data, iova, size);
102666a7ed84SCho KyongHo 
1027bfa00489SMarek Szyprowski 	spin_unlock_irqrestore(&domain->lock, flags);
102866a7ed84SCho KyongHo }
102966a7ed84SCho KyongHo 
1030bfa00489SMarek Szyprowski static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
1031d09d78fcSCho KyongHo 				 unsigned long l_iova, size_t size)
10322a96536eSKyongHo Cho {
1033bfa00489SMarek Szyprowski 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1034d09d78fcSCho KyongHo 	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
1035d09d78fcSCho KyongHo 	sysmmu_pte_t *ent;
103661128f08SCho KyongHo 	size_t err_pgsize;
1037d09d78fcSCho KyongHo 	unsigned long flags;
10382a96536eSKyongHo Cho 
1039bfa00489SMarek Szyprowski 	BUG_ON(domain->pgtable == NULL);
10402a96536eSKyongHo Cho 
1041bfa00489SMarek Szyprowski 	spin_lock_irqsave(&domain->pgtablelock, flags);
10422a96536eSKyongHo Cho 
1043bfa00489SMarek Szyprowski 	ent = section_entry(domain->pgtable, iova);
10442a96536eSKyongHo Cho 
10452a96536eSKyongHo Cho 	if (lv1ent_section(ent)) {
10460bf4e54dSCho KyongHo 		if (WARN_ON(size < SECT_SIZE)) {
104761128f08SCho KyongHo 			err_pgsize = SECT_SIZE;
104861128f08SCho KyongHo 			goto err;
104961128f08SCho KyongHo 		}
10502a96536eSKyongHo Cho 
1051f171ababSSachin Kamat 		/* workaround for h/w bug in System MMU v3.3 */
10525e3435ebSMarek Szyprowski 		update_pte(ent, ZERO_LV2LINK);
10532a96536eSKyongHo Cho 		size = SECT_SIZE;
10542a96536eSKyongHo Cho 		goto done;
10552a96536eSKyongHo Cho 	}
10562a96536eSKyongHo Cho 
10572a96536eSKyongHo Cho 	if (unlikely(lv1ent_fault(ent))) {
10582a96536eSKyongHo Cho 		if (size > SECT_SIZE)
10592a96536eSKyongHo Cho 			size = SECT_SIZE;
10602a96536eSKyongHo Cho 		goto done;
10612a96536eSKyongHo Cho 	}
10622a96536eSKyongHo Cho 
10632a96536eSKyongHo Cho 	/* lv1ent_page(sent) == true here */
10642a96536eSKyongHo Cho 
10652a96536eSKyongHo Cho 	ent = page_entry(ent, iova);
10662a96536eSKyongHo Cho 
10672a96536eSKyongHo Cho 	if (unlikely(lv2ent_fault(ent))) {
10682a96536eSKyongHo Cho 		size = SPAGE_SIZE;
10692a96536eSKyongHo Cho 		goto done;
10702a96536eSKyongHo Cho 	}
10712a96536eSKyongHo Cho 
10722a96536eSKyongHo Cho 	if (lv2ent_small(ent)) {
10735e3435ebSMarek Szyprowski 		update_pte(ent, 0);
10742a96536eSKyongHo Cho 		size = SPAGE_SIZE;
1075bfa00489SMarek Szyprowski 		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
10762a96536eSKyongHo Cho 		goto done;
10772a96536eSKyongHo Cho 	}
10782a96536eSKyongHo Cho 
10792a96536eSKyongHo Cho 	/* lv1ent_large(ent) == true here */
10800bf4e54dSCho KyongHo 	if (WARN_ON(size < LPAGE_SIZE)) {
108161128f08SCho KyongHo 		err_pgsize = LPAGE_SIZE;
108261128f08SCho KyongHo 		goto err;
108361128f08SCho KyongHo 	}
10842a96536eSKyongHo Cho 
10855e3435ebSMarek Szyprowski 	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
10865e3435ebSMarek Szyprowski 				sizeof(*ent) * SPAGES_PER_LPAGE,
10875e3435ebSMarek Szyprowski 				DMA_TO_DEVICE);
10882a96536eSKyongHo Cho 	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
10895e3435ebSMarek Szyprowski 	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
10905e3435ebSMarek Szyprowski 				   sizeof(*ent) * SPAGES_PER_LPAGE,
10915e3435ebSMarek Szyprowski 				   DMA_TO_DEVICE);
10922a96536eSKyongHo Cho 	size = LPAGE_SIZE;
1093bfa00489SMarek Szyprowski 	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
10942a96536eSKyongHo Cho done:
1095bfa00489SMarek Szyprowski 	spin_unlock_irqrestore(&domain->pgtablelock, flags);
10962a96536eSKyongHo Cho 
1097bfa00489SMarek Szyprowski 	exynos_iommu_tlb_invalidate_entry(domain, iova, size);
10982a96536eSKyongHo Cho 
10992a96536eSKyongHo Cho 	return size;
110061128f08SCho KyongHo err:
1101bfa00489SMarek Szyprowski 	spin_unlock_irqrestore(&domain->pgtablelock, flags);
110261128f08SCho KyongHo 
11030bf4e54dSCho KyongHo 	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
110461128f08SCho KyongHo 		__func__, size, iova, err_pgsize);
110561128f08SCho KyongHo 
110661128f08SCho KyongHo 	return 0;
11072a96536eSKyongHo Cho }
11082a96536eSKyongHo Cho 
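/*
 * Translate an I/O virtual address to a physical address by walking
 * the page table in software, the same way the System MMU does in
 * hardware: try a 1MiB section first, then descend to the second
 * level for 64KiB large and 4KiB small pages. Returns 0 (no valid
 * physical address) if nothing is mapped at @iova.
 */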
1109bfa00489SMarek Szyprowski static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
1110bb5547acSVarun Sethi 					  dma_addr_t iova)
11112a96536eSKyongHo Cho {
1112bfa00489SMarek Szyprowski 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1113d09d78fcSCho KyongHo 	sysmmu_pte_t *entry;
11142a96536eSKyongHo Cho 	unsigned long flags;
11152a96536eSKyongHo Cho 	phys_addr_t phys = 0;
11162a96536eSKyongHo Cho 
1117bfa00489SMarek Szyprowski 	spin_lock_irqsave(&domain->pgtablelock, flags);
11182a96536eSKyongHo Cho 
1119bfa00489SMarek Szyprowski 	entry = section_entry(domain->pgtable, iova);
11202a96536eSKyongHo Cho 
11212a96536eSKyongHo Cho 	if (lv1ent_section(entry)) {
11222a96536eSKyongHo Cho 		phys = section_phys(entry) + section_offs(iova);
11232a96536eSKyongHo Cho 	} else if (lv1ent_page(entry)) {
11242a96536eSKyongHo Cho 		entry = page_entry(entry, iova);
11252a96536eSKyongHo Cho 
11262a96536eSKyongHo Cho 		if (lv2ent_large(entry))
11272a96536eSKyongHo Cho 			phys = lpage_phys(entry) + lpage_offs(iova);
11282a96536eSKyongHo Cho 		else if (lv2ent_small(entry))
11292a96536eSKyongHo Cho 			phys = spage_phys(entry) + spage_offs(iova);
11302a96536eSKyongHo Cho 	}
11312a96536eSKyongHo Cho 
1132bfa00489SMarek Szyprowski 	spin_unlock_irqrestore(&domain->pgtablelock, flags);
11332a96536eSKyongHo Cho 
11342a96536eSKyongHo Cho 	return phys;
11352a96536eSKyongHo Cho }
11362a96536eSKyongHo Cho 
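/*
 * ->device_group callback: reuse the device's existing group if it
 * already has one, otherwise allocate a fresh group, i.e. every
 * master ends up in its own group.
 */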
11376c2ae7e2SMarek Szyprowski static struct iommu_group *get_device_iommu_group(struct device *dev)
11386c2ae7e2SMarek Szyprowski {
11396c2ae7e2SMarek Szyprowski 	struct iommu_group *group;
11406c2ae7e2SMarek Szyprowski 
11416c2ae7e2SMarek Szyprowski 	group = iommu_group_get(dev);
11426c2ae7e2SMarek Szyprowski 	if (!group)
11436c2ae7e2SMarek Szyprowski 		group = iommu_group_alloc();
11446c2ae7e2SMarek Szyprowski 
11456c2ae7e2SMarek Szyprowski 	return group;
11466c2ae7e2SMarek Szyprowski }
11476c2ae7e2SMarek Szyprowski 
1148bf4a1c92SAntonios Motakis static int exynos_iommu_add_device(struct device *dev)
1149bf4a1c92SAntonios Motakis {
1150bf4a1c92SAntonios Motakis 	struct iommu_group *group;
1151bf4a1c92SAntonios Motakis 
115206801db0SMarek Szyprowski 	if (!has_sysmmu(dev))
115306801db0SMarek Szyprowski 		return -ENODEV;
115406801db0SMarek Szyprowski 
11556c2ae7e2SMarek Szyprowski 	group = iommu_group_get_for_dev(dev);
1156bf4a1c92SAntonios Motakis 
11576c2ae7e2SMarek Szyprowski 	if (IS_ERR(group))
1158bf4a1c92SAntonios Motakis 		return PTR_ERR(group);
1159bf4a1c92SAntonios Motakis 
1160bf4a1c92SAntonios Motakis 	iommu_group_put(group);
1161bf4a1c92SAntonios Motakis 
11626c2ae7e2SMarek Szyprowski 	return 0;
1163bf4a1c92SAntonios Motakis }
1164bf4a1c92SAntonios Motakis 
1165bf4a1c92SAntonios Motakis static void exynos_iommu_remove_device(struct device *dev)
1166bf4a1c92SAntonios Motakis {
116706801db0SMarek Szyprowski 	if (!has_sysmmu(dev))
116806801db0SMarek Szyprowski 		return;
116906801db0SMarek Szyprowski 
1170bf4a1c92SAntonios Motakis 	iommu_group_remove_device(dev);
1171bf4a1c92SAntonios Motakis }
1172bf4a1c92SAntonios Motakis 
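/*
 * ->of_xlate is called once for each System MMU listed in a master's
 * "iommus" devicetree property. Resolve the phandle to the sysmmu
 * platform device and queue its drvdata on the master's owner list,
 * allocating the owner structure on first use.
 */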
1173aa759fd3SMarek Szyprowski static int exynos_iommu_of_xlate(struct device *dev,
1174aa759fd3SMarek Szyprowski 				 struct of_phandle_args *spec)
1175aa759fd3SMarek Szyprowski {
1176aa759fd3SMarek Szyprowski 	struct exynos_iommu_owner *owner = dev->archdata.iommu;
1177aa759fd3SMarek Szyprowski 	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
1178aa759fd3SMarek Szyprowski 	struct sysmmu_drvdata *data;
1179aa759fd3SMarek Szyprowski 
1180aa759fd3SMarek Szyprowski 	if (!sysmmu)
1181aa759fd3SMarek Szyprowski 		return -ENODEV;
1182aa759fd3SMarek Szyprowski 
1183aa759fd3SMarek Szyprowski 	data = platform_get_drvdata(sysmmu);
1184aa759fd3SMarek Szyprowski 	if (!data)
1185aa759fd3SMarek Szyprowski 		return -ENODEV;
1186aa759fd3SMarek Szyprowski 
1187aa759fd3SMarek Szyprowski 	if (!owner) {
1188aa759fd3SMarek Szyprowski 		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
1189aa759fd3SMarek Szyprowski 		if (!owner)
1190aa759fd3SMarek Szyprowski 			return -ENOMEM;
1191aa759fd3SMarek Szyprowski 
1192aa759fd3SMarek Szyprowski 		INIT_LIST_HEAD(&owner->controllers);
1193aa759fd3SMarek Szyprowski 		dev->archdata.iommu = owner;
1194aa759fd3SMarek Szyprowski 	}
1195aa759fd3SMarek Szyprowski 
1196aa759fd3SMarek Szyprowski 	list_add_tail(&data->owner_node, &owner->controllers);
1197aa759fd3SMarek Szyprowski 	return 0;
1198aa759fd3SMarek Szyprowski }
1199aa759fd3SMarek Szyprowski 
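/*
 * The pgsize_bitmap below advertises exactly the three hardware page
 * sizes (1MiB sections, 64KiB large pages, 4KiB small pages), so the
 * iommu core only ever asks map()/unmap() for chunks of those sizes.
 */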
12008ed55c81SMarek Szyprowski static struct iommu_ops exynos_iommu_ops = {
1201e1fd1eaaSJoerg Roedel 	.domain_alloc = exynos_iommu_domain_alloc,
1202e1fd1eaaSJoerg Roedel 	.domain_free = exynos_iommu_domain_free,
1203ba5fa6f6SBjorn Helgaas 	.attach_dev = exynos_iommu_attach_device,
1204ba5fa6f6SBjorn Helgaas 	.detach_dev = exynos_iommu_detach_device,
1205ba5fa6f6SBjorn Helgaas 	.map = exynos_iommu_map,
1206ba5fa6f6SBjorn Helgaas 	.unmap = exynos_iommu_unmap,
1207315786ebSOlav Haugan 	.map_sg = default_iommu_map_sg,
1208ba5fa6f6SBjorn Helgaas 	.iova_to_phys = exynos_iommu_iova_to_phys,
12096c2ae7e2SMarek Szyprowski 	.device_group = get_device_iommu_group,
1210ba5fa6f6SBjorn Helgaas 	.add_device = exynos_iommu_add_device,
1211ba5fa6f6SBjorn Helgaas 	.remove_device = exynos_iommu_remove_device,
12122a96536eSKyongHo Cho 	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
1213aa759fd3SMarek Szyprowski 	.of_xlate = exynos_iommu_of_xlate,
12142a96536eSKyongHo Cho };
12152a96536eSKyongHo Cho 
12168ed55c81SMarek Szyprowski static bool init_done;
12178ed55c81SMarek Szyprowski 
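/*
 * Initialise in dependency order: the kmem cache for lv2 tables, the
 * sysmmu platform driver, the shared zero lv2 table backing
 * ZERO_LV2LINK, and finally the iommu ops for the whole platform bus.
 * The error labels unwind in exactly the reverse order.
 */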
12182a96536eSKyongHo Cho static int __init exynos_iommu_init(void)
12192a96536eSKyongHo Cho {
12202a96536eSKyongHo Cho 	int ret;
12212a96536eSKyongHo Cho 
1222734c3c73SCho KyongHo 	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
1223734c3c73SCho KyongHo 				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
1224734c3c73SCho KyongHo 	if (!lv2table_kmem_cache) {
1225734c3c73SCho KyongHo 		pr_err("%s: Failed to create kmem cache\n", __func__);
1226734c3c73SCho KyongHo 		return -ENOMEM;
1227734c3c73SCho KyongHo 	}
1228734c3c73SCho KyongHo 
12292a96536eSKyongHo Cho 	ret = platform_driver_register(&exynos_sysmmu_driver);
1230734c3c73SCho KyongHo 	if (ret) {
1231734c3c73SCho KyongHo 		pr_err("%s: Failed to register driver\n", __func__);
1232734c3c73SCho KyongHo 		goto err_reg_driver;
1233734c3c73SCho KyongHo 	}
12342a96536eSKyongHo Cho 
123566a7ed84SCho KyongHo 	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
123666a7ed84SCho KyongHo 	if (zero_lv2_table == NULL) {
123766a7ed84SCho KyongHo 		pr_err("%s: Failed to allocate zero level2 page table\n",
123866a7ed84SCho KyongHo 			__func__);
123966a7ed84SCho KyongHo 		ret = -ENOMEM;
124066a7ed84SCho KyongHo 		goto err_zero_lv2;
124166a7ed84SCho KyongHo 	}
124266a7ed84SCho KyongHo 
1243734c3c73SCho KyongHo 	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
1244734c3c73SCho KyongHo 	if (ret) {
1245734c3c73SCho KyongHo 		pr_err("%s: Failed to register exynos-iommu driver.\n",
1246734c3c73SCho KyongHo 								__func__);
1247734c3c73SCho KyongHo 		goto err_set_iommu;
1248734c3c73SCho KyongHo 	}
12492a96536eSKyongHo Cho 
12508ed55c81SMarek Szyprowski 	init_done = true;
12518ed55c81SMarek Szyprowski 
1252734c3c73SCho KyongHo 	return 0;
1253734c3c73SCho KyongHo err_set_iommu:
125466a7ed84SCho KyongHo 	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
125566a7ed84SCho KyongHo err_zero_lv2:
1256734c3c73SCho KyongHo 	platform_driver_unregister(&exynos_sysmmu_driver);
1257734c3c73SCho KyongHo err_reg_driver:
1258734c3c73SCho KyongHo 	kmem_cache_destroy(lv2table_kmem_cache);
12592a96536eSKyongHo Cho 	return ret;
12602a96536eSKyongHo Cho }
12618ed55c81SMarek Szyprowski 
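/*
 * Early devicetree hook: this runs before the platform bus is
 * populated, so the sysmmu platform device has to be created by hand.
 * The first device registered also becomes dma_dev, which performs
 * the page table cache maintenance seen in exynos_iommu_unmap() above.
 */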
12628ed55c81SMarek Szyprowski static int __init exynos_iommu_of_setup(struct device_node *np)
12638ed55c81SMarek Szyprowski {
12648ed55c81SMarek Szyprowski 	struct platform_device *pdev;
12658ed55c81SMarek Szyprowski 
12668ed55c81SMarek Szyprowski 	if (!init_done)
12678ed55c81SMarek Szyprowski 		exynos_iommu_init();
12688ed55c81SMarek Szyprowski 
12698ed55c81SMarek Szyprowski 	pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
12708ed55c81SMarek Szyprowski 	if (IS_ERR(pdev))
12718ed55c81SMarek Szyprowski 		return PTR_ERR(pdev);
12728ed55c81SMarek Szyprowski 
12735e3435ebSMarek Szyprowski 	/*
12745e3435ebSMarek Szyprowski 	 * Use the first registered sysmmu device for performing
12755e3435ebSMarek Szyprowski 	 * DMA mapping operations on IOMMU page tables (CPU cache flush).
12765e3435ebSMarek Szyprowski 	 */
12775e3435ebSMarek Szyprowski 	if (!dma_dev)
12785e3435ebSMarek Szyprowski 		dma_dev = &pdev->dev;
12795e3435ebSMarek Szyprowski 
12808ed55c81SMarek Szyprowski 	of_iommu_set_ops(np, &exynos_iommu_ops);
12818ed55c81SMarek Szyprowski 	return 0;
12828ed55c81SMarek Szyprowski }
12838ed55c81SMarek Szyprowski 
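/*
 * IOMMU_OF_DECLARE() puts this entry into a linker table that the OF
 * iommu core scans during early boot, invoking exynos_iommu_of_setup()
 * for every devicetree node compatible with "samsung,exynos-sysmmu".
 */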
12848ed55c81SMarek Szyprowski IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu",
12858ed55c81SMarek Szyprowski 		 exynos_iommu_of_setup);
1286