/* xref: /openbmc/linux/drivers/iommu/exynos-iommu.c (revision 2125afbe) */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
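
/*
 * Entry type encoding implied by the checks above, in bits [1:0]:
 * lv1: 0 = fault, 1 = link to an lv2 table, 2 = 1MiB section, 3 = reserved
 * lv2: 0 = fault, 1 = 64KiB large page, 2 or 3 = 4KiB small page
 */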

/*
 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces.
 * v5.0 introduced support for a 36bit physical address space by shifting
 * all page entry values by 4 bits.
 * All SYSMMU controllers in the system support address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on the first SYSMMU probe to the
 * proper value (0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4

static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
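
/*
 * The LV1/LV2 protection tables above are indexed by the IOMMU_READ and
 * IOMMU_WRITE bits of the 'prot' value, e.g. index (IOMMU_READ | IOMMU_WRITE)
 * == 3 selects the read/write encoding (see the mk_lv*ent_* macros below).
 */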

#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
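
/*
 * Example split of an iova by the two helpers above:
 * iova 0x12345678 -> lv1 index 0x123 (bits [31:20]),
 * lv2 index 0x45 (bits [19:12]), page offset 0x678 (bits [11:0]).
 */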

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_EAP		(1 << 2)
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */

/* common registers */
#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))
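/* e.g. MAKE_MMU_VER(3, 3) == 0x183; MMU_MAJ_VER(0x183) == 3, MMU_MIN_VER(0x183) == 3 */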

/* v1.x - v3.x registers */
#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

/* v5.x registers */
#define REG_V5_FAULT_AR_VA	0x070
#define REG_V5_FAULT_AW_VA	0x080

#define has_sysmmu(dev)		(dev_iommu_priv_get(dev) != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
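/*
 * ZERO_LV2LINK is an lv1 entry that links to the shared empty lv2 table;
 * unused lv1 slots are initialized with it instead of a plain fault entry
 * as a workaround for FLPD caching in System MMU v3.3 (see the comments in
 * alloc_lv2entry() below).
 */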

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
	unsigned int bit;	/* bit number in STATUS register */
	unsigned short addr_reg; /* register to read VA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
	{ 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
	{ 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
	{ 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
	{ 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
	{ 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
	{ 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};

/*
 * This structure is attached to dev->iommu->priv of the master device
 * on device add. It contains a list of the SYSMMU controllers defined by
 * the device tree, which are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached to */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};

/*
 * This structure is the Exynos-specific generalization of struct iommu_domain.
 * It contains a list of the SYSMMU controllers from all master devices that
 * have been attached to this domain, and the page tables of the I/O address
 * space defined by it. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

/*
 * SysMMU version specific data. Contains offsets for registers that are
 * present in all SysMMU variants, but at different locations.
 */
struct sysmmu_variant {
	u32 pt_base;		/* page table base address (physical) */
	u32 flush_all;		/* invalidate all TLB entries */
	u32 flush_entry;	/* invalidate specific TLB entry */
	u32 flush_range;	/* invalidate TLB entries in specified range */
	u32 flush_start;	/* start address of range invalidation */
	u32 flush_end;		/* end address of range invalidation */
	u32 int_status;		/* interrupt status information */
	u32 int_clear;		/* clear the interrupt */
};

/*
 * This structure holds all data of a single SYSMMU controller; this includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from the
 * device tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	struct device_link *link;	/* runtime PM link to master */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
	const struct sysmmu_variant *variant; /* version specific data */
};

#define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)
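/* e.g. writel(0x1, SYSMMU_REG(data, flush_all)) invalidates the whole TLB */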

/* SysMMU v1..v3 */
static const struct sysmmu_variant sysmmu_v1_variant = {
	.flush_all	= 0x0c,
	.flush_entry	= 0x10,
	.pt_base	= 0x14,
	.int_status	= 0x18,
	.int_clear	= 0x1c,
};

/* SysMMU v5 */
static const struct sysmmu_variant sysmmu_v5_variant = {
	.pt_base	= 0x0c,
	.flush_all	= 0x10,
	.flush_entry	= 0x14,
	.flush_range	= 0x18,
	.flush_start	= 0x20,
	.flush_end	= 0x24,
	.int_status	= 0x60,
	.int_clear	= 0x64,
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

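/*
 * Block the SYSMMU so that in-flight translations drain before its registers
 * are changed: write CTRL_BLOCK and poll bit 0 of REG_MMU_STATUS (up to 120
 * reads) for the blocked state; on timeout, re-enable the MMU and fail.
 */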
static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	writel(0x1, SYSMMU_REG(data, flush_all));
}

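/*
 * Invalidate TLB entries for 'num_inv' pages starting at 'iova'. Controllers
 * older than v5 (and the single-page case) flush one entry at a time through
 * the flush_entry register; v5 issues a single range invalidation via the
 * flush_start/flush_end/flush_range registers.
 */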
static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) {
		for (i = 0; i < num_inv; i++) {
			writel((iova & SPAGE_MASK) | 1,
			       SYSMMU_REG(data, flush_entry));
			iova += SPAGE_SIZE;
		}
	} else {
		writel(iova & SPAGE_MASK, SYSMMU_REG(data, flush_start));
		writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
		       SYSMMU_REG(data, flush_end));
		writel(0x1, SYSMMU_REG(data, flush_range));
	}
}

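/*
 * Program the page table base register. v5 controllers take a page frame
 * number (pgd >> SPAGE_ORDER), which lets a 36-bit physical address fit in
 * the 32-bit register; older controllers take the physical address as is.
 */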
static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	u32 pt_base;

	if (MMU_MAJ_VER(data->version) < 5)
		pt_base = pgd;
	else
		pt_base = pgd >> SPAGE_ORDER;

	writel(pt_base, SYSMMU_REG(data, pt_base));
	__sysmmu_tlb_invalidate(data);
}

static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}

static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report a proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	if (MMU_MAJ_VER(data->version) < 5)
		data->variant = &sysmmu_v1_variant;
	else
		data->variant = &sysmmu_v5_variant;

	__sysmmu_disable_clocks(data);
}

static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault_info *finfo,
				   sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
		dev_name(data->master), finfo->name, fault_addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* The SYSMMU is in a blocked state when an interrupt occurs. */
	struct sysmmu_drvdata *data = dev_id;
	const struct sysmmu_fault_info *finfo;
	unsigned int i, n, itype;
	sysmmu_iova_t fault_addr;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	if (MMU_MAJ_VER(data->version) < 5) {
		finfo = sysmmu_faults;
		n = ARRAY_SIZE(sysmmu_faults);
	} else {
		finfo = sysmmu_v5_faults;
		n = ARRAY_SIZE(sysmmu_v5_faults);
	}

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = __ffs(readl(SYSMMU_REG(data, int_status)));
	for (i = 0; i < n; i++, finfo++)
		if (finfo->bit == itype)
			break;
	/* unknown/unsupported fault */
	BUG_ON(i == n);

	/* print debug message */
	fault_addr = readl(data->sfrbase + finfo->addr_reg);
	show_fault_information(data, finfo, fault_addr);

	if (data->domain)
		ret = report_iommu_fault(&data->domain->domain,
					data->master, fault_addr, finfo->type);
	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	writel(1 << itype, SYSMMU_REG(data, int_clear));

	sysmmu_unblock(data);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}

static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * The SYSMMU driver keeps the master's clock enabled only for a short
	 * time, while accessing the registers. For performing address
	 * translation during a DMA transaction it relies on the client
	 * driver to enable it.
	 */
	clk_disable(data->clk_master);
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is a set-associative TLB
		 * with 8 ways and 64 sets.
		 * A 1MB page can be cached in any of the sets.
		 * A 64KB page can be cached in one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / SPAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static const struct iommu_ops exynos_iommu_ops;

static int exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (PTR_ERR(data->clk) == -ENOENT)
		data->clk = NULL;
	else if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get(dev, "aclk");
	if (PTR_ERR(data->aclk) == -ENOENT)
		data->aclk = NULL;
	else if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get(dev, "pclk");
	if (PTR_ERR(data->pclk) == -ENOENT)
		data->pclk = NULL;
	else if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (PTR_ERR(data->clk_master) == -ENOENT)
		data->clk_master = NULL;
	else if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	__sysmmu_get_version(data);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
	if (ret)
		goto err_iommu_register;

	platform_set_drvdata(pdev, data);

	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	if (MMU_MAJ_VER(data->version) >= 5) {
		ret = dma_set_mask(dev, DMA_BIT_MASK(36));
		if (ret) {
			dev_err(dev, "Unable to set DMA mask: %d\n", ret);
			goto err_dma_set_mask;
		}
	}

	/*
	 * use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush)
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	return 0;

err_dma_set_mask:
	iommu_device_unregister(&data->iommu);
err_iommu_register:
	iommu_device_sysfs_remove(&data->iommu);
	return ret;
}

static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id sysmmu_of_match[] = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};

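/*
 * Update a single page table entry while keeping the CPU view and the
 * SYSMMU (device) view of the table coherent: sync the entry for the CPU,
 * write the new value, then sync it back for the device so the hardware
 * page table walker sees the update.
 */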
static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

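	/* order-2 allocation: 4 pages == 16KiB == 4096 lv1 entries of 4 bytes */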
	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_pgtable;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		domain->pgtable[i] = ZERO_LV2LINK;

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				    struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
		return;

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = NULL;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
		&pagetable);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				   struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;

	if (!has_sysmmu(dev))
		return -ENODEV;

	if (owner->domain)
		exynos_iommu_detach_device(owner->domain, dev);

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		dma_addr_t handle;
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, handle)) {
			kmem_cache_free(lv2table_kmem_cache, pent);
			return ERR_PTR(-EADDRINUSE);
		}

		/*
		 * If the pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * the FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with a new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause a page fault since the
		 * FLPD cache may still cache zero_l2_table for the valid area
		 * instead of the new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with another valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate the TLB
		 * without blocking because the target address of TLB
		 * invalidation is not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

994bfa00489SMarek Szyprowski static int lv1set_section(struct exynos_iommu_domain *domain,
99566a7ed84SCho KyongHo 			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
9961a0d8dacSMarek Szyprowski 			  phys_addr_t paddr, int prot, short *pgcnt)
9972a96536eSKyongHo Cho {
99861128f08SCho KyongHo 	if (lv1ent_section(sent)) {
999d09d78fcSCho KyongHo 		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
100061128f08SCho KyongHo 			iova);
10012a96536eSKyongHo Cho 		return -EADDRINUSE;
100261128f08SCho KyongHo 	}
10032a96536eSKyongHo Cho 
10042a96536eSKyongHo Cho 	if (lv1ent_page(sent)) {
100561128f08SCho KyongHo 		if (*pgcnt != NUM_LV2ENTRIES) {
1006d09d78fcSCho KyongHo 			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
100761128f08SCho KyongHo 				iova);
10082a96536eSKyongHo Cho 			return -EADDRINUSE;
100961128f08SCho KyongHo 		}
10102a96536eSKyongHo Cho 
1011734c3c73SCho KyongHo 		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
10122a96536eSKyongHo Cho 		*pgcnt = 0;
10132a96536eSKyongHo Cho 	}
10142a96536eSKyongHo Cho 
10159314006dSRobin Murphy 	exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));
10162a96536eSKyongHo Cho 
1017bfa00489SMarek Szyprowski 	spin_lock(&domain->lock);
101866a7ed84SCho KyongHo 	if (lv1ent_page_zero(sent)) {
1019469acebeSMarek Szyprowski 		struct sysmmu_drvdata *data;
102066a7ed84SCho KyongHo 		/*
102166a7ed84SCho KyongHo 		 * Flush the FLPD cache on System MMU v3.3, which may cache an
102266a7ed84SCho KyongHo 		 * FLPD entry on a speculative prefetch of an unmapped SLPD.
102366a7ed84SCho KyongHo 		 */
1024bfa00489SMarek Szyprowski 		list_for_each_entry(data, &domain->clients, domain_node)
1025469acebeSMarek Szyprowski 			sysmmu_tlb_invalidate_flpdcache(data, iova);
102666a7ed84SCho KyongHo 	}
1027bfa00489SMarek Szyprowski 	spin_unlock(&domain->lock);
102866a7ed84SCho KyongHo 
10292a96536eSKyongHo Cho 	return 0;
10302a96536eSKyongHo Cho }
10312a96536eSKyongHo Cho 
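/*
 * Note: *pgcnt mirrors domain->lv2entcnt[] and counts the unmapped (fault)
 * entries remaining in the L2 table; mapping decrements it and
 * exynos_iommu_unmap() increments it back.
 */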
1032d09d78fcSCho KyongHo static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
10331a0d8dacSMarek Szyprowski 		       int prot, short *pgcnt)
10342a96536eSKyongHo Cho {
10352a96536eSKyongHo Cho 	if (size == SPAGE_SIZE) {
10360bf4e54dSCho KyongHo 		if (WARN_ON(!lv2ent_fault(pent)))
10372a96536eSKyongHo Cho 			return -EADDRINUSE;
10382a96536eSKyongHo Cho 
10399314006dSRobin Murphy 		exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
10402a96536eSKyongHo Cho 		*pgcnt -= 1;
10412a96536eSKyongHo Cho 	} else { /* size == LPAGE_SIZE */
10422a96536eSKyongHo Cho 		int i;
10435e3435ebSMarek Szyprowski 		dma_addr_t pent_base = virt_to_phys(pent);
1044365409dbSSachin Kamat 
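		/*
		 * Hand the PTE range back to the CPU, fill in the entries,
		 * then flush them out so the SYSMMU table walker sees them.
		 */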
10455e3435ebSMarek Szyprowski 		dma_sync_single_for_cpu(dma_dev, pent_base,
10465e3435ebSMarek Szyprowski 					sizeof(*pent) * SPAGES_PER_LPAGE,
10475e3435ebSMarek Szyprowski 					DMA_TO_DEVICE);
10482a96536eSKyongHo Cho 		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
10490bf4e54dSCho KyongHo 			if (WARN_ON(!lv2ent_fault(pent))) {
105061128f08SCho KyongHo 				if (i > 0)
105161128f08SCho KyongHo 					memset(pent - i, 0, sizeof(*pent) * i);
10522a96536eSKyongHo Cho 				return -EADDRINUSE;
10532a96536eSKyongHo Cho 			}
10542a96536eSKyongHo Cho 
10551a0d8dacSMarek Szyprowski 			*pent = mk_lv2ent_lpage(paddr, prot);
10562a96536eSKyongHo Cho 		}
10575e3435ebSMarek Szyprowski 		dma_sync_single_for_device(dma_dev, pent_base,
10585e3435ebSMarek Szyprowski 					   sizeof(*pent) * SPAGES_PER_LPAGE,
10595e3435ebSMarek Szyprowski 					   DMA_TO_DEVICE);
10602a96536eSKyongHo Cho 		*pgcnt -= SPAGES_PER_LPAGE;
10612a96536eSKyongHo Cho 	}
10622a96536eSKyongHo Cho 
10632a96536eSKyongHo Cho 	return 0;
10642a96536eSKyongHo Cho }
10652a96536eSKyongHo Cho 
106666a7ed84SCho KyongHo /*
106766a7ed84SCho KyongHo  * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
106866a7ed84SCho KyongHo  *
1069f171ababSSachin Kamat  * System MMU v3.x has advanced logic that improves address translation
107066a7ed84SCho KyongHo  * performance by caching additional page table entries during a page table
1071f171ababSSachin Kamat  * walk. However, the logic has a bug: once a faulty page table entry has
1072f171ababSSachin Kamat  * been cached, System MMU reports a page fault whenever that cached entry
1073f171ababSSachin Kamat  * is hit, even if the entry was updated to a valid one after being cached.
1074f171ababSSachin Kamat  * To keep faulty entries that may later become valid out of the cache, the
1075f171ababSSachin Kamat  * I/O virtual memory manager must apply the workaround described below.
1076f171ababSSachin Kamat  *
107766a7ed84SCho KyongHo  * Any two consecutive I/O virtual address regions must be separated by a
107866a7ed84SCho KyongHo  * hole of at most 128KiB to prevent misbehavior of System MMU 3.x
1079f171ababSSachin Kamat  * (workaround for the h/w bug).
108066a7ed84SCho KyongHo  *
1081f171ababSSachin Kamat  * More precisely, the start address of every I/O virtual region must be
108266a7ed84SCho KyongHo  * aligned to the following sizes on System MMU v3.1 and v3.2:
108366a7ed84SCho KyongHo  * System MMU v3.1: 128KiB
108466a7ed84SCho KyongHo  * System MMU v3.2: 256KiB
108566a7ed84SCho KyongHo  *
108666a7ed84SCho KyongHo  * Because System MMU v3.3 caches page table entries even more aggressively,
1087f171ababSSachin Kamat  * it needs additional workarounds:
1088f171ababSSachin Kamat  * - Any two consecutive I/O virtual regions must be separated by a hole of
1089f171ababSSachin Kamat  *   at least 128KiB.
109066a7ed84SCho KyongHo  * - The start address of an I/O virtual region must be aligned to 128KiB.
109166a7ed84SCho KyongHo  */
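
/*
 * A minimal sketch of the v3.3 constraints above, assuming a hypothetical
 * allocator helper that is not part of this driver: leave a 128KiB hole
 * after the previous region and align the new start to 128KiB.
 */
#if 0	/* illustrative only, never compiled */
#include <linux/align.h>
#include <linux/sizes.h>

static unsigned long exynos_iova_next_start(unsigned long prev_region_end)
{
	/* hole >= 128KiB after the previous region, start aligned to 128KiB */
	return ALIGN(prev_region_end + SZ_128K, SZ_128K);
}
#endif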
1092bfa00489SMarek Szyprowski static int exynos_iommu_map(struct iommu_domain *iommu_domain,
1093bfa00489SMarek Szyprowski 			    unsigned long l_iova, phys_addr_t paddr, size_t size,
1094781ca2deSTom Murphy 			    int prot, gfp_t gfp)
10952a96536eSKyongHo Cho {
1096bfa00489SMarek Szyprowski 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1097d09d78fcSCho KyongHo 	sysmmu_pte_t *entry;
1098d09d78fcSCho KyongHo 	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
10992a96536eSKyongHo Cho 	unsigned long flags;
11002a96536eSKyongHo Cho 	int ret = -ENOMEM;
11012a96536eSKyongHo Cho 
1102bfa00489SMarek Szyprowski 	BUG_ON(domain->pgtable == NULL);
11031a0d8dacSMarek Szyprowski 	prot &= SYSMMU_SUPPORTED_PROT_BITS;
11042a96536eSKyongHo Cho 
1105bfa00489SMarek Szyprowski 	spin_lock_irqsave(&domain->pgtablelock, flags);
11062a96536eSKyongHo Cho 
1107bfa00489SMarek Szyprowski 	entry = section_entry(domain->pgtable, iova);
11082a96536eSKyongHo Cho 
11092a96536eSKyongHo Cho 	if (size == SECT_SIZE) {
11101a0d8dacSMarek Szyprowski 		ret = lv1set_section(domain, entry, iova, paddr, prot,
1111bfa00489SMarek Szyprowski 				     &domain->lv2entcnt[lv1ent_offset(iova)]);
11122a96536eSKyongHo Cho 	} else {
1113d09d78fcSCho KyongHo 		sysmmu_pte_t *pent;
11142a96536eSKyongHo Cho 
1115bfa00489SMarek Szyprowski 		pent = alloc_lv2entry(domain, entry, iova,
1116bfa00489SMarek Szyprowski 				      &domain->lv2entcnt[lv1ent_offset(iova)]);
11172a96536eSKyongHo Cho 
111861128f08SCho KyongHo 		if (IS_ERR(pent))
111961128f08SCho KyongHo 			ret = PTR_ERR(pent);
11202a96536eSKyongHo Cho 		else
11211a0d8dacSMarek Szyprowski 			ret = lv2set_page(pent, paddr, size, prot,
1122bfa00489SMarek Szyprowski 				       &domain->lv2entcnt[lv1ent_offset(iova)]);
11232a96536eSKyongHo Cho 	}
11242a96536eSKyongHo Cho 
112561128f08SCho KyongHo 	if (ret)
11260bf4e54dSCho KyongHo 		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
11270bf4e54dSCho KyongHo 			__func__, ret, size, iova);
11282a96536eSKyongHo Cho 
1129bfa00489SMarek Szyprowski 	spin_unlock_irqrestore(&domain->pgtablelock, flags);
11302a96536eSKyongHo Cho 
11312a96536eSKyongHo Cho 	return ret;
11322a96536eSKyongHo Cho }
11332a96536eSKyongHo Cho 
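/*
 * Broadcast a TLB invalidation for [iova, iova + size) to every SYSMMU
 * instance currently attached to the domain.
 */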
1134bfa00489SMarek Szyprowski static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
113566a7ed84SCho KyongHo 					      sysmmu_iova_t iova, size_t size)
113666a7ed84SCho KyongHo {
1137469acebeSMarek Szyprowski 	struct sysmmu_drvdata *data;
113866a7ed84SCho KyongHo 	unsigned long flags;
113966a7ed84SCho KyongHo 
1140bfa00489SMarek Szyprowski 	spin_lock_irqsave(&domain->lock, flags);
114166a7ed84SCho KyongHo 
1142bfa00489SMarek Szyprowski 	list_for_each_entry(data, &domain->clients, domain_node)
1143469acebeSMarek Szyprowski 		sysmmu_tlb_invalidate_entry(data, iova, size);
114466a7ed84SCho KyongHo 
1145bfa00489SMarek Szyprowski 	spin_unlock_irqrestore(&domain->lock, flags);
114666a7ed84SCho KyongHo }
114766a7ed84SCho KyongHo 
1148bfa00489SMarek Szyprowski static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
114956f8af5eSWill Deacon 				 unsigned long l_iova, size_t size,
115056f8af5eSWill Deacon 				 struct iommu_iotlb_gather *gather)
11512a96536eSKyongHo Cho {
1152bfa00489SMarek Szyprowski 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1153d09d78fcSCho KyongHo 	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
1154d09d78fcSCho KyongHo 	sysmmu_pte_t *ent;
115561128f08SCho KyongHo 	size_t err_pgsize;
1156d09d78fcSCho KyongHo 	unsigned long flags;
11572a96536eSKyongHo Cho 
1158bfa00489SMarek Szyprowski 	BUG_ON(domain->pgtable == NULL);
11592a96536eSKyongHo Cho 
1160bfa00489SMarek Szyprowski 	spin_lock_irqsave(&domain->pgtablelock, flags);
11612a96536eSKyongHo Cho 
1162bfa00489SMarek Szyprowski 	ent = section_entry(domain->pgtable, iova);
11632a96536eSKyongHo Cho 
11642a96536eSKyongHo Cho 	if (lv1ent_section(ent)) {
11650bf4e54dSCho KyongHo 		if (WARN_ON(size < SECT_SIZE)) {
116661128f08SCho KyongHo 			err_pgsize = SECT_SIZE;
116761128f08SCho KyongHo 			goto err;
116861128f08SCho KyongHo 		}
11692a96536eSKyongHo Cho 
1170f171ababSSachin Kamat 		/* workaround for h/w bug in System MMU v3.3: link zero_l2_table */
11719314006dSRobin Murphy 		exynos_iommu_set_pte(ent, ZERO_LV2LINK);
11722a96536eSKyongHo Cho 		size = SECT_SIZE;
11732a96536eSKyongHo Cho 		goto done;
11742a96536eSKyongHo Cho 	}
11752a96536eSKyongHo Cho 
11762a96536eSKyongHo Cho 	if (unlikely(lv1ent_fault(ent))) {
11772a96536eSKyongHo Cho 		if (size > SECT_SIZE)
11782a96536eSKyongHo Cho 			size = SECT_SIZE;
11792a96536eSKyongHo Cho 		goto done;
11802a96536eSKyongHo Cho 	}
11812a96536eSKyongHo Cho 
11822a96536eSKyongHo Cho 	/* lv1ent_page(ent) == true here */
11832a96536eSKyongHo Cho 
11842a96536eSKyongHo Cho 	ent = page_entry(ent, iova);
11852a96536eSKyongHo Cho 
11862a96536eSKyongHo Cho 	if (unlikely(lv2ent_fault(ent))) {
11872a96536eSKyongHo Cho 		size = SPAGE_SIZE;
11882a96536eSKyongHo Cho 		goto done;
11892a96536eSKyongHo Cho 	}
11902a96536eSKyongHo Cho 
11912a96536eSKyongHo Cho 	if (lv2ent_small(ent)) {
11929314006dSRobin Murphy 		exynos_iommu_set_pte(ent, 0);
11932a96536eSKyongHo Cho 		size = SPAGE_SIZE;
1194bfa00489SMarek Szyprowski 		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
11952a96536eSKyongHo Cho 		goto done;
11962a96536eSKyongHo Cho 	}
11972a96536eSKyongHo Cho 
11982a96536eSKyongHo Cho 	/* lv2ent_large(ent) == true here */
11990bf4e54dSCho KyongHo 	if (WARN_ON(size < LPAGE_SIZE)) {
120061128f08SCho KyongHo 		err_pgsize = LPAGE_SIZE;
120161128f08SCho KyongHo 		goto err;
120261128f08SCho KyongHo 	}
12032a96536eSKyongHo Cho 
12045e3435ebSMarek Szyprowski 	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
12055e3435ebSMarek Szyprowski 				sizeof(*ent) * SPAGES_PER_LPAGE,
12065e3435ebSMarek Szyprowski 				DMA_TO_DEVICE);
12072a96536eSKyongHo Cho 	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
12085e3435ebSMarek Szyprowski 	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
12095e3435ebSMarek Szyprowski 				   sizeof(*ent) * SPAGES_PER_LPAGE,
12105e3435ebSMarek Szyprowski 				   DMA_TO_DEVICE);
12112a96536eSKyongHo Cho 	size = LPAGE_SIZE;
1212bfa00489SMarek Szyprowski 	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
12132a96536eSKyongHo Cho done:
1214bfa00489SMarek Szyprowski 	spin_unlock_irqrestore(&domain->pgtablelock, flags);
12152a96536eSKyongHo Cho 
1216bfa00489SMarek Szyprowski 	exynos_iommu_tlb_invalidate_entry(domain, iova, size);
12172a96536eSKyongHo Cho 
12182a96536eSKyongHo Cho 	return size;
121961128f08SCho KyongHo err:
1220bfa00489SMarek Szyprowski 	spin_unlock_irqrestore(&domain->pgtablelock, flags);
122161128f08SCho KyongHo 
12220bf4e54dSCho KyongHo 	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
122361128f08SCho KyongHo 		__func__, size, iova, err_pgsize);
122461128f08SCho KyongHo 
122561128f08SCho KyongHo 	return 0;
12262a96536eSKyongHo Cho }
12272a96536eSKyongHo Cho 
1228bfa00489SMarek Szyprowski static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
1229bb5547acSVarun Sethi 					  dma_addr_t iova)
12302a96536eSKyongHo Cho {
1231bfa00489SMarek Szyprowski 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1232d09d78fcSCho KyongHo 	sysmmu_pte_t *entry;
12332a96536eSKyongHo Cho 	unsigned long flags;
12342a96536eSKyongHo Cho 	phys_addr_t phys = 0;
12352a96536eSKyongHo Cho 
1236bfa00489SMarek Szyprowski 	spin_lock_irqsave(&domain->pgtablelock, flags);
12372a96536eSKyongHo Cho 
1238bfa00489SMarek Szyprowski 	entry = section_entry(domain->pgtable, iova);
12392a96536eSKyongHo Cho 
12402a96536eSKyongHo Cho 	if (lv1ent_section(entry)) {
12412a96536eSKyongHo Cho 		phys = section_phys(entry) + section_offs(iova);
12422a96536eSKyongHo Cho 	} else if (lv1ent_page(entry)) {
12432a96536eSKyongHo Cho 		entry = page_entry(entry, iova);
12442a96536eSKyongHo Cho 
12452a96536eSKyongHo Cho 		if (lv2ent_large(entry))
12462a96536eSKyongHo Cho 			phys = lpage_phys(entry) + lpage_offs(iova);
12472a96536eSKyongHo Cho 		else if (lv2ent_small(entry))
12482a96536eSKyongHo Cho 			phys = spage_phys(entry) + spage_offs(iova);
12492a96536eSKyongHo Cho 	}
12502a96536eSKyongHo Cho 
1251bfa00489SMarek Szyprowski 	spin_unlock_irqrestore(&domain->pgtablelock, flags);
12522a96536eSKyongHo Cho 
12532a96536eSKyongHo Cho 	return phys;
12542a96536eSKyongHo Cho }
12552a96536eSKyongHo Cho 
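/*
 * Callers normally reach the lookup above through the generic IOMMU API
 * rather than through the op itself, e.g. (illustrative):
 *
 *	phys_addr_t phys = iommu_iova_to_phys(iommu_domain, iova);
 */
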
12563c51c054SJoerg Roedel static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
1257bf4a1c92SAntonios Motakis {
12580f45b04dSJoerg Roedel 	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
12597a974b29SMarek Szyprowski 	struct sysmmu_drvdata *data;
1260bf4a1c92SAntonios Motakis 
126106801db0SMarek Szyprowski 	if (!has_sysmmu(dev))
12623c51c054SJoerg Roedel 		return ERR_PTR(-ENODEV);
1263bf4a1c92SAntonios Motakis 
12647a974b29SMarek Szyprowski 	list_for_each_entry(data, &owner->controllers, owner_node) {
12657a974b29SMarek Szyprowski 		/*
12667a974b29SMarek Szyprowski 		 * SYSMMU will be runtime activated via device link
12677a974b29SMarek Szyprowski 		 * (dependency) to its master device, so there are no
12687a974b29SMarek Szyprowski 		 * direct calls to pm_runtime_get/put in this driver.
12697a974b29SMarek Szyprowski 		 */
12707a974b29SMarek Szyprowski 		data->link = device_link_add(dev, data->sysmmu,
1271ea4f6400SRafael J. Wysocki 					     DL_FLAG_STATELESS |
12727a974b29SMarek Szyprowski 					     DL_FLAG_PM_RUNTIME);
12737a974b29SMarek Szyprowski 	}
1274bf4a1c92SAntonios Motakis 
127566ae88e7SJoerg Roedel 	/* There is always at least one entry, see exynos_iommu_of_xlate() */
127666ae88e7SJoerg Roedel 	data = list_first_entry(&owner->controllers,
127766ae88e7SJoerg Roedel 				struct sysmmu_drvdata, owner_node);
127866ae88e7SJoerg Roedel 
12793c51c054SJoerg Roedel 	return &data->iommu;
1280bf4a1c92SAntonios Motakis }
1281bf4a1c92SAntonios Motakis 
12823c51c054SJoerg Roedel static void exynos_iommu_release_device(struct device *dev)
1283bf4a1c92SAntonios Motakis {
12840f45b04dSJoerg Roedel 	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
12857a974b29SMarek Szyprowski 	struct sysmmu_drvdata *data;
1286fff2fd1aSMarek Szyprowski 
128706801db0SMarek Szyprowski 	if (!has_sysmmu(dev))
128806801db0SMarek Szyprowski 		return;
128906801db0SMarek Szyprowski 
1290fff2fd1aSMarek Szyprowski 	if (owner->domain) {
1291fff2fd1aSMarek Szyprowski 		struct iommu_group *group = iommu_group_get(dev);
1292fff2fd1aSMarek Szyprowski 
1293fff2fd1aSMarek Szyprowski 		if (group) {
1294fff2fd1aSMarek Szyprowski 			WARN_ON(owner->domain !=
1295fff2fd1aSMarek Szyprowski 				iommu_group_default_domain(group));
1296fff2fd1aSMarek Szyprowski 			exynos_iommu_detach_device(owner->domain, dev);
1297fff2fd1aSMarek Szyprowski 			iommu_group_put(group);
1298fff2fd1aSMarek Szyprowski 		}
1299fff2fd1aSMarek Szyprowski 	}
13007a974b29SMarek Szyprowski 
13017a974b29SMarek Szyprowski 	list_for_each_entry(data, &owner->controllers, owner_node)
13027a974b29SMarek Szyprowski 		device_link_del(data->link);
1303bf4a1c92SAntonios Motakis }
1304bf4a1c92SAntonios Motakis 
1305aa759fd3SMarek Szyprowski static int exynos_iommu_of_xlate(struct device *dev,
1306aa759fd3SMarek Szyprowski 				 struct of_phandle_args *spec)
1307aa759fd3SMarek Szyprowski {
1308aa759fd3SMarek Szyprowski 	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
13090f45b04dSJoerg Roedel 	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
13100bd5a0c7SMarek Szyprowski 	struct sysmmu_drvdata *data, *entry;
1311aa759fd3SMarek Szyprowski 
1312aa759fd3SMarek Szyprowski 	if (!sysmmu)
1313aa759fd3SMarek Szyprowski 		return -ENODEV;
1314aa759fd3SMarek Szyprowski 
1315aa759fd3SMarek Szyprowski 	data = platform_get_drvdata(sysmmu);
13161a260449SYu Kuai 	if (!data) {
13171a260449SYu Kuai 		put_device(&sysmmu->dev);
1318aa759fd3SMarek Szyprowski 		return -ENODEV;
13191a260449SYu Kuai 	}
1320aa759fd3SMarek Szyprowski 
1321aa759fd3SMarek Szyprowski 	if (!owner) {
1322aa759fd3SMarek Szyprowski 		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
13231a260449SYu Kuai 		if (!owner) {
13241a260449SYu Kuai 			put_device(&sysmmu->dev);
1325aa759fd3SMarek Szyprowski 			return -ENOMEM;
13261a260449SYu Kuai 		}
1327aa759fd3SMarek Szyprowski 
1328aa759fd3SMarek Szyprowski 		INIT_LIST_HEAD(&owner->controllers);
13299b265536SMarek Szyprowski 		mutex_init(&owner->rpm_lock);
13300f45b04dSJoerg Roedel 		dev_iommu_priv_set(dev, owner);
1331aa759fd3SMarek Szyprowski 	}
1332aa759fd3SMarek Szyprowski 
13330bd5a0c7SMarek Szyprowski 	list_for_each_entry(entry, &owner->controllers, owner_node)
13340bd5a0c7SMarek Szyprowski 		if (entry == data)
13350bd5a0c7SMarek Szyprowski 			return 0;
13360bd5a0c7SMarek Szyprowski 
1337aa759fd3SMarek Szyprowski 	list_add_tail(&data->owner_node, &owner->controllers);
133892798b45SMarek Szyprowski 	data->master = dev;
13392f5f44f2SMarek Szyprowski 
1340aa759fd3SMarek Szyprowski 	return 0;
1341aa759fd3SMarek Szyprowski }
1342aa759fd3SMarek Szyprowski 
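/*
 * exynos_iommu_of_xlate() above is called once per 'iommus' phandle in the
 * master's device tree node. A sketch of the wiring, with illustrative node
 * names and addresses per the samsung,sysmmu binding:
 *
 *	sysmmu_gsc0: sysmmu@13e80000 {
 *		compatible = "samsung,exynos-sysmmu";
 *		...
 *		#iommu-cells = <0>;
 *	};
 *
 *	gsc_0: gsc@13e00000 {
 *		...
 *		iommus = <&sysmmu_gsc0>;
 *	};
 */
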
13430b9a3694SArvind Yadav static const struct iommu_ops exynos_iommu_ops = {
1344e1fd1eaaSJoerg Roedel 	.domain_alloc = exynos_iommu_domain_alloc,
13456d7cf02aSRobin Murphy 	.device_group = generic_device_group,
13463c51c054SJoerg Roedel 	.probe_device = exynos_iommu_probe_device,
13473c51c054SJoerg Roedel 	.release_device = exynos_iommu_release_device,
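	/* 1MiB section, 64KiB large page and 4KiB small page mappings */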
13482a96536eSKyongHo Cho 	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
1349aa759fd3SMarek Szyprowski 	.of_xlate = exynos_iommu_of_xlate,
13509a630a4bSLu Baolu 	.default_domain_ops = &(const struct iommu_domain_ops) {
13519a630a4bSLu Baolu 		.attach_dev	= exynos_iommu_attach_device,
13529a630a4bSLu Baolu 		.detach_dev	= exynos_iommu_detach_device,
13539a630a4bSLu Baolu 		.map		= exynos_iommu_map,
13549a630a4bSLu Baolu 		.unmap		= exynos_iommu_unmap,
13559a630a4bSLu Baolu 		.iova_to_phys	= exynos_iommu_iova_to_phys,
13569a630a4bSLu Baolu 		.free		= exynos_iommu_domain_free,
13579a630a4bSLu Baolu 	}
13582a96536eSKyongHo Cho };
13592a96536eSKyongHo Cho 
13602a96536eSKyongHo Cho static int __init exynos_iommu_init(void)
13612a96536eSKyongHo Cho {
1362dc98b848SRobin Murphy 	struct device_node *np;
13632a96536eSKyongHo Cho 	int ret;
13642a96536eSKyongHo Cho 
1365dc98b848SRobin Murphy 	np = of_find_matching_node(NULL, sysmmu_of_match);
1366dc98b848SRobin Murphy 	if (!np)
1367dc98b848SRobin Murphy 		return 0;
1368dc98b848SRobin Murphy 
1369dc98b848SRobin Murphy 	of_node_put(np);
1370dc98b848SRobin Murphy 
1371734c3c73SCho KyongHo 	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
1372734c3c73SCho KyongHo 				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
1373734c3c73SCho KyongHo 	if (!lv2table_kmem_cache) {
1374734c3c73SCho KyongHo 		pr_err("%s: Failed to create kmem cache\n", __func__);
1375734c3c73SCho KyongHo 		return -ENOMEM;
1376734c3c73SCho KyongHo 	}
1377734c3c73SCho KyongHo 
13782a96536eSKyongHo Cho 	ret = platform_driver_register(&exynos_sysmmu_driver);
1379734c3c73SCho KyongHo 	if (ret) {
1380734c3c73SCho KyongHo 		pr_err("%s: Failed to register driver\n", __func__);
1381734c3c73SCho KyongHo 		goto err_reg_driver;
1382734c3c73SCho KyongHo 	}
13832a96536eSKyongHo Cho 
138466a7ed84SCho KyongHo 	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
138566a7ed84SCho KyongHo 	if (!zero_lv2_table) {
138666a7ed84SCho KyongHo 		pr_err("%s: Failed to allocate zero level2 page table\n",
138766a7ed84SCho KyongHo 			__func__);
138866a7ed84SCho KyongHo 		ret = -ENOMEM;
138966a7ed84SCho KyongHo 		goto err_zero_lv2;
139066a7ed84SCho KyongHo 	}
139166a7ed84SCho KyongHo 
1392734c3c73SCho KyongHo 	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
1393734c3c73SCho KyongHo 	if (ret) {
1394734c3c73SCho KyongHo 		pr_err("%s: Failed to set iommu ops on platform bus\n",
1395734c3c73SCho KyongHo 			__func__);
1396734c3c73SCho KyongHo 		goto err_set_iommu;
1397734c3c73SCho KyongHo 	}
13982a96536eSKyongHo Cho 
1399734c3c73SCho KyongHo 	return 0;
1400734c3c73SCho KyongHo err_set_iommu:
140166a7ed84SCho KyongHo 	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
140266a7ed84SCho KyongHo err_zero_lv2:
1403734c3c73SCho KyongHo 	platform_driver_unregister(&exynos_sysmmu_driver);
1404734c3c73SCho KyongHo err_reg_driver:
1405734c3c73SCho KyongHo 	kmem_cache_destroy(lv2table_kmem_cache);
14062a96536eSKyongHo Cho 	return ret;
14072a96536eSKyongHo Cho }
1408928055a0SMarek Szyprowski core_initcall(exynos_iommu_init);
1409