xref: /openbmc/linux/drivers/iommu/exynos-iommu.c (revision 5e799a7c)
1d2912cb1SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
2740a01eeSMarek Szyprowski /*
3740a01eeSMarek Szyprowski  * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
42a96536eSKyongHo Cho  *		http://www.samsung.com
52a96536eSKyongHo Cho  */
62a96536eSKyongHo Cho 
72a96536eSKyongHo Cho #ifdef CONFIG_EXYNOS_IOMMU_DEBUG
82a96536eSKyongHo Cho #define DEBUG
92a96536eSKyongHo Cho #endif
102a96536eSKyongHo Cho 
112a96536eSKyongHo Cho #include <linux/clk.h>
128ed55c81SMarek Szyprowski #include <linux/dma-mapping.h>
132a96536eSKyongHo Cho #include <linux/err.h>
14312900c6SMarek Szyprowski #include <linux/io.h>
152a96536eSKyongHo Cho #include <linux/iommu.h>
16312900c6SMarek Szyprowski #include <linux/interrupt.h>
17514c6032SRandy Dunlap #include <linux/kmemleak.h>
182a96536eSKyongHo Cho #include <linux/list.h>
198ed55c81SMarek Szyprowski #include <linux/of.h>
208ed55c81SMarek Szyprowski #include <linux/of_platform.h>
21312900c6SMarek Szyprowski #include <linux/platform_device.h>
22312900c6SMarek Szyprowski #include <linux/pm_runtime.h>
23312900c6SMarek Szyprowski #include <linux/slab.h>
242a96536eSKyongHo Cho 
25d09d78fcSCho KyongHo typedef u32 sysmmu_iova_t;
26d09d78fcSCho KyongHo typedef u32 sysmmu_pte_t;
27d09d78fcSCho KyongHo 
28f171ababSSachin Kamat /* We do not consider super section mapping (16MB) */
292a96536eSKyongHo Cho #define SECT_ORDER 20
302a96536eSKyongHo Cho #define LPAGE_ORDER 16
312a96536eSKyongHo Cho #define SPAGE_ORDER 12
322a96536eSKyongHo Cho 
332a96536eSKyongHo Cho #define SECT_SIZE (1 << SECT_ORDER)
342a96536eSKyongHo Cho #define LPAGE_SIZE (1 << LPAGE_ORDER)
352a96536eSKyongHo Cho #define SPAGE_SIZE (1 << SPAGE_ORDER)
362a96536eSKyongHo Cho 
372a96536eSKyongHo Cho #define SECT_MASK (~(SECT_SIZE - 1))
382a96536eSKyongHo Cho #define LPAGE_MASK (~(LPAGE_SIZE - 1))
392a96536eSKyongHo Cho #define SPAGE_MASK (~(SPAGE_SIZE - 1))
402a96536eSKyongHo Cho 
4166a7ed84SCho KyongHo #define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
4266a7ed84SCho KyongHo 			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
4366a7ed84SCho KyongHo #define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
4466a7ed84SCho KyongHo #define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
4566a7ed84SCho KyongHo #define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
4666a7ed84SCho KyongHo 			  ((*(sent) & 3) == 1))
472a96536eSKyongHo Cho #define lv1ent_section(sent) ((*(sent) & 3) == 2)
482a96536eSKyongHo Cho 
492a96536eSKyongHo Cho #define lv2ent_fault(pent) ((*(pent) & 3) == 0)
502a96536eSKyongHo Cho #define lv2ent_small(pent) ((*(pent) & 2) == 2)
512a96536eSKyongHo Cho #define lv2ent_large(pent) ((*(pent) & 3) == 1)
522a96536eSKyongHo Cho 
53740a01eeSMarek Szyprowski /*
54740a01eeSMarek Szyprowski  * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
55740a01eeSMarek Szyprowski  * v5.0 introduced support for 36bit physical address space by shifting
56740a01eeSMarek Szyprowski  * all page entry values by 4 bits.
57740a01eeSMarek Szyprowski  * All SYSMMU controllers in the system support the address spaces of the same
58740a01eeSMarek Szyprowski  * size, so PG_ENT_SHIFT can be initialized on first SYSMMU probe to proper
59740a01eeSMarek Szyprowski  * value (0 or 4).
60740a01eeSMarek Szyprowski  */
61740a01eeSMarek Szyprowski static short PG_ENT_SHIFT = -1;
62740a01eeSMarek Szyprowski #define SYSMMU_PG_ENT_SHIFT 0
63740a01eeSMarek Szyprowski #define SYSMMU_V5_PG_ENT_SHIFT 4
642a96536eSKyongHo Cho 
651a0d8dacSMarek Szyprowski static const sysmmu_pte_t *LV1_PROT;
661a0d8dacSMarek Szyprowski static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
671a0d8dacSMarek Szyprowski 	((0 << 15) | (0 << 10)), /* no access */
681a0d8dacSMarek Szyprowski 	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
691a0d8dacSMarek Szyprowski 	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
701a0d8dacSMarek Szyprowski 	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
711a0d8dacSMarek Szyprowski };
721a0d8dacSMarek Szyprowski static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
731a0d8dacSMarek Szyprowski 	(0 << 4), /* no access */
741a0d8dacSMarek Szyprowski 	(1 << 4), /* IOMMU_READ only */
751a0d8dacSMarek Szyprowski 	(2 << 4), /* IOMMU_WRITE only */
761a0d8dacSMarek Szyprowski 	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
771a0d8dacSMarek Szyprowski };
781a0d8dacSMarek Szyprowski 
791a0d8dacSMarek Szyprowski static const sysmmu_pte_t *LV2_PROT;
801a0d8dacSMarek Szyprowski static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
811a0d8dacSMarek Szyprowski 	((0 << 9) | (0 << 4)), /* no access */
821a0d8dacSMarek Szyprowski 	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
831a0d8dacSMarek Szyprowski 	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
841a0d8dacSMarek Szyprowski 	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
851a0d8dacSMarek Szyprowski };
861a0d8dacSMarek Szyprowski static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
871a0d8dacSMarek Szyprowski 	(0 << 2), /* no access */
881a0d8dacSMarek Szyprowski 	(1 << 2), /* IOMMU_READ only */
891a0d8dacSMarek Szyprowski 	(2 << 2), /* IOMMU_WRITE only */
901a0d8dacSMarek Szyprowski 	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
911a0d8dacSMarek Szyprowski };
921a0d8dacSMarek Szyprowski 
931a0d8dacSMarek Szyprowski #define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
941a0d8dacSMarek Szyprowski 
95740a01eeSMarek Szyprowski #define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
96740a01eeSMarek Szyprowski #define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
97740a01eeSMarek Szyprowski #define section_offs(iova) (iova & (SECT_SIZE - 1))
98740a01eeSMarek Szyprowski #define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
99740a01eeSMarek Szyprowski #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
100740a01eeSMarek Szyprowski #define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
101740a01eeSMarek Szyprowski #define spage_offs(iova) (iova & (SPAGE_SIZE - 1))
1022a96536eSKyongHo Cho 
1032a96536eSKyongHo Cho #define NUM_LV1ENTRIES 4096
104d09d78fcSCho KyongHo #define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
1052a96536eSKyongHo Cho 
lv1ent_offset(sysmmu_iova_t iova)106d09d78fcSCho KyongHo static u32 lv1ent_offset(sysmmu_iova_t iova)
107d09d78fcSCho KyongHo {
108d09d78fcSCho KyongHo 	return iova >> SECT_ORDER;
109d09d78fcSCho KyongHo }
110d09d78fcSCho KyongHo 
lv2ent_offset(sysmmu_iova_t iova)111d09d78fcSCho KyongHo static u32 lv2ent_offset(sysmmu_iova_t iova)
112d09d78fcSCho KyongHo {
113d09d78fcSCho KyongHo 	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
114d09d78fcSCho KyongHo }
115d09d78fcSCho KyongHo 
1165e3435ebSMarek Szyprowski #define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
117d09d78fcSCho KyongHo #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
1182a96536eSKyongHo Cho 
1192a96536eSKyongHo Cho #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
120740a01eeSMarek Szyprowski #define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))
1212a96536eSKyongHo Cho 
1221a0d8dacSMarek Szyprowski #define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
123740a01eeSMarek Szyprowski #define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
1241a0d8dacSMarek Szyprowski #define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
1251a0d8dacSMarek Szyprowski #define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
1262a96536eSKyongHo Cho 
1272a96536eSKyongHo Cho #define CTRL_ENABLE	0x5
1282a96536eSKyongHo Cho #define CTRL_BLOCK	0x7
1292a96536eSKyongHo Cho #define CTRL_DISABLE	0x0
1302a96536eSKyongHo Cho 
131eeb5184bSCho KyongHo #define CFG_LRU		0x1
1321a0d8dacSMarek Szyprowski #define CFG_EAP		(1 << 2)
133eeb5184bSCho KyongHo #define CFG_QOS(n)	((n & 0xF) << 7)
134eeb5184bSCho KyongHo #define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
135eeb5184bSCho KyongHo #define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
136eeb5184bSCho KyongHo #define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */
137eeb5184bSCho KyongHo 
1387fee5d6fSSam Protsenko #define CTRL_VM_ENABLE			BIT(0)
1397fee5d6fSSam Protsenko #define CTRL_VM_FAULT_MODE_STALL	BIT(3)
1400892c498SSam Protsenko #define CAPA0_CAPA1_EXIST		BIT(11)
1410892c498SSam Protsenko #define CAPA1_VCR_ENABLED		BIT(14)
1420892c498SSam Protsenko 
143740a01eeSMarek Szyprowski /* common registers */
1442a96536eSKyongHo Cho #define REG_MMU_CTRL		0x000
1452a96536eSKyongHo Cho #define REG_MMU_CFG		0x004
1462a96536eSKyongHo Cho #define REG_MMU_STATUS		0x008
147740a01eeSMarek Szyprowski #define REG_MMU_VERSION		0x034
148740a01eeSMarek Szyprowski 
149740a01eeSMarek Szyprowski #define MMU_MAJ_VER(val)	((val) >> 7)
150740a01eeSMarek Szyprowski #define MMU_MIN_VER(val)	((val) & 0x7F)
151740a01eeSMarek Szyprowski #define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
152740a01eeSMarek Szyprowski 
153740a01eeSMarek Szyprowski #define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))
154740a01eeSMarek Szyprowski 
155740a01eeSMarek Szyprowski /* v1.x - v3.x registers */
1562a96536eSKyongHo Cho #define REG_PAGE_FAULT_ADDR	0x024
1572a96536eSKyongHo Cho #define REG_AW_FAULT_ADDR	0x028
1582a96536eSKyongHo Cho #define REG_AR_FAULT_ADDR	0x02C
1592a96536eSKyongHo Cho #define REG_DEFAULT_SLAVE_ADDR	0x030
1602a96536eSKyongHo Cho 
161740a01eeSMarek Szyprowski /* v5.x registers */
162740a01eeSMarek Szyprowski #define REG_V5_FAULT_AR_VA	0x070
163740a01eeSMarek Szyprowski #define REG_V5_FAULT_AW_VA	0x080
1642a96536eSKyongHo Cho 
1650892c498SSam Protsenko /* v7.x registers */
1660892c498SSam Protsenko #define REG_V7_CAPA0		0x870
1670892c498SSam Protsenko #define REG_V7_CAPA1		0x874
1687fee5d6fSSam Protsenko #define REG_V7_CTRL_VM		0x8000
1690892c498SSam Protsenko 
1700f45b04dSJoerg Roedel #define has_sysmmu(dev)		(dev_iommu_priv_get(dev) != NULL)
1716b21a5dbSCho KyongHo 
1725e3435ebSMarek Szyprowski static struct device *dma_dev;
173734c3c73SCho KyongHo static struct kmem_cache *lv2table_kmem_cache;
17466a7ed84SCho KyongHo static sysmmu_pte_t *zero_lv2_table;
17566a7ed84SCho KyongHo #define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
176734c3c73SCho KyongHo 
/* Return a pointer to the lv1 entry covering @iova in @pgtable. */
static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return &pgtable[lv1ent_offset(iova)];
}
1812a96536eSKyongHo Cho 
/*
 * Return a pointer to the lv2 entry for @iova. @sent must be an lv1 entry
 * of page-table type, as its payload is used as the lv2 table address.
 */
static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	sysmmu_pte_t *lv2table = phys_to_virt(lv2table_base(sent));

	return &lv2table[lv2ent_offset(iova)];
}
1872a96536eSKyongHo Cho 
/*
 * Generic fault descriptor, filled in by the version specific
 * ->get_fault_info() callback and consumed by the IRQ handler.
 */
struct sysmmu_fault {
	sysmmu_iova_t addr;	/* IOVA address that caused fault */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault() */
};
193c64074bfSSam Protsenko 
/* Describes a single SysMMU v1..v3 fault source. */
struct sysmmu_v1_fault_info {
	unsigned short addr_reg; /* register to read IOVA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};
1992a96536eSKyongHo Cho 
/*
 * SysMMU v1..v3 fault table, indexed by the interrupt status bit number
 * (see exynos_sysmmu_v1_get_fault_info()).
 */
static const struct sysmmu_v1_fault_info sysmmu_v1_faults[] = {
	{ REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_READ },
	{ REG_AW_FAULT_ADDR, "MULTI-HIT", IOMMU_FAULT_WRITE },
	{ REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ REG_AR_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ REG_AW_FAULT_ADDR, "SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ REG_AW_FAULT_ADDR, "ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};
2102a96536eSKyongHo Cho 
/*
 * SysMMU v5 has the same faults for AR (0..4 bits) and AW (16..20 bits).
 * Indexed by the interrupt status bit number (after subtracting 16 for
 * the AW range).
 */
static const char * const sysmmu_v5_fault_names[] = {
	"PTW",
	"PAGE",
	"MULTI-HIT",
	"ACCESS PROTECTION",
	"SECURITY PROTECTION"
};
219740a01eeSMarek Szyprowski 
/* SysMMU v7 fault names, indexed by (itype % 4) in the v7 fault handler. */
static const char * const sysmmu_v7_fault_names[] = {
	"PTW",
	"PAGE",
	"ACCESS PROTECTION",
	"RESERVED"
};
2262a96536eSKyongHo Cho 
/*
 * This structure is attached to dev->iommu->priv of the master device
 * on device add. It contains a list of SYSMMU controllers defined by
 * device tree, which are bound to the given master device. It is usually
 * referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};
2386b21a5dbSCho KyongHo 
/*
 * This structure is an exynos specific generalization of struct iommu_domain.
 * It contains a list of all SYSMMU controllers from all master devices, which
 * have been attached to this domain, and the page tables of the IO address
 * space defined by it. It is usually referenced by the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};
2532a96536eSKyongHo Cho 
254c64074bfSSam Protsenko struct sysmmu_drvdata;
255c64074bfSSam Protsenko 
/*
 * SysMMU version specific data. Contains offsets for the registers which can
 * be found in different SysMMU variants, but have different offset values.
 * Also contains version specific callbacks to abstract the hardware.
 * Register offsets are relative to sfrbase; access them via SYSMMU_REG().
 */
struct sysmmu_variant {
	u32 pt_base;		/* page table base address (physical) */
	u32 flush_all;		/* invalidate all TLB entries */
	u32 flush_entry;	/* invalidate specific TLB entry */
	u32 flush_range;	/* invalidate TLB entries in specified range */
	u32 flush_start;	/* start address of range invalidation */
	u32 flush_end;		/* end address of range invalidation */
	u32 int_status;		/* interrupt status information */
	u32 int_clear;		/* clear the interrupt */
	u32 fault_va;		/* IOVA address that caused fault */
	u32 fault_info;		/* fault transaction info */

	/* Fill @fault from hardware state for interrupt bit @itype. */
	int (*get_fault_info)(struct sysmmu_drvdata *data, unsigned int itype,
			      struct sysmmu_fault *fault);
};
2762125afbeSSam Protsenko 
/*
 * This structure holds all data of a single SYSMMU controller. This includes
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from device
 * tree. It is usually referenced by the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	struct device_link *link;	/* runtime PM link to master */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
	const struct sysmmu_variant *variant; /* version specific data */

	/* v7 fields */
	bool has_vcr;			/* virtual machine control register */
};
3062125afbeSSam Protsenko 
3072125afbeSSam Protsenko #define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)
3082125afbeSSam Protsenko 
exynos_sysmmu_v1_get_fault_info(struct sysmmu_drvdata * data,unsigned int itype,struct sysmmu_fault * fault)309c64074bfSSam Protsenko static int exynos_sysmmu_v1_get_fault_info(struct sysmmu_drvdata *data,
310c64074bfSSam Protsenko 					   unsigned int itype,
311c64074bfSSam Protsenko 					   struct sysmmu_fault *fault)
312c64074bfSSam Protsenko {
313c64074bfSSam Protsenko 	const struct sysmmu_v1_fault_info *finfo;
314c64074bfSSam Protsenko 
315c64074bfSSam Protsenko 	if (itype >= ARRAY_SIZE(sysmmu_v1_faults))
316c64074bfSSam Protsenko 		return -ENXIO;
317c64074bfSSam Protsenko 
318c64074bfSSam Protsenko 	finfo = &sysmmu_v1_faults[itype];
319c64074bfSSam Protsenko 	fault->addr = readl(data->sfrbase + finfo->addr_reg);
320c64074bfSSam Protsenko 	fault->name = finfo->name;
321c64074bfSSam Protsenko 	fault->type = finfo->type;
322c64074bfSSam Protsenko 
323c64074bfSSam Protsenko 	return 0;
324c64074bfSSam Protsenko }
325c64074bfSSam Protsenko 
exynos_sysmmu_v5_get_fault_info(struct sysmmu_drvdata * data,unsigned int itype,struct sysmmu_fault * fault)326c64074bfSSam Protsenko static int exynos_sysmmu_v5_get_fault_info(struct sysmmu_drvdata *data,
327c64074bfSSam Protsenko 					   unsigned int itype,
328c64074bfSSam Protsenko 					   struct sysmmu_fault *fault)
329c64074bfSSam Protsenko {
330c64074bfSSam Protsenko 	unsigned int addr_reg;
331c64074bfSSam Protsenko 
332c64074bfSSam Protsenko 	if (itype < ARRAY_SIZE(sysmmu_v5_fault_names)) {
333c64074bfSSam Protsenko 		fault->type = IOMMU_FAULT_READ;
334c64074bfSSam Protsenko 		addr_reg = REG_V5_FAULT_AR_VA;
335c64074bfSSam Protsenko 	} else if (itype >= 16 && itype <= 20) {
336c64074bfSSam Protsenko 		fault->type = IOMMU_FAULT_WRITE;
337c64074bfSSam Protsenko 		addr_reg = REG_V5_FAULT_AW_VA;
338c64074bfSSam Protsenko 		itype -= 16;
339c64074bfSSam Protsenko 	} else {
340c64074bfSSam Protsenko 		return -ENXIO;
341c64074bfSSam Protsenko 	}
342c64074bfSSam Protsenko 
343c64074bfSSam Protsenko 	fault->name = sysmmu_v5_fault_names[itype];
344c64074bfSSam Protsenko 	fault->addr = readl(data->sfrbase + addr_reg);
345c64074bfSSam Protsenko 
346c64074bfSSam Protsenko 	return 0;
347c64074bfSSam Protsenko }
348c64074bfSSam Protsenko 
/*
 * Translate a v7 interrupt status bit into a struct sysmmu_fault.
 * v7 exposes dedicated fault address/info registers; bit 20 of the
 * fault info distinguishes write faults from read faults.
 */
static int exynos_sysmmu_v7_get_fault_info(struct sysmmu_drvdata *data,
					   unsigned int itype,
					   struct sysmmu_fault *fault)
{
	u32 info = readl(SYSMMU_REG(data, fault_info));

	fault->addr = readl(SYSMMU_REG(data, fault_va));
	/* sysmmu_v7_fault_names has 4 entries, hence the modulo */
	fault->name = sysmmu_v7_fault_names[itype % 4];
	fault->type = (info & BIT(20)) ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;

	return 0;
}
3612f599c3fSSam Protsenko 
/* SysMMU v1..v3: register layout (no range flush, no fault info register) */
static const struct sysmmu_variant sysmmu_v1_variant = {
	.flush_all	= 0x0c,
	.flush_entry	= 0x10,
	.pt_base	= 0x14,
	.int_status	= 0x18,
	.int_clear	= 0x1c,

	.get_fault_info	= exynos_sysmmu_v1_get_fault_info,
};
3722125afbeSSam Protsenko 
/* SysMMU v5: adds range-based TLB invalidation */
static const struct sysmmu_variant sysmmu_v5_variant = {
	.pt_base	= 0x0c,
	.flush_all	= 0x10,
	.flush_entry	= 0x14,
	.flush_range	= 0x18,
	.flush_start	= 0x20,
	.flush_end	= 0x24,
	.int_status	= 0x60,
	.int_clear	= 0x64,

	.get_fault_info	= exynos_sysmmu_v5_get_fault_info,
};
3862a96536eSKyongHo Cho 
/* SysMMU v7: non-VM capable register layout (same base offsets as v5) */
static const struct sysmmu_variant sysmmu_v7_variant = {
	.pt_base	= 0x0c,
	.flush_all	= 0x10,
	.flush_entry	= 0x14,
	.flush_range	= 0x18,
	.flush_start	= 0x20,
	.flush_end	= 0x24,
	.int_status	= 0x60,
	.int_clear	= 0x64,
	.fault_va	= 0x70,
	.fault_info	= 0x78,

	.get_fault_info	= exynos_sysmmu_v7_get_fault_info,
};
4022f599c3fSSam Protsenko 
/* SysMMU v7: VM capable register layout (TLB control moved to 0x8000 block) */
static const struct sysmmu_variant sysmmu_v7_vm_variant = {
	.pt_base	= 0x800c,
	.flush_all	= 0x8010,
	.flush_entry	= 0x8014,
	.flush_range	= 0x8018,
	.flush_start	= 0x8020,
	.flush_end	= 0x8024,
	.int_status	= 0x60,
	.int_clear	= 0x64,
	.fault_va	= 0x1000,
	.fault_info	= 0x1004,

	.get_fault_info	= exynos_sysmmu_v7_get_fault_info,
};
4182a96536eSKyongHo Cho 
/* Convert a generic iommu_domain pointer to the embedding exynos domain. */
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}
423e1fd1eaaSJoerg Roedel 
/* Resume address translation, undoing a previous sysmmu_block(). */
static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}
4282a96536eSKyongHo Cho 
sysmmu_block(struct sysmmu_drvdata * data)42902cdc365SMarek Szyprowski static bool sysmmu_block(struct sysmmu_drvdata *data)
4302a96536eSKyongHo Cho {
4312a96536eSKyongHo Cho 	int i = 120;
4322a96536eSKyongHo Cho 
43384bd0428SMarek Szyprowski 	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
43484bd0428SMarek Szyprowski 	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
4352a96536eSKyongHo Cho 		--i;
4362a96536eSKyongHo Cho 
43784bd0428SMarek Szyprowski 	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
43802cdc365SMarek Szyprowski 		sysmmu_unblock(data);
4392a96536eSKyongHo Cho 		return false;
4402a96536eSKyongHo Cho 	}
4412a96536eSKyongHo Cho 
4422a96536eSKyongHo Cho 	return true;
4432a96536eSKyongHo Cho }
4442a96536eSKyongHo Cho 
/* Invalidate all TLB entries of this SYSMMU. */
static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	writel(0x1, SYSMMU_REG(data, flush_all));
}
4492a96536eSKyongHo Cho 
/*
 * Invalidate @num_inv SPAGE-sized TLB entries starting at @iova.
 * Pre-v5 hardware (and the single-entry case) flushes one entry at a
 * time; v5+ programs a start/end range and triggers one range flush.
 */
static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) {
		for (i = 0; i < num_inv; i++) {
			writel((iova & SPAGE_MASK) | 1,
			       SYSMMU_REG(data, flush_entry));
			iova += SPAGE_SIZE;
		}
	} else {
		/* range is inclusive: end = start + (num_inv - 1) pages */
		writel(iova & SPAGE_MASK, SYSMMU_REG(data, flush_start));
		writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
		       SYSMMU_REG(data, flush_end));
		writel(0x1, SYSMMU_REG(data, flush_range));
	}
}
4682a96536eSKyongHo Cho 
/*
 * Program the lv1 page table base address and flush the TLB.
 * v5+ hardware takes the base as a page frame number (pgd >> SPAGE_ORDER),
 * older hardware takes the raw 32bit physical address.
 */
static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	bool shifted = MMU_MAJ_VER(data->version) >= 5;

	writel(shifted ? pgd >> SPAGE_ORDER : pgd, SYSMMU_REG(data, pt_base));
	__sysmmu_tlb_invalidate(data);
}
4812a96536eSKyongHo Cho 
/*
 * Enable all clocks needed for register access: the master's clock first,
 * then the SYSMMU's own clocks. clk_prepare_enable() may only fail due to
 * a driver/platform bug, hence BUG_ON().
 */
static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}
489fecc49dbSMarek Szyprowski 
/* Disable all clocks in the reverse order of __sysmmu_enable_clocks(). */
static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}
497fecc49dbSMarek Szyprowski 
__sysmmu_has_capa1(struct sysmmu_drvdata * data)4980892c498SSam Protsenko static bool __sysmmu_has_capa1(struct sysmmu_drvdata *data)
4990892c498SSam Protsenko {
5000892c498SSam Protsenko 	u32 capa0 = readl(data->sfrbase + REG_V7_CAPA0);
5010892c498SSam Protsenko 
5020892c498SSam Protsenko 	return capa0 & CAPA0_CAPA1_EXIST;
5030892c498SSam Protsenko }
5040892c498SSam Protsenko 
/*
 * Cache whether this (v7) SYSMMU implements the Virtual Machine control
 * register, read from CAPA1. Only call when __sysmmu_has_capa1() is true.
 */
static void __sysmmu_get_vcr(struct sysmmu_drvdata *data)
{
	u32 capa1 = readl(data->sfrbase + REG_V7_CAPA1);

	data->has_vcr = capa1 & CAPA1_VCR_ENABLED;
}
5110892c498SSam Protsenko 
/*
 * Read the hardware version and select the matching register layout
 * (data->variant). Clocks are enabled around the register accesses since
 * this runs before the SYSMMU is activated.
 */
static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	if (MMU_MAJ_VER(data->version) < 5) {
		data->variant = &sysmmu_v1_variant;
	} else if (MMU_MAJ_VER(data->version) < 7) {
		data->variant = &sysmmu_v5_variant;
	} else {
		/* v7 uses the VM register layout when VCR is present */
		if (__sysmmu_has_capa1(data))
			__sysmmu_get_vcr(data);
		if (data->has_vcr)
			data->variant = &sysmmu_v7_vm_variant;
		else
			data->variant = &sysmmu_v7_variant;
	}

	__sysmmu_disable_clocks(data);
}
544850d313eSMarek Szyprowski 
/*
 * Print human readable information about @fault, including the involved
 * lv1 page table entry and, for page-type lv1 entries, the lv2 entry.
 */
static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault *fault)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: [%s] %s FAULT occurred at %#x\n",
		dev_name(data->master),
		fault->type == IOMMU_FAULT_READ ? "READ" : "WRITE",
		fault->name, fault->addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault->addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault->addr);
		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}
5622a96536eSKyongHo Cho 
/*
 * Interrupt handler for SYSMMU translation faults.
 *
 * Decodes the fault via the hardware variant's get_fault_info(), reports it
 * to the IOMMU core if a domain is attached, and panics if nobody handled
 * the fault (ret stays non-zero). The interrupt is acknowledged and the
 * SYSMMU unblocked before returning.
 */
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	struct sysmmu_drvdata *data = dev_id;
	unsigned int itype;
	struct sysmmu_fault fault;
	int ret = -ENOSYS;

	/* Faults are only expected while the SYSMMU is active */
	WARN_ON(!data->active);

	spin_lock(&data->lock);
	/* master clock must run while touching SYSMMU registers */
	clk_enable(data->clk_master);

	/* lowest set bit of the interrupt status selects the fault type */
	itype = __ffs(readl(SYSMMU_REG(data, int_status)));
	ret = data->variant->get_fault_info(data, itype, &fault);
	if (ret) {
		dev_err(data->sysmmu, "Unhandled interrupt bit %u\n", itype);
		goto out;
	}
	show_fault_information(data, &fault);

	if (data->domain) {
		ret = report_iommu_fault(&data->domain->domain, data->master,
					 fault.addr, fault.type);
	}
	/* no domain attached, or the fault handler failed to recover */
	if (ret)
		panic("Unrecoverable System MMU Fault!");

out:
	writel(1 << itype, SYSMMU_REG(data, int_clear));

	/* SysMMU is in blocked state when interrupt occurred */
	sysmmu_unblock(data);
	clk_disable(data->clk_master);
	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}
6002a96536eSKyongHo Cho 
/*
 * Turn address translation off: disable the control register, clear the
 * configuration and mark the instance inactive, all under data->lock.
 * The master clock is enabled around the register writes; the SYSMMU's
 * own clocks are gated afterwards.
 */
static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}
6152a96536eSKyongHo Cho 
__sysmmu_init_config(struct sysmmu_drvdata * data)6166b21a5dbSCho KyongHo static void __sysmmu_init_config(struct sysmmu_drvdata *data)
6176b21a5dbSCho KyongHo {
61883addecdSMarek Szyprowski 	unsigned int cfg;
619eeb5184bSCho KyongHo 
62083addecdSMarek Szyprowski 	if (data->version <= MAKE_MMU_VER(3, 1))
62183addecdSMarek Szyprowski 		cfg = CFG_LRU | CFG_QOS(15);
62283addecdSMarek Szyprowski 	else if (data->version <= MAKE_MMU_VER(3, 2))
62383addecdSMarek Szyprowski 		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
62483addecdSMarek Szyprowski 	else
62583addecdSMarek Szyprowski 		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;
6266b21a5dbSCho KyongHo 
6271a0d8dacSMarek Szyprowski 	cfg |= CFG_EAP; /* enable access protection bits check */
6281a0d8dacSMarek Szyprowski 
62984bd0428SMarek Szyprowski 	writel(cfg, data->sfrbase + REG_MMU_CFG);
6306b21a5dbSCho KyongHo }
6316b21a5dbSCho KyongHo 
/*
 * Enable the virtual machine (VM) operating mode on SYSMMU v7 hardware
 * that provides VM control registers, selecting stall-on-fault behaviour.
 * No-op on older hardware or when the VCR capability is absent.
 */
static void __sysmmu_enable_vid(struct sysmmu_drvdata *data)
{
	u32 ctrl;

	if (MMU_MAJ_VER(data->version) < 7 || !data->has_vcr)
		return;

	ctrl = readl(data->sfrbase + REG_V7_CTRL_VM);
	ctrl |= CTRL_VM_ENABLE | CTRL_VM_FAULT_MODE_STALL;
	writel(ctrl, data->sfrbase + REG_V7_CTRL_VM);
}
6437fee5d6fSSam Protsenko 
/*
 * Turn address translation on: block the SYSMMU, program configuration,
 * page table base and (on v7 VM-capable hardware) the VM registers, then
 * enable translation and mark the instance active. All register writes
 * happen under data->lock with clocks enabled.
 */
static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	__sysmmu_enable_vid(data);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * SYSMMU driver keeps master's clock enabled only for the short
	 * time, while accessing the registers. For performing address
	 * translation during DMA transaction it relies on the client
	 * driver to enable it.
	 */
	clk_disable(data->clk_master);
}
6676b21a5dbSCho KyongHo 
/*
 * Invalidate a possibly stale first-level page-table-descriptor (FLPD)
 * cache entry for @iova. Only needed (and only done) on active v3.3+
 * hardware; on v5.0+ the whole TLB is flushed instead of a single entry.
 */
static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
68766a7ed84SCho KyongHo 
/*
 * Invalidate TLB entries covering [@iova, @iova + @size) on an active
 * SYSMMU. Skipped entirely while the instance is inactive (the TLB will
 * be cold after the next enable anyway).
 */
static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / SPAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
7202a96536eSKyongHo Cho 
7210b9a3694SArvind Yadav static const struct iommu_ops exynos_iommu_ops;
72296f66557SMarek Szyprowski 
exynos_sysmmu_probe(struct platform_device * pdev)7237991eb39SMarek Szyprowski static int exynos_sysmmu_probe(struct platform_device *pdev)
7242a96536eSKyongHo Cho {
72546c16d1eSCho KyongHo 	int irq, ret;
7267222e8dbSCho KyongHo 	struct device *dev = &pdev->dev;
7272a96536eSKyongHo Cho 	struct sysmmu_drvdata *data;
7287222e8dbSCho KyongHo 	struct resource *res;
7292a96536eSKyongHo Cho 
73046c16d1eSCho KyongHo 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
73146c16d1eSCho KyongHo 	if (!data)
73246c16d1eSCho KyongHo 		return -ENOMEM;
7332a96536eSKyongHo Cho 
7347222e8dbSCho KyongHo 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
73546c16d1eSCho KyongHo 	data->sfrbase = devm_ioremap_resource(dev, res);
73646c16d1eSCho KyongHo 	if (IS_ERR(data->sfrbase))
73746c16d1eSCho KyongHo 		return PTR_ERR(data->sfrbase);
7382a96536eSKyongHo Cho 
73946c16d1eSCho KyongHo 	irq = platform_get_irq(pdev, 0);
740086f9efaSStephen Boyd 	if (irq <= 0)
74146c16d1eSCho KyongHo 		return irq;
7422a96536eSKyongHo Cho 
74346c16d1eSCho KyongHo 	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
7442a96536eSKyongHo Cho 				dev_name(dev), data);
7452a96536eSKyongHo Cho 	if (ret) {
74646c16d1eSCho KyongHo 		dev_err(dev, "Unabled to register handler of irq %d\n", irq);
74746c16d1eSCho KyongHo 		return ret;
7482a96536eSKyongHo Cho 	}
7492a96536eSKyongHo Cho 
750*5e799a7cSChristophe JAILLET 	data->clk = devm_clk_get_optional(dev, "sysmmu");
751*5e799a7cSChristophe JAILLET 	if (IS_ERR(data->clk))
7520c2b063fSMarek Szyprowski 		return PTR_ERR(data->clk);
753740a01eeSMarek Szyprowski 
754*5e799a7cSChristophe JAILLET 	data->aclk = devm_clk_get_optional(dev, "aclk");
755*5e799a7cSChristophe JAILLET 	if (IS_ERR(data->aclk))
7560c2b063fSMarek Szyprowski 		return PTR_ERR(data->aclk);
757740a01eeSMarek Szyprowski 
758*5e799a7cSChristophe JAILLET 	data->pclk = devm_clk_get_optional(dev, "pclk");
759*5e799a7cSChristophe JAILLET 	if (IS_ERR(data->pclk))
7600c2b063fSMarek Szyprowski 		return PTR_ERR(data->pclk);
761740a01eeSMarek Szyprowski 
762740a01eeSMarek Szyprowski 	if (!data->clk && (!data->aclk || !data->pclk)) {
763740a01eeSMarek Szyprowski 		dev_err(dev, "Failed to get device clock(s)!\n");
764740a01eeSMarek Szyprowski 		return -ENOSYS;
7652a96536eSKyongHo Cho 	}
7662a96536eSKyongHo Cho 
767*5e799a7cSChristophe JAILLET 	data->clk_master = devm_clk_get_optional(dev, "master");
768*5e799a7cSChristophe JAILLET 	if (IS_ERR(data->clk_master))
7690c2b063fSMarek Szyprowski 		return PTR_ERR(data->clk_master);
77070605870SCho KyongHo 
7712a96536eSKyongHo Cho 	data->sysmmu = dev;
7729d4e7a24SCho KyongHo 	spin_lock_init(&data->lock);
7732a96536eSKyongHo Cho 
7742125afbeSSam Protsenko 	__sysmmu_get_version(data);
7752125afbeSSam Protsenko 
776d2c302b6SJoerg Roedel 	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
777d2c302b6SJoerg Roedel 				     dev_name(data->sysmmu));
778d2c302b6SJoerg Roedel 	if (ret)
779d2c302b6SJoerg Roedel 		return ret;
780d2c302b6SJoerg Roedel 
7817222e8dbSCho KyongHo 	platform_set_drvdata(pdev, data);
7827222e8dbSCho KyongHo 
783740a01eeSMarek Szyprowski 	if (PG_ENT_SHIFT < 0) {
7841a0d8dacSMarek Szyprowski 		if (MMU_MAJ_VER(data->version) < 5) {
785740a01eeSMarek Szyprowski 			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
7861a0d8dacSMarek Szyprowski 			LV1_PROT = SYSMMU_LV1_PROT;
7871a0d8dacSMarek Szyprowski 			LV2_PROT = SYSMMU_LV2_PROT;
7881a0d8dacSMarek Szyprowski 		} else {
789740a01eeSMarek Szyprowski 			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
7901a0d8dacSMarek Szyprowski 			LV1_PROT = SYSMMU_V5_LV1_PROT;
7911a0d8dacSMarek Szyprowski 			LV2_PROT = SYSMMU_V5_LV2_PROT;
7921a0d8dacSMarek Szyprowski 		}
793740a01eeSMarek Szyprowski 	}
794740a01eeSMarek Szyprowski 
7955f26ad58SSam Protsenko 	if (MMU_MAJ_VER(data->version) >= 5) {
7965f26ad58SSam Protsenko 		ret = dma_set_mask(dev, DMA_BIT_MASK(36));
7975f26ad58SSam Protsenko 		if (ret) {
7985f26ad58SSam Protsenko 			dev_err(dev, "Unable to set DMA mask: %d\n", ret);
7995f26ad58SSam Protsenko 			goto err_dma_set_mask;
8005f26ad58SSam Protsenko 		}
8015f26ad58SSam Protsenko 	}
8025f26ad58SSam Protsenko 
803928055a0SMarek Szyprowski 	/*
804928055a0SMarek Szyprowski 	 * use the first registered sysmmu device for performing
805928055a0SMarek Szyprowski 	 * dma mapping operations on iommu page tables (cpu cache flush)
806928055a0SMarek Szyprowski 	 */
807928055a0SMarek Szyprowski 	if (!dma_dev)
808928055a0SMarek Szyprowski 		dma_dev = &pdev->dev;
809928055a0SMarek Szyprowski 
8102a96536eSKyongHo Cho 	pm_runtime_enable(dev);
8112a96536eSKyongHo Cho 
812bbc4d205SMarek Szyprowski 	ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev);
813bbc4d205SMarek Szyprowski 	if (ret)
814bbc4d205SMarek Szyprowski 		goto err_dma_set_mask;
815bbc4d205SMarek Szyprowski 
8162a96536eSKyongHo Cho 	return 0;
817fce398d2SSam Protsenko 
8185f26ad58SSam Protsenko err_dma_set_mask:
819fce398d2SSam Protsenko 	iommu_device_sysfs_remove(&data->iommu);
820fce398d2SSam Protsenko 	return ret;
8212a96536eSKyongHo Cho }
8222a96536eSKyongHo Cho 
/*
 * Runtime-PM suspend callback: if a master device with an attached domain
 * exists, disable translation before the SYSMMU loses power. Serialized
 * against attach/detach via the owner's rpm_lock.
 */
static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}
840622015e4SMarek Szyprowski 
/*
 * Runtime-PM resume callback: counterpart of exynos_sysmmu_suspend().
 * Re-enables translation (restoring configuration and page table base)
 * if a domain was attached when the SYSMMU was powered down.
 */
static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = dev_iommu_priv_get(master);

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}
858622015e4SMarek Szyprowski 
859622015e4SMarek Szyprowski static const struct dev_pm_ops sysmmu_pm_ops = {
8609b265536SMarek Szyprowski 	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
8612f5f44f2SMarek Szyprowski 	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
8629b265536SMarek Szyprowski 				pm_runtime_force_resume)
863622015e4SMarek Szyprowski };
864622015e4SMarek Szyprowski 
8659d25e3ccSMarek Szyprowski static const struct of_device_id sysmmu_of_match[] = {
8666b21a5dbSCho KyongHo 	{ .compatible	= "samsung,exynos-sysmmu", },
8676b21a5dbSCho KyongHo 	{ },
8686b21a5dbSCho KyongHo };
8696b21a5dbSCho KyongHo 
8706b21a5dbSCho KyongHo static struct platform_driver exynos_sysmmu_driver __refdata = {
8712a96536eSKyongHo Cho 	.probe	= exynos_sysmmu_probe,
8722a96536eSKyongHo Cho 	.driver	= {
8732a96536eSKyongHo Cho 		.name		= "exynos-sysmmu",
8746b21a5dbSCho KyongHo 		.of_match_table	= sysmmu_of_match,
875622015e4SMarek Szyprowski 		.pm		= &sysmmu_pm_ops,
876b54b874fSMarek Szyprowski 		.suppress_bind_attrs = true,
8772a96536eSKyongHo Cho 	}
8782a96536eSKyongHo Cho };
8792a96536eSKyongHo Cho 
/*
 * Write a single page-table entry, keeping the CPU cache coherent with
 * the table memory that the SYSMMU hardware reads via DMA.
 */
static inline void exynos_iommu_set_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	const phys_addr_t pa = virt_to_phys(ent);

	dma_sync_single_for_cpu(dma_dev, pa, sizeof(*ent), DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, pa, sizeof(*ent), DMA_TO_DEVICE);
}
8882a96536eSKyongHo Cho 
/*
 * Allocate an IOMMU domain: a 16KiB first-level page table (order-2 pages),
 * a per-Lv1-slot counter array for Lv2 entry accounting, and locks/lists
 * for the attached SYSMMU instances. Only DMA and UNMANAGED domain types
 * are supported. Returns NULL on any failure.
 */
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_pgtable;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		domain->pgtable[i] = ZERO_LV2LINK;

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	/* Full 32-bit IOVA space is addressable */
	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_pgtable:
	kfree(domain);
	return NULL;
}
9422a96536eSKyongHo Cho 
/*
 * Tear down a domain: forcibly disable and unlink any still-attached SYSMMU
 * instances (clients list should normally be empty by now), unmap the Lv1
 * table from the DMA device, free all Lv2 tables and finally the domain's
 * own allocations.
 */
static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	/* free every allocated second-level table referenced from Lv1 */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}
9822a96536eSKyongHo Cho 
/*
 * Detach @dev from @iommu_domain: disable every powered-on SYSMMU serving
 * the device (under the owner's rpm_lock to serialize with runtime PM),
 * then unlink all instances from the domain's client list and clear their
 * page table references.
 */
static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				    struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
		return;

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		/* pin the PM state so it cannot change while we disable */
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = NULL;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
		&pagetable);
}
10205fa61cbfSMarek Szyprowski 
/*
 * Attach @dev to @iommu_domain: detach from any previous domain first,
 * point every SYSMMU serving the device at the domain's page table, add
 * them to the domain's client list and enable the ones that are currently
 * powered (under the owner's rpm_lock to serialize with runtime PM).
 *
 * Returns 0 on success or -ENODEV if the device has no SYSMMU.
 */
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				   struct device *dev)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;

	if (!has_sysmmu(dev))
		return -ENODEV;

	if (owner->domain)
		exynos_iommu_detach_device(owner->domain, dev);

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		/* pin the PM state so it cannot change while we enable */
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}
10632a96536eSKyongHo Cho 
/*
 * Return the Lv2 page-table entry for @iova, allocating and linking a new
 * second-level table if the Lv1 slot is currently a fault/zero link.
 * @pgcounter is the domain's per-slot free-entry counter and is reset to
 * NUM_LV2ENTRIES for a fresh table.
 *
 * Returns the entry pointer, or ERR_PTR(-EADDRINUSE) if the slot already
 * holds a 1MiB section mapping / the DMA mapping failed, or
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		dma_addr_t handle;
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		/* Lv2 tables must be naturally aligned to their size */
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		exynos_iommu_set_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, handle)) {
			kmem_cache_free(lv2table_kmem_cache, pent);
			return ERR_PTR(-EADDRINUSE);
		}

		/*
		 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table,
		 * FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause page fault since FLPD
		 * cache may still cache zero_l2_table for the valid area
		 * instead of new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with other valid L2
		 * page table must involve FLPD cache invalidation for System
		 * MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate TLB without
		 * blocking because the target address of TLB invalidation is
		 * not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}
11212a96536eSKyongHo Cho 
/*
 * Install a 1MiB section mapping for @iova in the Lv1 entry @sent.
 * Fails with -EADDRINUSE if the slot already maps a section, or if it
 * links an Lv2 table that has entries in use (*pgcnt != NUM_LV2ENTRIES).
 * An unused Lv2 table is freed and replaced by the section entry.
 */
static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, int prot, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		/* Lv2 table is completely unused; reclaim it */
		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	exynos_iommu_set_pte(sent, mk_lv1ent_sect(paddr, prot));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD
		 * entry by speculative prefetch of SLPD which has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}
11592a96536eSKyongHo Cho 
/*
 * Install a small-page (4KiB) or large-page (64KiB) mapping into a
 * level-2 page table.
 *
 * @pent:  first lv2 entry to fill (16 consecutive entries for a large page)
 * @paddr: physical address to map
 * @size:  SPAGE_SIZE or LPAGE_SIZE
 * @prot:  protection bits
 * @pgcnt: count of free entries in this lv2 table, decremented on success
 *
 * Returns 0 on success or -EADDRINUSE if any target entry is not a
 * fault entry. Caller must hold the domain's pgtablelock.
 */
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		/* exynos_iommu_set_pte() performs the CPU->device sync. */
		exynos_iommu_set_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		/* Hand the 16-entry span to the CPU for batched updates. */
		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				/* Roll back the entries written so far. */
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				/*
				 * NOTE(review): this error path returns without
				 * dma_sync_single_for_device(), so the rollback
				 * may not reach the IOMMU-visible copy — verify
				 * whether that is intended.
				 */
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		/* Publish the updated span back to the device. */
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}
11932a96536eSKyongHo Cho 
119466a7ed84SCho KyongHo /*
119566a7ed84SCho KyongHo  * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
119666a7ed84SCho KyongHo  *
1197f171ababSSachin Kamat  * System MMU v3.x has advanced logic to improve address translation
119866a7ed84SCho KyongHo  * performance with caching more page table entries by a page table walk.
1199f171ababSSachin Kamat  * However, the logic has a bug that while caching faulty page table entries,
1200f171ababSSachin Kamat  * System MMU reports page fault if the cached fault entry is hit even though
1201f171ababSSachin Kamat  * the fault entry is updated to a valid entry after the entry is cached.
1202f171ababSSachin Kamat  * To prevent caching faulty page table entries which may be updated to valid
1203f171ababSSachin Kamat  * entries later, the virtual memory manager should care about the workaround
1204f171ababSSachin Kamat  * for the problem. The following describes the workaround.
120566a7ed84SCho KyongHo  *
120666a7ed84SCho KyongHo  * Any two consecutive I/O virtual address regions must have a hole of 128KiB
1207f171ababSSachin Kamat  * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
120866a7ed84SCho KyongHo  *
1209f171ababSSachin Kamat  * Precisely, any start address of I/O virtual region must be aligned with
121066a7ed84SCho KyongHo  * the following sizes for System MMU v3.1 and v3.2.
121166a7ed84SCho KyongHo  * System MMU v3.1: 128KiB
121266a7ed84SCho KyongHo  * System MMU v3.2: 256KiB
121366a7ed84SCho KyongHo  *
121466a7ed84SCho KyongHo  * Because System MMU v3.3 caches page table entries more aggressively, it needs
1215f171ababSSachin Kamat  * more workarounds.
1216f171ababSSachin Kamat  * - Any two consecutive I/O virtual regions must have a hole of size larger
1217f171ababSSachin Kamat  *   than or equal to 128KiB.
121866a7ed84SCho KyongHo  * - Start address of an I/O virtual region must be aligned by 128KiB.
121966a7ed84SCho KyongHo  */
/*
 * iommu_domain_ops->map callback: install a single mapping of @size
 * (SECT_SIZE, LPAGE_SIZE or SPAGE_SIZE) at @l_iova.
 *
 * Section-sized requests go straight into the level-1 table; anything
 * smaller first obtains (allocating if needed) the level-2 table for the
 * section containing @l_iova and then fills the lv2 entries.
 *
 * Returns 0 on success or a negative errno.
 */
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot, gfp_t gfp)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *sent;
	short *lv2cnt;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	/* Drop any protection bits this hardware cannot express. */
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	sent = section_entry(domain->pgtable, iova);
	/* Free-entry counter of the lv2 table for this section. */
	lv2cnt = &domain->lv2entcnt[lv1ent_offset(iova)];

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, sent, iova, paddr, prot, lv2cnt);
	} else {
		sysmmu_pte_t *pent = alloc_lv2entry(domain, sent, iova, lv2cnt);

		ret = IS_ERR(pent) ? PTR_ERR(pent)
				   : lv2set_page(pent, paddr, size, prot, lv2cnt);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}
12612a96536eSKyongHo Cho 
/*
 * Ask every System MMU attached to @domain to drop TLB entries covering
 * [@iova, @iova + @size). Walks domain->clients under domain->lock.
 */
static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;
	struct sysmmu_drvdata *drvdata;

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(drvdata, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(drvdata, iova, size);
	spin_unlock_irqrestore(&domain->lock, flags);
}
127566a7ed84SCho KyongHo 
/*
 * iommu_domain_ops->unmap callback: tear down the mapping at @l_iova.
 *
 * Determines the actual mapped granule (section, large page or small
 * page) from the page tables, clears it, and invalidates the TLBs of all
 * attached System MMUs. Returns the number of bytes unmapped, or 0 if
 * @size is smaller than the granule actually mapped there.
 */
static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size,
				 struct iommu_iotlb_gather *gather)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		/* Cannot partially unmap a 1MiB section. */
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		/* Point at the shared zero lv2 table instead of a fault entry. */
		exynos_iommu_set_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		/* Nothing mapped here; report at most one section as done. */
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		/* Already unmapped at lv2 granularity. */
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		exynos_iommu_set_pte(ent, 0);
		size = SPAGE_SIZE;
		/* One more free entry in this lv2 table. */
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv1ent_large(ent) == true here */
	/* Cannot partially unmap a 64KiB large page. */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	/* Clear all 16 lv2 entries of the large page in one synced batch. */
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}
13552a96536eSKyongHo Cho 
exynos_iommu_iova_to_phys(struct iommu_domain * iommu_domain,dma_addr_t iova)1356bfa00489SMarek Szyprowski static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
1357bb5547acSVarun Sethi 					  dma_addr_t iova)
13582a96536eSKyongHo Cho {
1359bfa00489SMarek Szyprowski 	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
1360d09d78fcSCho KyongHo 	sysmmu_pte_t *entry;
13612a96536eSKyongHo Cho 	unsigned long flags;
13622a96536eSKyongHo Cho 	phys_addr_t phys = 0;
13632a96536eSKyongHo Cho 
1364bfa00489SMarek Szyprowski 	spin_lock_irqsave(&domain->pgtablelock, flags);
13652a96536eSKyongHo Cho 
1366bfa00489SMarek Szyprowski 	entry = section_entry(domain->pgtable, iova);
13672a96536eSKyongHo Cho 
13682a96536eSKyongHo Cho 	if (lv1ent_section(entry)) {
13692a96536eSKyongHo Cho 		phys = section_phys(entry) + section_offs(iova);
13702a96536eSKyongHo Cho 	} else if (lv1ent_page(entry)) {
13712a96536eSKyongHo Cho 		entry = page_entry(entry, iova);
13722a96536eSKyongHo Cho 
13732a96536eSKyongHo Cho 		if (lv2ent_large(entry))
13742a96536eSKyongHo Cho 			phys = lpage_phys(entry) + lpage_offs(iova);
13752a96536eSKyongHo Cho 		else if (lv2ent_small(entry))
13762a96536eSKyongHo Cho 			phys = spage_phys(entry) + spage_offs(iova);
13772a96536eSKyongHo Cho 	}
13782a96536eSKyongHo Cho 
1379bfa00489SMarek Szyprowski 	spin_unlock_irqrestore(&domain->pgtablelock, flags);
13802a96536eSKyongHo Cho 
13812a96536eSKyongHo Cho 	return phys;
13822a96536eSKyongHo Cho }
13832a96536eSKyongHo Cho 
exynos_iommu_probe_device(struct device * dev)13843c51c054SJoerg Roedel static struct iommu_device *exynos_iommu_probe_device(struct device *dev)
1385bf4a1c92SAntonios Motakis {
13860f45b04dSJoerg Roedel 	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
13877a974b29SMarek Szyprowski 	struct sysmmu_drvdata *data;
1388bf4a1c92SAntonios Motakis 
138906801db0SMarek Szyprowski 	if (!has_sysmmu(dev))
13903c51c054SJoerg Roedel 		return ERR_PTR(-ENODEV);
1391bf4a1c92SAntonios Motakis 
13927a974b29SMarek Szyprowski 	list_for_each_entry(data, &owner->controllers, owner_node) {
13937a974b29SMarek Szyprowski 		/*
13947a974b29SMarek Szyprowski 		 * SYSMMU will be runtime activated via device link
13957a974b29SMarek Szyprowski 		 * (dependency) to its master device, so there are no
13967a974b29SMarek Szyprowski 		 * direct calls to pm_runtime_get/put in this driver.
13977a974b29SMarek Szyprowski 		 */
13987a974b29SMarek Szyprowski 		data->link = device_link_add(dev, data->sysmmu,
1399ea4f6400SRafael J. Wysocki 					     DL_FLAG_STATELESS |
14007a974b29SMarek Szyprowski 					     DL_FLAG_PM_RUNTIME);
14017a974b29SMarek Szyprowski 	}
1402bf4a1c92SAntonios Motakis 
140366ae88e7SJoerg Roedel 	/* There is always at least one entry, see exynos_iommu_of_xlate() */
140466ae88e7SJoerg Roedel 	data = list_first_entry(&owner->controllers,
140566ae88e7SJoerg Roedel 				struct sysmmu_drvdata, owner_node);
140666ae88e7SJoerg Roedel 
14073c51c054SJoerg Roedel 	return &data->iommu;
1408bf4a1c92SAntonios Motakis }
1409bf4a1c92SAntonios Motakis 
exynos_iommu_set_platform_dma(struct device * dev)14103c51c054SJoerg Roedel static void exynos_iommu_set_platform_dma(struct device *dev)
1411bf4a1c92SAntonios Motakis {
14120f45b04dSJoerg Roedel 	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
14137a974b29SMarek Szyprowski 
1414fff2fd1aSMarek Szyprowski 	if (owner->domain) {
1415fff2fd1aSMarek Szyprowski 		struct iommu_group *group = iommu_group_get(dev);
1416fff2fd1aSMarek Szyprowski 
1417fff2fd1aSMarek Szyprowski 		if (group) {
1418fff2fd1aSMarek Szyprowski 			exynos_iommu_detach_device(owner->domain, dev);
1419189d496bSMarek Szyprowski 			iommu_group_put(group);
1420fff2fd1aSMarek Szyprowski 		}
1421fff2fd1aSMarek Szyprowski 	}
1422189d496bSMarek Szyprowski }
1423fff2fd1aSMarek Szyprowski 
exynos_iommu_release_device(struct device * dev)1424fff2fd1aSMarek Szyprowski static void exynos_iommu_release_device(struct device *dev)
1425fff2fd1aSMarek Szyprowski {
1426fff2fd1aSMarek Szyprowski 	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
14277a974b29SMarek Szyprowski 	struct sysmmu_drvdata *data;
14287a974b29SMarek Szyprowski 
14297a974b29SMarek Szyprowski 	exynos_iommu_set_platform_dma(dev);
1430bf4a1c92SAntonios Motakis 
1431bf4a1c92SAntonios Motakis 	list_for_each_entry(data, &owner->controllers, owner_node)
1432aa759fd3SMarek Szyprowski 		device_link_del(data->link);
1433aa759fd3SMarek Szyprowski }
1434aa759fd3SMarek Szyprowski 
/*
 * iommu_ops->of_xlate callback: associate the master device @dev with the
 * SYSMMU controller named by @spec, allocating the per-master owner
 * structure on first use.
 *
 * On success the reference taken by of_find_device_by_node() is kept for
 * the lifetime of the association; error paths drop it via put_device().
 *
 * Returns 0 on success, -ENODEV if the controller is missing or not yet
 * probed, or -ENOMEM on allocation failure.
 */
static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct exynos_iommu_owner *owner = dev_iommu_priv_get(dev);
	struct sysmmu_drvdata *data, *entry;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data) {
		/* Controller device exists but has not probed yet. */
		put_device(&sysmmu->dev);
		return -ENODEV;
	}

	if (!owner) {
		/* First SYSMMU for this master: create its owner record. */
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner) {
			put_device(&sysmmu->dev);
			return -ENOMEM;
		}

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		dev_iommu_priv_set(dev, owner);
	}

	/*
	 * NOTE(review): if this controller is already registered, the early
	 * return keeps the extra reference from of_find_device_by_node() —
	 * verify this is intentional.
	 */
	list_for_each_entry(entry, &owner->controllers, owner_node)
		if (entry == data)
			return 0;

	list_add_tail(&data->owner_node, &owner->controllers);
	data->master = dev;

	return 0;
}
14726d7cf02aSRobin Murphy 
/* IOMMU core callbacks implemented by the Exynos System MMU driver. */
static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.device_group = generic_device_group,
#ifdef CONFIG_ARM
	.set_platform_dma_ops = exynos_iommu_set_platform_dma,
#endif
	.probe_device = exynos_iommu_probe_device,
	.release_device = exynos_iommu_release_device,
	/* Supported granules: 1MiB section, 64KiB large page, 4KiB small page. */
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
	.default_domain_ops = &(const struct iommu_domain_ops) {
		.attach_dev	= exynos_iommu_attach_device,
		.map		= exynos_iommu_map,
		.unmap		= exynos_iommu_unmap,
		.iova_to_phys	= exynos_iommu_iova_to_phys,
		.free		= exynos_iommu_domain_free,
	}
};
1491dc98b848SRobin Murphy 
/*
 * Driver initialisation: create the kmem cache for level-2 page tables,
 * allocate the shared all-faults "zero" lv2 table (used by the v3.3 FLPD
 * workaround), and register the platform driver. Does nothing on systems
 * whose device tree has no SYSMMU node.
 */
static int __init exynos_iommu_init(void)
{
	struct device_node *np;
	int ret;

	np = of_find_matching_node(NULL, sysmmu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	/* Shared lv2 table containing only fault entries (ZERO_LV2LINK). */
	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	return 0;
err_reg_driver:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
core_initcall(exynos_iommu_init);
1532