xref: /openbmc/linux/drivers/iommu/exynos-iommu.c (revision eeb5184b)
12a96536eSKyongHo Cho /* linux/drivers/iommu/exynos_iommu.c
22a96536eSKyongHo Cho  *
32a96536eSKyongHo Cho  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
42a96536eSKyongHo Cho  *		http://www.samsung.com
52a96536eSKyongHo Cho  *
62a96536eSKyongHo Cho  * This program is free software; you can redistribute it and/or modify
72a96536eSKyongHo Cho  * it under the terms of the GNU General Public License version 2 as
82a96536eSKyongHo Cho  * published by the Free Software Foundation.
92a96536eSKyongHo Cho  */
102a96536eSKyongHo Cho 
112a96536eSKyongHo Cho #ifdef CONFIG_EXYNOS_IOMMU_DEBUG
122a96536eSKyongHo Cho #define DEBUG
132a96536eSKyongHo Cho #endif
142a96536eSKyongHo Cho 
152a96536eSKyongHo Cho #include <linux/io.h>
162a96536eSKyongHo Cho #include <linux/interrupt.h>
172a96536eSKyongHo Cho #include <linux/platform_device.h>
182a96536eSKyongHo Cho #include <linux/slab.h>
192a96536eSKyongHo Cho #include <linux/pm_runtime.h>
202a96536eSKyongHo Cho #include <linux/clk.h>
212a96536eSKyongHo Cho #include <linux/err.h>
222a96536eSKyongHo Cho #include <linux/mm.h>
232a96536eSKyongHo Cho #include <linux/iommu.h>
242a96536eSKyongHo Cho #include <linux/errno.h>
252a96536eSKyongHo Cho #include <linux/list.h>
262a96536eSKyongHo Cho #include <linux/memblock.h>
272a96536eSKyongHo Cho #include <linux/export.h>
282a96536eSKyongHo Cho 
292a96536eSKyongHo Cho #include <asm/cacheflush.h>
302a96536eSKyongHo Cho #include <asm/pgtable.h>
312a96536eSKyongHo Cho 
32d09d78fcSCho KyongHo typedef u32 sysmmu_iova_t;
33d09d78fcSCho KyongHo typedef u32 sysmmu_pte_t;
34d09d78fcSCho KyongHo 
352a96536eSKyongHo Cho /* We do not consider super section mapping (16MB) */
362a96536eSKyongHo Cho #define SECT_ORDER 20
372a96536eSKyongHo Cho #define LPAGE_ORDER 16
382a96536eSKyongHo Cho #define SPAGE_ORDER 12
392a96536eSKyongHo Cho 
402a96536eSKyongHo Cho #define SECT_SIZE (1 << SECT_ORDER)
412a96536eSKyongHo Cho #define LPAGE_SIZE (1 << LPAGE_ORDER)
422a96536eSKyongHo Cho #define SPAGE_SIZE (1 << SPAGE_ORDER)
432a96536eSKyongHo Cho 
442a96536eSKyongHo Cho #define SECT_MASK (~(SECT_SIZE - 1))
452a96536eSKyongHo Cho #define LPAGE_MASK (~(LPAGE_SIZE - 1))
462a96536eSKyongHo Cho #define SPAGE_MASK (~(SPAGE_SIZE - 1))
472a96536eSKyongHo Cho 
482a96536eSKyongHo Cho #define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
492a96536eSKyongHo Cho #define lv1ent_page(sent) ((*(sent) & 3) == 1)
502a96536eSKyongHo Cho #define lv1ent_section(sent) ((*(sent) & 3) == 2)
512a96536eSKyongHo Cho 
522a96536eSKyongHo Cho #define lv2ent_fault(pent) ((*(pent) & 3) == 0)
532a96536eSKyongHo Cho #define lv2ent_small(pent) ((*(pent) & 2) == 2)
542a96536eSKyongHo Cho #define lv2ent_large(pent) ((*(pent) & 3) == 1)
552a96536eSKyongHo Cho 
56d09d78fcSCho KyongHo static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
57d09d78fcSCho KyongHo {
58d09d78fcSCho KyongHo 	return iova & (size - 1);
59d09d78fcSCho KyongHo }
602a96536eSKyongHo Cho 
61d09d78fcSCho KyongHo #define section_phys(sent) (*(sent) & SECT_MASK)
62d09d78fcSCho KyongHo #define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
63d09d78fcSCho KyongHo #define lpage_phys(pent) (*(pent) & LPAGE_MASK)
64d09d78fcSCho KyongHo #define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
65d09d78fcSCho KyongHo #define spage_phys(pent) (*(pent) & SPAGE_MASK)
66d09d78fcSCho KyongHo #define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)
672a96536eSKyongHo Cho 
682a96536eSKyongHo Cho #define NUM_LV1ENTRIES 4096
69d09d78fcSCho KyongHo #define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
702a96536eSKyongHo Cho 
71d09d78fcSCho KyongHo static u32 lv1ent_offset(sysmmu_iova_t iova)
72d09d78fcSCho KyongHo {
73d09d78fcSCho KyongHo 	return iova >> SECT_ORDER;
74d09d78fcSCho KyongHo }
75d09d78fcSCho KyongHo 
76d09d78fcSCho KyongHo static u32 lv2ent_offset(sysmmu_iova_t iova)
77d09d78fcSCho KyongHo {
78d09d78fcSCho KyongHo 	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
79d09d78fcSCho KyongHo }
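
/*
 * Worked example for the two-level lookup above (illustrative only, not
 * part of the original driver): for iova 0x12345678,
 *
 *	lv1ent_offset(0x12345678) = 0x12345678 >> 20           = 0x123
 *	lv2ent_offset(0x12345678) = (0x12345678 >> 12) & 0xFF  = 0x45
 *	page offset               = 0x12345678 & (SPAGE_SIZE - 1) = 0x678
 *
 * The Lv1 table thus holds 4096 entries of 4 bytes (16KiB) covering the
 * 32-bit IOVA space in 1MiB sections, and each Lv2 table holds 256
 * entries of 4 bytes (1KiB) covering one section in 4KiB pages.
 */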
80d09d78fcSCho KyongHo 
81d09d78fcSCho KyongHo #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
822a96536eSKyongHo Cho 
832a96536eSKyongHo Cho #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
842a96536eSKyongHo Cho 
852a96536eSKyongHo Cho #define lv2table_base(sent) (*(sent) & 0xFFFFFC00)
862a96536eSKyongHo Cho 
872a96536eSKyongHo Cho #define mk_lv1ent_sect(pa) ((pa) | 2)
882a96536eSKyongHo Cho #define mk_lv1ent_page(pa) ((pa) | 1)
892a96536eSKyongHo Cho #define mk_lv2ent_lpage(pa) ((pa) | 1)
902a96536eSKyongHo Cho #define mk_lv2ent_spage(pa) ((pa) | 2)
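
/*
 * Illustrative examples of the descriptor encodings above (addresses are
 * arbitrary, not from the original source); the low two bits select the
 * entry type and the remaining bits carry the physical address:
 *
 *	mk_lv1ent_sect(0x40000000)  = 0x40000002  ->  lv1ent_section() true
 *	mk_lv1ent_page(0x4F2FFC00)  = 0x4F2FFC01  ->  lv1ent_page() true
 *	mk_lv2ent_lpage(0x40010000) = 0x40010001  ->  lv2ent_large() true
 *	mk_lv2ent_spage(0x40001000) = 0x40001002  ->  lv2ent_small() true
 *
 * A zero entry at either level is a fault entry.
 */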
912a96536eSKyongHo Cho 
922a96536eSKyongHo Cho #define CTRL_ENABLE	0x5
932a96536eSKyongHo Cho #define CTRL_BLOCK	0x7
942a96536eSKyongHo Cho #define CTRL_DISABLE	0x0
952a96536eSKyongHo Cho 
96eeb5184bSCho KyongHo #define CFG_LRU		0x1
97eeb5184bSCho KyongHo #define CFG_QOS(n)	(((n) & 0xF) << 7)
98eeb5184bSCho KyongHo #define CFG_MASK	0x0150FFFF /* Selecting bits 0-15, 20, 22 and 24 */
99eeb5184bSCho KyongHo #define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
100eeb5184bSCho KyongHo #define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
101eeb5184bSCho KyongHo #define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */
102eeb5184bSCho KyongHo 
1032a96536eSKyongHo Cho #define REG_MMU_CTRL		0x000
1042a96536eSKyongHo Cho #define REG_MMU_CFG		0x004
1052a96536eSKyongHo Cho #define REG_MMU_STATUS		0x008
1062a96536eSKyongHo Cho #define REG_MMU_FLUSH		0x00C
1072a96536eSKyongHo Cho #define REG_MMU_FLUSH_ENTRY	0x010
1082a96536eSKyongHo Cho #define REG_PT_BASE_ADDR	0x014
1092a96536eSKyongHo Cho #define REG_INT_STATUS		0x018
1102a96536eSKyongHo Cho #define REG_INT_CLEAR		0x01C
1112a96536eSKyongHo Cho 
1122a96536eSKyongHo Cho #define REG_PAGE_FAULT_ADDR	0x024
1132a96536eSKyongHo Cho #define REG_AW_FAULT_ADDR	0x028
1142a96536eSKyongHo Cho #define REG_AR_FAULT_ADDR	0x02C
1152a96536eSKyongHo Cho #define REG_DEFAULT_SLAVE_ADDR	0x030
1162a96536eSKyongHo Cho 
1172a96536eSKyongHo Cho #define REG_MMU_VERSION		0x034
1182a96536eSKyongHo Cho 
119eeb5184bSCho KyongHo #define MMU_MAJ_VER(val)	((val) >> 7)
120eeb5184bSCho KyongHo #define MMU_MIN_VER(val)	((val) & 0x7F)
121eeb5184bSCho KyongHo #define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
122eeb5184bSCho KyongHo 
123eeb5184bSCho KyongHo #define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))
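
/*
 * Worked example (illustrative, not part of the original source): a
 * System MMU 3.2 reports MAKE_MMU_VER(3, 2) = (3 << 7) | 2 = 0x182 in
 * bits [31:21] of REG_MMU_VERSION, i.e. a raw register value of
 * 0x30400000.  MMU_RAW_VER(0x30400000) = 0x182, MMU_MAJ_VER(0x182) = 3
 * and MMU_MIN_VER(0x182) = 2.
 */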
124eeb5184bSCho KyongHo 
1252a96536eSKyongHo Cho #define REG_PB0_SADDR		0x04C
1262a96536eSKyongHo Cho #define REG_PB0_EADDR		0x050
1272a96536eSKyongHo Cho #define REG_PB1_SADDR		0x054
1282a96536eSKyongHo Cho #define REG_PB1_EADDR		0x058
1292a96536eSKyongHo Cho 
1306b21a5dbSCho KyongHo #define has_sysmmu(dev)		(dev->archdata.iommu != NULL)
1316b21a5dbSCho KyongHo 
132734c3c73SCho KyongHo static struct kmem_cache *lv2table_kmem_cache;
133734c3c73SCho KyongHo 
134d09d78fcSCho KyongHo static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
1352a96536eSKyongHo Cho {
1362a96536eSKyongHo Cho 	return pgtable + lv1ent_offset(iova);
1372a96536eSKyongHo Cho }
1382a96536eSKyongHo Cho 
139d09d78fcSCho KyongHo static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
1402a96536eSKyongHo Cho {
141d09d78fcSCho KyongHo 	return (sysmmu_pte_t *)phys_to_virt(
1427222e8dbSCho KyongHo 				lv2table_base(sent)) + lv2ent_offset(iova);
1432a96536eSKyongHo Cho }
1442a96536eSKyongHo Cho 
1452a96536eSKyongHo Cho enum exynos_sysmmu_inttype {
1462a96536eSKyongHo Cho 	SYSMMU_PAGEFAULT,
1472a96536eSKyongHo Cho 	SYSMMU_AR_MULTIHIT,
1482a96536eSKyongHo Cho 	SYSMMU_AW_MULTIHIT,
1492a96536eSKyongHo Cho 	SYSMMU_BUSERROR,
1502a96536eSKyongHo Cho 	SYSMMU_AR_SECURITY,
1512a96536eSKyongHo Cho 	SYSMMU_AR_ACCESS,
1522a96536eSKyongHo Cho 	SYSMMU_AW_SECURITY,
1532a96536eSKyongHo Cho 	SYSMMU_AW_PROTECTION, /* 7 */
1542a96536eSKyongHo Cho 	SYSMMU_FAULT_UNKNOWN,
1552a96536eSKyongHo Cho 	SYSMMU_FAULTS_NUM
1562a96536eSKyongHo Cho };
1572a96536eSKyongHo Cho 
1582a96536eSKyongHo Cho static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
1592a96536eSKyongHo Cho 	REG_PAGE_FAULT_ADDR,
1602a96536eSKyongHo Cho 	REG_AR_FAULT_ADDR,
1612a96536eSKyongHo Cho 	REG_AW_FAULT_ADDR,
1622a96536eSKyongHo Cho 	REG_DEFAULT_SLAVE_ADDR,
1632a96536eSKyongHo Cho 	REG_AR_FAULT_ADDR,
1642a96536eSKyongHo Cho 	REG_AR_FAULT_ADDR,
1652a96536eSKyongHo Cho 	REG_AW_FAULT_ADDR,
1662a96536eSKyongHo Cho 	REG_AW_FAULT_ADDR
1672a96536eSKyongHo Cho };
1682a96536eSKyongHo Cho 
1692a96536eSKyongHo Cho static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
1702a96536eSKyongHo Cho 	"PAGE FAULT",
1712a96536eSKyongHo Cho 	"AR MULTI-HIT FAULT",
1722a96536eSKyongHo Cho 	"AW MULTI-HIT FAULT",
1732a96536eSKyongHo Cho 	"BUS ERROR",
1742a96536eSKyongHo Cho 	"AR SECURITY PROTECTION FAULT",
1752a96536eSKyongHo Cho 	"AR ACCESS PROTECTION FAULT",
1762a96536eSKyongHo Cho 	"AW SECURITY PROTECTION FAULT",
1772a96536eSKyongHo Cho 	"AW ACCESS PROTECTION FAULT",
1782a96536eSKyongHo Cho 	"UNKNOWN FAULT"
1792a96536eSKyongHo Cho };
1802a96536eSKyongHo Cho 
1816b21a5dbSCho KyongHo /* attached to dev.archdata.iommu of the master device */
1826b21a5dbSCho KyongHo struct exynos_iommu_owner {
1836b21a5dbSCho KyongHo 	struct list_head client; /* entry of exynos_iommu_domain.clients */
1846b21a5dbSCho KyongHo 	struct device *dev;
1856b21a5dbSCho KyongHo 	struct device *sysmmu;
1866b21a5dbSCho KyongHo 	struct iommu_domain *domain;
1876b21a5dbSCho KyongHo 	void *vmm_data;         /* IO virtual memory manager's data */
1886b21a5dbSCho KyongHo 	spinlock_t lock;        /* Lock to preserve consistency of System MMU */
1896b21a5dbSCho KyongHo };
1906b21a5dbSCho KyongHo 
1912a96536eSKyongHo Cho struct exynos_iommu_domain {
1922a96536eSKyongHo Cho 	struct list_head clients; /* list of exynos_iommu_owner.client */
193d09d78fcSCho KyongHo 	sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */
1942a96536eSKyongHo Cho 	short *lv2entcnt; /* free lv2 entry counter for each section */
1952a96536eSKyongHo Cho 	spinlock_t lock; /* lock for this structure */
1962a96536eSKyongHo Cho 	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
1972a96536eSKyongHo Cho };
1982a96536eSKyongHo Cho 
1992a96536eSKyongHo Cho struct sysmmu_drvdata {
2002a96536eSKyongHo Cho 	struct device *sysmmu;	/* System MMU's device descriptor */
2016b21a5dbSCho KyongHo 	struct device *master;	/* Owner of system MMU */
2027222e8dbSCho KyongHo 	void __iomem *sfrbase;
2037222e8dbSCho KyongHo 	struct clk *clk;
20470605870SCho KyongHo 	struct clk *clk_master;
2052a96536eSKyongHo Cho 	int activations;
2069d4e7a24SCho KyongHo 	spinlock_t lock;
2072a96536eSKyongHo Cho 	struct iommu_domain *domain;
2087222e8dbSCho KyongHo 	phys_addr_t pgtable;
2092a96536eSKyongHo Cho };
2102a96536eSKyongHo Cho 
2112a96536eSKyongHo Cho static bool set_sysmmu_active(struct sysmmu_drvdata *data)
2122a96536eSKyongHo Cho {
2132a96536eSKyongHo Cho 	/* return true if the System MMU was not active previously
2142a96536eSKyongHo Cho 	   and it needs to be initialized */
2152a96536eSKyongHo Cho 	return ++data->activations == 1;
2162a96536eSKyongHo Cho }
2172a96536eSKyongHo Cho 
2182a96536eSKyongHo Cho static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
2192a96536eSKyongHo Cho {
2202a96536eSKyongHo Cho 	/* return true if the System MMU needs to be disabled */
2212a96536eSKyongHo Cho 	BUG_ON(data->activations < 1);
2222a96536eSKyongHo Cho 	return --data->activations == 0;
2232a96536eSKyongHo Cho }
2242a96536eSKyongHo Cho 
2252a96536eSKyongHo Cho static bool is_sysmmu_active(struct sysmmu_drvdata *data)
2262a96536eSKyongHo Cho {
2272a96536eSKyongHo Cho 	return data->activations > 0;
2282a96536eSKyongHo Cho }
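
/*
 * Illustrative call sequence for the activation counting above (not part
 * of the original driver), starting from activations == 0:
 *
 *	set_sysmmu_active()   -> 1, returns true  (hardware must be enabled)
 *	set_sysmmu_active()   -> 2, returns false (already running)
 *	set_sysmmu_inactive() -> 1, returns false (keep running)
 *	set_sysmmu_inactive() -> 0, returns true  (hardware may be disabled)
 */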
2292a96536eSKyongHo Cho 
2302a96536eSKyongHo Cho static void sysmmu_unblock(void __iomem *sfrbase)
2312a96536eSKyongHo Cho {
2322a96536eSKyongHo Cho 	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
2332a96536eSKyongHo Cho }
2342a96536eSKyongHo Cho 
235eeb5184bSCho KyongHo static unsigned int __raw_sysmmu_version(struct sysmmu_drvdata *data)
236eeb5184bSCho KyongHo {
237eeb5184bSCho KyongHo 	return MMU_RAW_VER(__raw_readl(data->sfrbase + REG_MMU_VERSION));
238eeb5184bSCho KyongHo }
239eeb5184bSCho KyongHo 
2402a96536eSKyongHo Cho static bool sysmmu_block(void __iomem *sfrbase)
2412a96536eSKyongHo Cho {
2422a96536eSKyongHo Cho 	int i = 120;
2432a96536eSKyongHo Cho 
2442a96536eSKyongHo Cho 	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
2452a96536eSKyongHo Cho 	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
2462a96536eSKyongHo Cho 		--i;
2472a96536eSKyongHo Cho 
2482a96536eSKyongHo Cho 	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
2492a96536eSKyongHo Cho 		sysmmu_unblock(sfrbase);
2502a96536eSKyongHo Cho 		return false;
2512a96536eSKyongHo Cho 	}
2522a96536eSKyongHo Cho 
2532a96536eSKyongHo Cho 	return true;
2542a96536eSKyongHo Cho }
2552a96536eSKyongHo Cho 
2562a96536eSKyongHo Cho static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
2572a96536eSKyongHo Cho {
2582a96536eSKyongHo Cho 	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
2592a96536eSKyongHo Cho }
2602a96536eSKyongHo Cho 
2612a96536eSKyongHo Cho static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
262d09d78fcSCho KyongHo 				sysmmu_iova_t iova, unsigned int num_inv)
2632a96536eSKyongHo Cho {
2643ad6b7f3SCho KyongHo 	unsigned int i;
2653ad6b7f3SCho KyongHo 	for (i = 0; i < num_inv; i++) {
2663ad6b7f3SCho KyongHo 		__raw_writel((iova & SPAGE_MASK) | 1,
2673ad6b7f3SCho KyongHo 				sfrbase + REG_MMU_FLUSH_ENTRY);
2683ad6b7f3SCho KyongHo 		iova += SPAGE_SIZE;
2693ad6b7f3SCho KyongHo 	}
2702a96536eSKyongHo Cho }
2712a96536eSKyongHo Cho 
2722a96536eSKyongHo Cho static void __sysmmu_set_ptbase(void __iomem *sfrbase,
273d09d78fcSCho KyongHo 				       phys_addr_t pgd)
2742a96536eSKyongHo Cho {
2752a96536eSKyongHo Cho 	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);
2762a96536eSKyongHo Cho 
2772a96536eSKyongHo Cho 	__sysmmu_tlb_invalidate(sfrbase);
2782a96536eSKyongHo Cho }
2792a96536eSKyongHo Cho 
2801fab7fa7SCho KyongHo static void show_fault_information(const char *name,
2811fab7fa7SCho KyongHo 		enum exynos_sysmmu_inttype itype,
282d09d78fcSCho KyongHo 		phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
2832a96536eSKyongHo Cho {
284d09d78fcSCho KyongHo 	sysmmu_pte_t *ent;
2852a96536eSKyongHo Cho 
2862a96536eSKyongHo Cho 	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
2872a96536eSKyongHo Cho 		itype = SYSMMU_FAULT_UNKNOWN;
2882a96536eSKyongHo Cho 
289d09d78fcSCho KyongHo 	pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
2901fab7fa7SCho KyongHo 		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);
2912a96536eSKyongHo Cho 
2927222e8dbSCho KyongHo 	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
293d09d78fcSCho KyongHo 	pr_err("\tLv1 entry: %#x\n", *ent);
2942a96536eSKyongHo Cho 
2952a96536eSKyongHo Cho 	if (lv1ent_page(ent)) {
2962a96536eSKyongHo Cho 		ent = page_entry(ent, fault_addr);
297d09d78fcSCho KyongHo 		pr_err("\t Lv2 entry: %#x\n", *ent);
2982a96536eSKyongHo Cho 	}
2992a96536eSKyongHo Cho }
3002a96536eSKyongHo Cho 
3012a96536eSKyongHo Cho static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
3022a96536eSKyongHo Cho {
3032a96536eSKyongHo Cho 	/* SYSMMU is in the blocked state when an interrupt occurs. */
3042a96536eSKyongHo Cho 	struct sysmmu_drvdata *data = dev_id;
3052a96536eSKyongHo Cho 	enum exynos_sysmmu_inttype itype;
306d09d78fcSCho KyongHo 	sysmmu_iova_t addr = -1;
3077222e8dbSCho KyongHo 	int ret = -ENOSYS;
3082a96536eSKyongHo Cho 
3092a96536eSKyongHo Cho 	WARN_ON(!is_sysmmu_active(data));
3102a96536eSKyongHo Cho 
3119d4e7a24SCho KyongHo 	spin_lock(&data->lock);
3129d4e7a24SCho KyongHo 
31370605870SCho KyongHo 	if (!IS_ERR(data->clk_master))
31470605870SCho KyongHo 		clk_enable(data->clk_master);
3159d4e7a24SCho KyongHo 
3162a96536eSKyongHo Cho 	itype = (enum exynos_sysmmu_inttype)
3177222e8dbSCho KyongHo 		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
3182a96536eSKyongHo Cho 	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
3192a96536eSKyongHo Cho 		itype = SYSMMU_FAULT_UNKNOWN;
3202a96536eSKyongHo Cho 	else
3217222e8dbSCho KyongHo 		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);
3222a96536eSKyongHo Cho 
3231fab7fa7SCho KyongHo 	if (itype == SYSMMU_FAULT_UNKNOWN) {
3241fab7fa7SCho KyongHo 		pr_err("%s: Fault is not caused by System MMU '%s'!\n",
3251fab7fa7SCho KyongHo 			__func__, dev_name(data->sysmmu));
3261fab7fa7SCho KyongHo 		pr_err("%s: Please check if IRQ is correctly configured.\n",
3271fab7fa7SCho KyongHo 			__func__);
3281fab7fa7SCho KyongHo 		BUG();
3291fab7fa7SCho KyongHo 	} else {
330d09d78fcSCho KyongHo 		unsigned int base =
3311fab7fa7SCho KyongHo 				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
3321fab7fa7SCho KyongHo 		show_fault_information(dev_name(data->sysmmu),
3331fab7fa7SCho KyongHo 					itype, base, addr);
3342a96536eSKyongHo Cho 		if (data->domain)
3351fab7fa7SCho KyongHo 			ret = report_iommu_fault(data->domain,
3366b21a5dbSCho KyongHo 					data->master, addr, itype);
3372a96536eSKyongHo Cho 	}
3382a96536eSKyongHo Cho 
3391fab7fa7SCho KyongHo 	/* fault is not recovered by fault handler */
3401fab7fa7SCho KyongHo 	BUG_ON(ret != 0);
3412a96536eSKyongHo Cho 
3421fab7fa7SCho KyongHo 	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);
3431fab7fa7SCho KyongHo 
3447222e8dbSCho KyongHo 	sysmmu_unblock(data->sfrbase);
3452a96536eSKyongHo Cho 
34670605870SCho KyongHo 	if (!IS_ERR(data->clk_master))
34770605870SCho KyongHo 		clk_disable(data->clk_master);
34870605870SCho KyongHo 
3499d4e7a24SCho KyongHo 	spin_unlock(&data->lock);
3502a96536eSKyongHo Cho 
3512a96536eSKyongHo Cho 	return IRQ_HANDLED;
3522a96536eSKyongHo Cho }
3532a96536eSKyongHo Cho 
3546b21a5dbSCho KyongHo static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
3552a96536eSKyongHo Cho {
35670605870SCho KyongHo 	if (!IS_ERR(data->clk_master))
35770605870SCho KyongHo 		clk_enable(data->clk_master);
35870605870SCho KyongHo 
3597222e8dbSCho KyongHo 	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
3606b21a5dbSCho KyongHo 	__raw_writel(0, data->sfrbase + REG_MMU_CFG);
3612a96536eSKyongHo Cho 
3627222e8dbSCho KyongHo 	clk_disable(data->clk);
36370605870SCho KyongHo 	if (!IS_ERR(data->clk_master))
36470605870SCho KyongHo 		clk_disable(data->clk_master);
3656b21a5dbSCho KyongHo }
3662a96536eSKyongHo Cho 
3676b21a5dbSCho KyongHo static bool __sysmmu_disable(struct sysmmu_drvdata *data)
3686b21a5dbSCho KyongHo {
3696b21a5dbSCho KyongHo 	bool disabled;
3706b21a5dbSCho KyongHo 	unsigned long flags;
3716b21a5dbSCho KyongHo 
3726b21a5dbSCho KyongHo 	spin_lock_irqsave(&data->lock, flags);
3736b21a5dbSCho KyongHo 
3746b21a5dbSCho KyongHo 	disabled = set_sysmmu_inactive(data);
3756b21a5dbSCho KyongHo 
3766b21a5dbSCho KyongHo 	if (disabled) {
3772a96536eSKyongHo Cho 		data->pgtable = 0;
3782a96536eSKyongHo Cho 		data->domain = NULL;
3796b21a5dbSCho KyongHo 
3806b21a5dbSCho KyongHo 		__sysmmu_disable_nocount(data);
3816b21a5dbSCho KyongHo 
3826b21a5dbSCho KyongHo 		dev_dbg(data->sysmmu, "Disabled\n");
3836b21a5dbSCho KyongHo 	} else  {
3846b21a5dbSCho KyongHo 		dev_dbg(data->sysmmu, "%d times left to disable\n",
3856b21a5dbSCho KyongHo 					data->activations);
3866b21a5dbSCho KyongHo 	}
3876b21a5dbSCho KyongHo 
3889d4e7a24SCho KyongHo 	spin_unlock_irqrestore(&data->lock, flags);
3892a96536eSKyongHo Cho 
3902a96536eSKyongHo Cho 	return disabled;
3912a96536eSKyongHo Cho }
3922a96536eSKyongHo Cho 
3936b21a5dbSCho KyongHo static void __sysmmu_init_config(struct sysmmu_drvdata *data)
3946b21a5dbSCho KyongHo {
395eeb5184bSCho KyongHo 	unsigned int cfg = CFG_LRU | CFG_QOS(15);
396eeb5184bSCho KyongHo 	unsigned int ver;
397eeb5184bSCho KyongHo 
398eeb5184bSCho KyongHo 	ver = __raw_sysmmu_version(data);
399eeb5184bSCho KyongHo 	if (MMU_MAJ_VER(ver) == 3) {
400eeb5184bSCho KyongHo 		if (MMU_MIN_VER(ver) >= 2) {
401eeb5184bSCho KyongHo 			cfg |= CFG_FLPDCACHE;
402eeb5184bSCho KyongHo 			if (MMU_MIN_VER(ver) == 3) {
403eeb5184bSCho KyongHo 				cfg |= CFG_ACGEN;
404eeb5184bSCho KyongHo 				cfg &= ~CFG_LRU;
405eeb5184bSCho KyongHo 			} else {
406eeb5184bSCho KyongHo 				cfg |= CFG_SYSSEL;
407eeb5184bSCho KyongHo 			}
408eeb5184bSCho KyongHo 		}
409eeb5184bSCho KyongHo 	}
4106b21a5dbSCho KyongHo 
4116b21a5dbSCho KyongHo 	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
4126b21a5dbSCho KyongHo }
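
/*
 * Resulting REG_MMU_CFG values for a few versions, worked out from the
 * logic above (illustrative only):
 *
 *	System MMU 1.x/2.x/3.0/3.1: 0x781     (CFG_LRU | CFG_QOS(15))
 *	System MMU 3.2:             0x500781  (... | CFG_FLPDCACHE | CFG_SYSSEL)
 *	System MMU 3.3:             0x1100780 (CFG_QOS(15) | CFG_FLPDCACHE |
 *	                                       CFG_ACGEN, LRU bit cleared)
 */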
4136b21a5dbSCho KyongHo 
4146b21a5dbSCho KyongHo static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
4156b21a5dbSCho KyongHo {
4166b21a5dbSCho KyongHo 	if (!IS_ERR(data->clk_master))
4176b21a5dbSCho KyongHo 		clk_enable(data->clk_master);
4186b21a5dbSCho KyongHo 	clk_enable(data->clk);
4196b21a5dbSCho KyongHo 
4206b21a5dbSCho KyongHo 	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
4216b21a5dbSCho KyongHo 
4226b21a5dbSCho KyongHo 	__sysmmu_init_config(data);
4236b21a5dbSCho KyongHo 
4246b21a5dbSCho KyongHo 	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);
4256b21a5dbSCho KyongHo 
4266b21a5dbSCho KyongHo 	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
4276b21a5dbSCho KyongHo 
4286b21a5dbSCho KyongHo 	if (!IS_ERR(data->clk_master))
4296b21a5dbSCho KyongHo 		clk_disable(data->clk_master);
4306b21a5dbSCho KyongHo }
4316b21a5dbSCho KyongHo 
4326b21a5dbSCho KyongHo static int __sysmmu_enable(struct sysmmu_drvdata *data,
4336b21a5dbSCho KyongHo 			phys_addr_t pgtable, struct iommu_domain *domain)
4346b21a5dbSCho KyongHo {
4356b21a5dbSCho KyongHo 	int ret = 0;
4366b21a5dbSCho KyongHo 	unsigned long flags;
4376b21a5dbSCho KyongHo 
4386b21a5dbSCho KyongHo 	spin_lock_irqsave(&data->lock, flags);
4396b21a5dbSCho KyongHo 	if (set_sysmmu_active(data)) {
4406b21a5dbSCho KyongHo 		data->pgtable = pgtable;
4416b21a5dbSCho KyongHo 		data->domain = domain;
4426b21a5dbSCho KyongHo 
4436b21a5dbSCho KyongHo 		__sysmmu_enable_nocount(data);
4446b21a5dbSCho KyongHo 
4456b21a5dbSCho KyongHo 		dev_dbg(data->sysmmu, "Enabled\n");
4466b21a5dbSCho KyongHo 	} else {
4476b21a5dbSCho KyongHo 		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;
4486b21a5dbSCho KyongHo 
4496b21a5dbSCho KyongHo 		dev_dbg(data->sysmmu, "already enabled\n");
4506b21a5dbSCho KyongHo 	}
4516b21a5dbSCho KyongHo 
4526b21a5dbSCho KyongHo 	if (WARN_ON(ret < 0))
4536b21a5dbSCho KyongHo 		set_sysmmu_inactive(data); /* decrement count */
4546b21a5dbSCho KyongHo 
4556b21a5dbSCho KyongHo 	spin_unlock_irqrestore(&data->lock, flags);
4566b21a5dbSCho KyongHo 
4576b21a5dbSCho KyongHo 	return ret;
4586b21a5dbSCho KyongHo }
4596b21a5dbSCho KyongHo 
4602a96536eSKyongHo Cho /* __exynos_sysmmu_enable: Enables System MMU
4612a96536eSKyongHo Cho  *
4622a96536eSKyongHo Cho  * returns -error if an error occurred and the System MMU was not enabled,
4632a96536eSKyongHo Cho  * 0 if the System MMU has just been enabled, and 1 if the System MMU was
4642a96536eSKyongHo Cho  * already enabled before.
4652a96536eSKyongHo Cho  */
4666b21a5dbSCho KyongHo static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
4676b21a5dbSCho KyongHo 				  struct iommu_domain *domain)
4682a96536eSKyongHo Cho {
4697222e8dbSCho KyongHo 	int ret = 0;
4702a96536eSKyongHo Cho 	unsigned long flags;
4716b21a5dbSCho KyongHo 	struct exynos_iommu_owner *owner = dev->archdata.iommu;
4726b21a5dbSCho KyongHo 	struct sysmmu_drvdata *data;
4732a96536eSKyongHo Cho 
4746b21a5dbSCho KyongHo 	BUG_ON(!has_sysmmu(dev));
4752a96536eSKyongHo Cho 
4766b21a5dbSCho KyongHo 	spin_lock_irqsave(&owner->lock, flags);
4772a96536eSKyongHo Cho 
4786b21a5dbSCho KyongHo 	data = dev_get_drvdata(owner->sysmmu);
4792a96536eSKyongHo Cho 
4806b21a5dbSCho KyongHo 	ret = __sysmmu_enable(data, pgtable, domain);
4816b21a5dbSCho KyongHo 	if (ret >= 0)
4826b21a5dbSCho KyongHo 		data->master = dev;
4832a96536eSKyongHo Cho 
4846b21a5dbSCho KyongHo 	spin_unlock_irqrestore(&owner->lock, flags);
4852a96536eSKyongHo Cho 
4862a96536eSKyongHo Cho 	return ret;
4872a96536eSKyongHo Cho }
4882a96536eSKyongHo Cho 
489d09d78fcSCho KyongHo int exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable)
4902a96536eSKyongHo Cho {
4912a96536eSKyongHo Cho 	BUG_ON(!memblock_is_memory(pgtable));
4922a96536eSKyongHo Cho 
4936b21a5dbSCho KyongHo 	return __exynos_sysmmu_enable(dev, pgtable, NULL);
4942a96536eSKyongHo Cho }
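
/*
 * Usage sketch (not part of the original source; my_dev and my_pgtable
 * are hypothetical names): a master device driver that manages its own
 * 16KiB level-1 page table in system memory could enable its System MMU
 * with
 *
 *	ret = exynos_sysmmu_enable(my_dev, virt_to_phys(my_pgtable));
 *
 * where ret == 0 means the System MMU has just been enabled, ret == 1
 * means it was already enabled with the same page table and ret < 0 is
 * an error.  After modifying the page table, the driver would call
 * exynos_sysmmu_tlb_invalidate(my_dev) to drop stale TLB entries.
 */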
4952a96536eSKyongHo Cho 
49677e38350SSachin Kamat static bool exynos_sysmmu_disable(struct device *dev)
4972a96536eSKyongHo Cho {
4986b21a5dbSCho KyongHo 	unsigned long flags;
4996b21a5dbSCho KyongHo 	bool disabled = true;
5006b21a5dbSCho KyongHo 	struct exynos_iommu_owner *owner = dev->archdata.iommu;
5016b21a5dbSCho KyongHo 	struct sysmmu_drvdata *data;
5022a96536eSKyongHo Cho 
5036b21a5dbSCho KyongHo 	BUG_ON(!has_sysmmu(dev));
5046b21a5dbSCho KyongHo 
5056b21a5dbSCho KyongHo 	spin_lock_irqsave(&owner->lock, flags);
5066b21a5dbSCho KyongHo 
5076b21a5dbSCho KyongHo 	data = dev_get_drvdata(owner->sysmmu);
5086b21a5dbSCho KyongHo 
5096b21a5dbSCho KyongHo 	disabled = __sysmmu_disable(data);
5106b21a5dbSCho KyongHo 	if (disabled)
5116b21a5dbSCho KyongHo 		data->master = NULL;
5126b21a5dbSCho KyongHo 
5136b21a5dbSCho KyongHo 	spin_unlock_irqrestore(&owner->lock, flags);
5142a96536eSKyongHo Cho 
5152a96536eSKyongHo Cho 	return disabled;
5162a96536eSKyongHo Cho }
5172a96536eSKyongHo Cho 
518d09d78fcSCho KyongHo static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
5193ad6b7f3SCho KyongHo 					size_t size)
5202a96536eSKyongHo Cho {
5216b21a5dbSCho KyongHo 	struct exynos_iommu_owner *owner = dev->archdata.iommu;
5222a96536eSKyongHo Cho 	unsigned long flags;
5236b21a5dbSCho KyongHo 	struct sysmmu_drvdata *data;
5246b21a5dbSCho KyongHo 
5256b21a5dbSCho KyongHo 	data = dev_get_drvdata(owner->sysmmu);
5262a96536eSKyongHo Cho 
5279d4e7a24SCho KyongHo 	spin_lock_irqsave(&data->lock, flags);
5282a96536eSKyongHo Cho 	if (is_sysmmu_active(data)) {
5293ad6b7f3SCho KyongHo 		unsigned int num_inv = 1;
53070605870SCho KyongHo 
53170605870SCho KyongHo 		if (!IS_ERR(data->clk_master))
53270605870SCho KyongHo 			clk_enable(data->clk_master);
53370605870SCho KyongHo 
5343ad6b7f3SCho KyongHo 		/*
5353ad6b7f3SCho KyongHo 		 * L2TLB invalidations required:
5363ad6b7f3SCho KyongHo 		 * 4KB page: 1 invalidation
5373ad6b7f3SCho KyongHo 		 * 64KB page: 16 invalidations
5383ad6b7f3SCho KyongHo 		 * 1MB page: 64 invalidations
5393ad6b7f3SCho KyongHo 		 * because the TLB is 8-way set-associative
5403ad6b7f3SCho KyongHo 		 * with 64 sets.
5413ad6b7f3SCho KyongHo 		 * A 1MB page can be cached in any of the 64 sets.
5423ad6b7f3SCho KyongHo 		 * A 64KB page can be cached in one of 16 consecutive sets.
5433ad6b7f3SCho KyongHo 		 */
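		/*
		 * For example (illustrative): on a version 2 System MMU,
		 * unmapping one 64KB large page gives size / PAGE_SIZE = 16,
		 * so __sysmmu_tlb_invalidate_entry() below issues 16
		 * FLUSH_ENTRY writes, one per 4KB step of the IOVA range.
		 */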
544eeb5184bSCho KyongHo 		if (MMU_MAJ_VER(__raw_sysmmu_version(data)) == 2)
5453ad6b7f3SCho KyongHo 			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
5463ad6b7f3SCho KyongHo 
5477222e8dbSCho KyongHo 		if (sysmmu_block(data->sfrbase)) {
5482a96536eSKyongHo Cho 			__sysmmu_tlb_invalidate_entry(
5493ad6b7f3SCho KyongHo 				data->sfrbase, iova, num_inv);
5507222e8dbSCho KyongHo 			sysmmu_unblock(data->sfrbase);
5512a96536eSKyongHo Cho 		}
55270605870SCho KyongHo 		if (!IS_ERR(data->clk_master))
55370605870SCho KyongHo 			clk_disable(data->clk_master);
5542a96536eSKyongHo Cho 	} else {
5556b21a5dbSCho KyongHo 		dev_dbg(dev, "disabled. Skipping TLB invalidation @ %#x\n",
5566b21a5dbSCho KyongHo 			iova);
5572a96536eSKyongHo Cho 	}
5589d4e7a24SCho KyongHo 	spin_unlock_irqrestore(&data->lock, flags);
5592a96536eSKyongHo Cho }
5602a96536eSKyongHo Cho 
5612a96536eSKyongHo Cho void exynos_sysmmu_tlb_invalidate(struct device *dev)
5622a96536eSKyongHo Cho {
5636b21a5dbSCho KyongHo 	struct exynos_iommu_owner *owner = dev->archdata.iommu;
5642a96536eSKyongHo Cho 	unsigned long flags;
5656b21a5dbSCho KyongHo 	struct sysmmu_drvdata *data;
5666b21a5dbSCho KyongHo 
5676b21a5dbSCho KyongHo 	data = dev_get_drvdata(owner->sysmmu);
5682a96536eSKyongHo Cho 
5699d4e7a24SCho KyongHo 	spin_lock_irqsave(&data->lock, flags);
5702a96536eSKyongHo Cho 	if (is_sysmmu_active(data)) {
57170605870SCho KyongHo 		if (!IS_ERR(data->clk_master))
57270605870SCho KyongHo 			clk_enable(data->clk_master);
5737222e8dbSCho KyongHo 		if (sysmmu_block(data->sfrbase)) {
5747222e8dbSCho KyongHo 			__sysmmu_tlb_invalidate(data->sfrbase);
5757222e8dbSCho KyongHo 			sysmmu_unblock(data->sfrbase);
5762a96536eSKyongHo Cho 		}
57770605870SCho KyongHo 		if (!IS_ERR(data->clk_master))
57870605870SCho KyongHo 			clk_disable(data->clk_master);
5792a96536eSKyongHo Cho 	} else {
5806b21a5dbSCho KyongHo 		dev_dbg(dev, "disabled. Skipping TLB invalidation\n");
5812a96536eSKyongHo Cho 	}
5829d4e7a24SCho KyongHo 	spin_unlock_irqrestore(&data->lock, flags);
5832a96536eSKyongHo Cho }
5842a96536eSKyongHo Cho 
5856b21a5dbSCho KyongHo static int __init exynos_sysmmu_probe(struct platform_device *pdev)
5862a96536eSKyongHo Cho {
58746c16d1eSCho KyongHo 	int irq, ret;
5887222e8dbSCho KyongHo 	struct device *dev = &pdev->dev;
5892a96536eSKyongHo Cho 	struct sysmmu_drvdata *data;
5907222e8dbSCho KyongHo 	struct resource *res;
5912a96536eSKyongHo Cho 
59246c16d1eSCho KyongHo 	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
59346c16d1eSCho KyongHo 	if (!data)
59446c16d1eSCho KyongHo 		return -ENOMEM;
5952a96536eSKyongHo Cho 
5967222e8dbSCho KyongHo 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
59746c16d1eSCho KyongHo 	data->sfrbase = devm_ioremap_resource(dev, res);
59846c16d1eSCho KyongHo 	if (IS_ERR(data->sfrbase))
59946c16d1eSCho KyongHo 		return PTR_ERR(data->sfrbase);
6002a96536eSKyongHo Cho 
60146c16d1eSCho KyongHo 	irq = platform_get_irq(pdev, 0);
60246c16d1eSCho KyongHo 	if (irq <= 0) {
6030bf4e54dSCho KyongHo 		dev_err(dev, "Unable to find IRQ resource\n");
60446c16d1eSCho KyongHo 		return irq;
6052a96536eSKyongHo Cho 	}
6062a96536eSKyongHo Cho 
60746c16d1eSCho KyongHo 	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
6082a96536eSKyongHo Cho 				dev_name(dev), data);
6092a96536eSKyongHo Cho 	if (ret) {
61046c16d1eSCho KyongHo 		dev_err(dev, "Unable to register handler of irq %d\n", irq);
61146c16d1eSCho KyongHo 		return ret;
6122a96536eSKyongHo Cho 	}
6132a96536eSKyongHo Cho 
61446c16d1eSCho KyongHo 	data->clk = devm_clk_get(dev, "sysmmu");
61546c16d1eSCho KyongHo 	if (IS_ERR(data->clk)) {
61646c16d1eSCho KyongHo 		dev_err(dev, "Failed to get clock!\n");
61746c16d1eSCho KyongHo 		return PTR_ERR(data->clk);
61846c16d1eSCho KyongHo 	} else  {
61946c16d1eSCho KyongHo 		ret = clk_prepare(data->clk);
62046c16d1eSCho KyongHo 		if (ret) {
62146c16d1eSCho KyongHo 			dev_err(dev, "Failed to prepare clk\n");
62246c16d1eSCho KyongHo 			return ret;
62346c16d1eSCho KyongHo 		}
6242a96536eSKyongHo Cho 	}
6252a96536eSKyongHo Cho 
62670605870SCho KyongHo 	data->clk_master = devm_clk_get(dev, "master");
62770605870SCho KyongHo 	if (!IS_ERR(data->clk_master)) {
62870605870SCho KyongHo 		ret = clk_prepare(data->clk_master);
62970605870SCho KyongHo 		if (ret) {
63070605870SCho KyongHo 			clk_unprepare(data->clk);
63170605870SCho KyongHo 			dev_err(dev, "Failed to prepare master's clk\n");
63270605870SCho KyongHo 			return ret;
63370605870SCho KyongHo 		}
63470605870SCho KyongHo 	}
63570605870SCho KyongHo 
6362a96536eSKyongHo Cho 	data->sysmmu = dev;
6379d4e7a24SCho KyongHo 	spin_lock_init(&data->lock);
6382a96536eSKyongHo Cho 
6397222e8dbSCho KyongHo 	platform_set_drvdata(pdev, data);
6407222e8dbSCho KyongHo 
6412a96536eSKyongHo Cho 	pm_runtime_enable(dev);
6422a96536eSKyongHo Cho 
6432a96536eSKyongHo Cho 	return 0;
6442a96536eSKyongHo Cho }
6452a96536eSKyongHo Cho 
6466b21a5dbSCho KyongHo static const struct of_device_id sysmmu_of_match[] __initconst = {
6476b21a5dbSCho KyongHo 	{ .compatible	= "samsung,exynos-sysmmu", },
6486b21a5dbSCho KyongHo 	{ },
6496b21a5dbSCho KyongHo };
6506b21a5dbSCho KyongHo 
6516b21a5dbSCho KyongHo static struct platform_driver exynos_sysmmu_driver __refdata = {
6522a96536eSKyongHo Cho 	.probe	= exynos_sysmmu_probe,
6532a96536eSKyongHo Cho 	.driver	= {
6542a96536eSKyongHo Cho 		.owner		= THIS_MODULE,
6552a96536eSKyongHo Cho 		.name		= "exynos-sysmmu",
6566b21a5dbSCho KyongHo 		.of_match_table	= sysmmu_of_match,
6572a96536eSKyongHo Cho 	}
6582a96536eSKyongHo Cho };
6592a96536eSKyongHo Cho 
6602a96536eSKyongHo Cho static inline void pgtable_flush(void *vastart, void *vaend)
6612a96536eSKyongHo Cho {
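	/*
	 * Note (added for clarity): the System MMU walks the page tables in
	 * main memory and does not snoop the CPU caches, so updated entries
	 * must be cleaned out of both the inner and the outer cache before
	 * the hardware can observe them.
	 */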
6622a96536eSKyongHo Cho 	dmac_flush_range(vastart, vaend);
6632a96536eSKyongHo Cho 	outer_flush_range(virt_to_phys(vastart),
6642a96536eSKyongHo Cho 				virt_to_phys(vaend));
6652a96536eSKyongHo Cho }
6662a96536eSKyongHo Cho 
6672a96536eSKyongHo Cho static int exynos_iommu_domain_init(struct iommu_domain *domain)
6682a96536eSKyongHo Cho {
6692a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv;
6702a96536eSKyongHo Cho 
6712a96536eSKyongHo Cho 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
6722a96536eSKyongHo Cho 	if (!priv)
6732a96536eSKyongHo Cho 		return -ENOMEM;
6742a96536eSKyongHo Cho 
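	/*
	 * Note (added for clarity): the Lv1 table is 4096 entries * 4 bytes
	 * = 16KiB, hence the order-2 (four page) allocation below; the
	 * per-section free-entry counters are 4096 shorts = 8KiB, hence
	 * order 1 (two pages).
	 */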
675d09d78fcSCho KyongHo 	priv->pgtable = (sysmmu_pte_t *)__get_free_pages(
6762a96536eSKyongHo Cho 						GFP_KERNEL | __GFP_ZERO, 2);
6772a96536eSKyongHo Cho 	if (!priv->pgtable)
6782a96536eSKyongHo Cho 		goto err_pgtable;
6792a96536eSKyongHo Cho 
6802a96536eSKyongHo Cho 	priv->lv2entcnt = (short *)__get_free_pages(
6812a96536eSKyongHo Cho 						GFP_KERNEL | __GFP_ZERO, 1);
6822a96536eSKyongHo Cho 	if (!priv->lv2entcnt)
6832a96536eSKyongHo Cho 		goto err_counter;
6842a96536eSKyongHo Cho 
6852a96536eSKyongHo Cho 	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
6862a96536eSKyongHo Cho 
6872a96536eSKyongHo Cho 	spin_lock_init(&priv->lock);
6882a96536eSKyongHo Cho 	spin_lock_init(&priv->pgtablelock);
6892a96536eSKyongHo Cho 	INIT_LIST_HEAD(&priv->clients);
6902a96536eSKyongHo Cho 
691eb51637bSSachin Kamat 	domain->geometry.aperture_start = 0;
692eb51637bSSachin Kamat 	domain->geometry.aperture_end   = ~0UL;
693eb51637bSSachin Kamat 	domain->geometry.force_aperture = true;
6943177bb76SJoerg Roedel 
6952a96536eSKyongHo Cho 	domain->priv = priv;
6962a96536eSKyongHo Cho 	return 0;
6972a96536eSKyongHo Cho 
6982a96536eSKyongHo Cho err_counter:
6992a96536eSKyongHo Cho 	free_pages((unsigned long)priv->pgtable, 2);
7002a96536eSKyongHo Cho err_pgtable:
7012a96536eSKyongHo Cho 	kfree(priv);
7022a96536eSKyongHo Cho 	return -ENOMEM;
7032a96536eSKyongHo Cho }
7042a96536eSKyongHo Cho 
7052a96536eSKyongHo Cho static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
7062a96536eSKyongHo Cho {
7072a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv = domain->priv;
7086b21a5dbSCho KyongHo 	struct exynos_iommu_owner *owner;
7092a96536eSKyongHo Cho 	unsigned long flags;
7102a96536eSKyongHo Cho 	int i;
7112a96536eSKyongHo Cho 
7122a96536eSKyongHo Cho 	WARN_ON(!list_empty(&priv->clients));
7132a96536eSKyongHo Cho 
7142a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->lock, flags);
7152a96536eSKyongHo Cho 
7166b21a5dbSCho KyongHo 	list_for_each_entry(owner, &priv->clients, client) {
7176b21a5dbSCho KyongHo 		while (!exynos_sysmmu_disable(owner->dev))
7182a96536eSKyongHo Cho 			; /* until System MMU is actually disabled */
7192a96536eSKyongHo Cho 	}
7202a96536eSKyongHo Cho 
7216b21a5dbSCho KyongHo 	while (!list_empty(&priv->clients))
7226b21a5dbSCho KyongHo 		list_del_init(priv->clients.next);
7236b21a5dbSCho KyongHo 
7242a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->lock, flags);
7252a96536eSKyongHo Cho 
7262a96536eSKyongHo Cho 	for (i = 0; i < NUM_LV1ENTRIES; i++)
7272a96536eSKyongHo Cho 		if (lv1ent_page(priv->pgtable + i))
728734c3c73SCho KyongHo 			kmem_cache_free(lv2table_kmem_cache,
729734c3c73SCho KyongHo 				phys_to_virt(lv2table_base(priv->pgtable + i)));
7302a96536eSKyongHo Cho 
7312a96536eSKyongHo Cho 	free_pages((unsigned long)priv->pgtable, 2);
7322a96536eSKyongHo Cho 	free_pages((unsigned long)priv->lv2entcnt, 1);
7332a96536eSKyongHo Cho 	kfree(domain->priv);
7342a96536eSKyongHo Cho 	domain->priv = NULL;
7352a96536eSKyongHo Cho }
7362a96536eSKyongHo Cho 
7372a96536eSKyongHo Cho static int exynos_iommu_attach_device(struct iommu_domain *domain,
7382a96536eSKyongHo Cho 				   struct device *dev)
7392a96536eSKyongHo Cho {
7406b21a5dbSCho KyongHo 	struct exynos_iommu_owner *owner = dev->archdata.iommu;
7412a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv = domain->priv;
7427222e8dbSCho KyongHo 	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
7432a96536eSKyongHo Cho 	unsigned long flags;
7442a96536eSKyongHo Cho 	int ret;
7452a96536eSKyongHo Cho 
7462a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->lock, flags);
7472a96536eSKyongHo Cho 
7486b21a5dbSCho KyongHo 	ret = __exynos_sysmmu_enable(dev, pagetable, domain);
7492a96536eSKyongHo Cho 	if (ret == 0) {
7506b21a5dbSCho KyongHo 		list_add_tail(&owner->client, &priv->clients);
7516b21a5dbSCho KyongHo 		owner->domain = domain;
7522a96536eSKyongHo Cho 	}
7532a96536eSKyongHo Cho 
7542a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->lock, flags);
7552a96536eSKyongHo Cho 
7562a96536eSKyongHo Cho 	if (ret < 0) {
7577222e8dbSCho KyongHo 		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
7587222e8dbSCho KyongHo 					__func__, &pagetable);
7597222e8dbSCho KyongHo 		return ret;
7602a96536eSKyongHo Cho 	}
7612a96536eSKyongHo Cho 
7627222e8dbSCho KyongHo 	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
7637222e8dbSCho KyongHo 		__func__, &pagetable, (ret == 0) ? "" : ", again");
7647222e8dbSCho KyongHo 
7652a96536eSKyongHo Cho 	return ret;
7662a96536eSKyongHo Cho }
7672a96536eSKyongHo Cho 
7682a96536eSKyongHo Cho static void exynos_iommu_detach_device(struct iommu_domain *domain,
7692a96536eSKyongHo Cho 				    struct device *dev)
7702a96536eSKyongHo Cho {
7716b21a5dbSCho KyongHo 	struct exynos_iommu_owner *owner;
7722a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv = domain->priv;
7737222e8dbSCho KyongHo 	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
7742a96536eSKyongHo Cho 	unsigned long flags;
7752a96536eSKyongHo Cho 
7762a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->lock, flags);
7772a96536eSKyongHo Cho 
7786b21a5dbSCho KyongHo 	list_for_each_entry(owner, &priv->clients, client) {
7796b21a5dbSCho KyongHo 		if (owner == dev->archdata.iommu) {
7806b21a5dbSCho KyongHo 			if (exynos_sysmmu_disable(dev)) {
7816b21a5dbSCho KyongHo 				list_del_init(&owner->client);
7826b21a5dbSCho KyongHo 				owner->domain = NULL;
7836b21a5dbSCho KyongHo 			}
7842a96536eSKyongHo Cho 			break;
7852a96536eSKyongHo Cho 		}
7862a96536eSKyongHo Cho 	}
7872a96536eSKyongHo Cho 
7882a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->lock, flags);
7892a96536eSKyongHo Cho 
7906b21a5dbSCho KyongHo 	if (owner == dev->archdata.iommu)
7916b21a5dbSCho KyongHo 		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
7926b21a5dbSCho KyongHo 					__func__, &pagetable);
7936b21a5dbSCho KyongHo 	else
7946b21a5dbSCho KyongHo 		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
7952a96536eSKyongHo Cho }
7962a96536eSKyongHo Cho 
797d09d78fcSCho KyongHo static sysmmu_pte_t *alloc_lv2entry(sysmmu_pte_t *sent, sysmmu_iova_t iova,
7982a96536eSKyongHo Cho 					short *pgcounter)
7992a96536eSKyongHo Cho {
80061128f08SCho KyongHo 	if (lv1ent_section(sent)) {
801d09d78fcSCho KyongHo 		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
80261128f08SCho KyongHo 		return ERR_PTR(-EADDRINUSE);
80361128f08SCho KyongHo 	}
80461128f08SCho KyongHo 
8052a96536eSKyongHo Cho 	if (lv1ent_fault(sent)) {
806d09d78fcSCho KyongHo 		sysmmu_pte_t *pent;
8072a96536eSKyongHo Cho 
808734c3c73SCho KyongHo 		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
809d09d78fcSCho KyongHo 		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
8102a96536eSKyongHo Cho 		if (!pent)
81161128f08SCho KyongHo 			return ERR_PTR(-ENOMEM);
8122a96536eSKyongHo Cho 
8137222e8dbSCho KyongHo 		*sent = mk_lv1ent_page(virt_to_phys(pent));
8142a96536eSKyongHo Cho 		*pgcounter = NUM_LV2ENTRIES;
8152a96536eSKyongHo Cho 		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
8162a96536eSKyongHo Cho 		pgtable_flush(sent, sent + 1);
8172a96536eSKyongHo Cho 	}
8182a96536eSKyongHo Cho 
8192a96536eSKyongHo Cho 	return page_entry(sent, iova);
8202a96536eSKyongHo Cho }
8212a96536eSKyongHo Cho 
822d09d78fcSCho KyongHo static int lv1set_section(sysmmu_pte_t *sent, sysmmu_iova_t iova,
82361128f08SCho KyongHo 			  phys_addr_t paddr, short *pgcnt)
8242a96536eSKyongHo Cho {
82561128f08SCho KyongHo 	if (lv1ent_section(sent)) {
826d09d78fcSCho KyongHo 		WARN(1, "Trying to map 1MiB@%#08x, which is already mapped",
82761128f08SCho KyongHo 			iova);
8282a96536eSKyongHo Cho 		return -EADDRINUSE;
82961128f08SCho KyongHo 	}
8302a96536eSKyongHo Cho 
8312a96536eSKyongHo Cho 	if (lv1ent_page(sent)) {
83261128f08SCho KyongHo 		if (*pgcnt != NUM_LV2ENTRIES) {
833d09d78fcSCho KyongHo 			WARN(1, "Trying to map 1MiB@%#08x, which is already mapped",
83461128f08SCho KyongHo 				iova);
8352a96536eSKyongHo Cho 			return -EADDRINUSE;
83661128f08SCho KyongHo 		}
8372a96536eSKyongHo Cho 
838734c3c73SCho KyongHo 		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
8392a96536eSKyongHo Cho 		*pgcnt = 0;
8402a96536eSKyongHo Cho 	}
8412a96536eSKyongHo Cho 
8422a96536eSKyongHo Cho 	*sent = mk_lv1ent_sect(paddr);
8432a96536eSKyongHo Cho 
8442a96536eSKyongHo Cho 	pgtable_flush(sent, sent + 1);
8452a96536eSKyongHo Cho 
8462a96536eSKyongHo Cho 	return 0;
8472a96536eSKyongHo Cho }
8482a96536eSKyongHo Cho 
849d09d78fcSCho KyongHo static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
8502a96536eSKyongHo Cho 								short *pgcnt)
8512a96536eSKyongHo Cho {
8522a96536eSKyongHo Cho 	if (size == SPAGE_SIZE) {
8530bf4e54dSCho KyongHo 		if (WARN_ON(!lv2ent_fault(pent)))
8542a96536eSKyongHo Cho 			return -EADDRINUSE;
8552a96536eSKyongHo Cho 
8562a96536eSKyongHo Cho 		*pent = mk_lv2ent_spage(paddr);
8572a96536eSKyongHo Cho 		pgtable_flush(pent, pent + 1);
8582a96536eSKyongHo Cho 		*pgcnt -= 1;
8592a96536eSKyongHo Cho 	} else { /* size == LPAGE_SIZE */
8602a96536eSKyongHo Cho 		int i;
8612a96536eSKyongHo Cho 		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
8620bf4e54dSCho KyongHo 			if (WARN_ON(!lv2ent_fault(pent))) {
86361128f08SCho KyongHo 				if (i > 0)
86461128f08SCho KyongHo 					memset(pent - i, 0, sizeof(*pent) * i);
8652a96536eSKyongHo Cho 				return -EADDRINUSE;
8662a96536eSKyongHo Cho 			}
8672a96536eSKyongHo Cho 
8682a96536eSKyongHo Cho 			*pent = mk_lv2ent_lpage(paddr);
8692a96536eSKyongHo Cho 		}
8702a96536eSKyongHo Cho 		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
8712a96536eSKyongHo Cho 		*pgcnt -= SPAGES_PER_LPAGE;
8722a96536eSKyongHo Cho 	}
8732a96536eSKyongHo Cho 
8742a96536eSKyongHo Cho 	return 0;
8752a96536eSKyongHo Cho }
8762a96536eSKyongHo Cho 
877d09d78fcSCho KyongHo static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
8782a96536eSKyongHo Cho 			 phys_addr_t paddr, size_t size, int prot)
8792a96536eSKyongHo Cho {
8802a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv = domain->priv;
881d09d78fcSCho KyongHo 	sysmmu_pte_t *entry;
882d09d78fcSCho KyongHo 	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
8832a96536eSKyongHo Cho 	unsigned long flags;
8842a96536eSKyongHo Cho 	int ret = -ENOMEM;
8852a96536eSKyongHo Cho 
8862a96536eSKyongHo Cho 	BUG_ON(priv->pgtable == NULL);
8872a96536eSKyongHo Cho 
8882a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->pgtablelock, flags);
8892a96536eSKyongHo Cho 
8902a96536eSKyongHo Cho 	entry = section_entry(priv->pgtable, iova);
8912a96536eSKyongHo Cho 
8922a96536eSKyongHo Cho 	if (size == SECT_SIZE) {
89361128f08SCho KyongHo 		ret = lv1set_section(entry, iova, paddr,
8942a96536eSKyongHo Cho 					&priv->lv2entcnt[lv1ent_offset(iova)]);
8952a96536eSKyongHo Cho 	} else {
896d09d78fcSCho KyongHo 		sysmmu_pte_t *pent;
8972a96536eSKyongHo Cho 
8982a96536eSKyongHo Cho 		pent = alloc_lv2entry(entry, iova,
8992a96536eSKyongHo Cho 					&priv->lv2entcnt[lv1ent_offset(iova)]);
9002a96536eSKyongHo Cho 
90161128f08SCho KyongHo 		if (IS_ERR(pent))
90261128f08SCho KyongHo 			ret = PTR_ERR(pent);
9032a96536eSKyongHo Cho 		else
9042a96536eSKyongHo Cho 			ret = lv2set_page(pent, paddr, size,
9052a96536eSKyongHo Cho 					&priv->lv2entcnt[lv1ent_offset(iova)]);
9062a96536eSKyongHo Cho 	}
9072a96536eSKyongHo Cho 
90861128f08SCho KyongHo 	if (ret)
9090bf4e54dSCho KyongHo 		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
9100bf4e54dSCho KyongHo 			__func__, ret, size, iova);
9112a96536eSKyongHo Cho 
9122a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->pgtablelock, flags);
9132a96536eSKyongHo Cho 
9142a96536eSKyongHo Cho 	return ret;
9152a96536eSKyongHo Cho }
9162a96536eSKyongHo Cho 
9172a96536eSKyongHo Cho static size_t exynos_iommu_unmap(struct iommu_domain *domain,
918d09d78fcSCho KyongHo 					unsigned long l_iova, size_t size)
9192a96536eSKyongHo Cho {
9202a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv = domain->priv;
9216b21a5dbSCho KyongHo 	struct exynos_iommu_owner *owner;
922d09d78fcSCho KyongHo 	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
923d09d78fcSCho KyongHo 	sysmmu_pte_t *ent;
92461128f08SCho KyongHo 	size_t err_pgsize;
925d09d78fcSCho KyongHo 	unsigned long flags;
9262a96536eSKyongHo Cho 
9272a96536eSKyongHo Cho 	BUG_ON(priv->pgtable == NULL);
9282a96536eSKyongHo Cho 
9292a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->pgtablelock, flags);
9302a96536eSKyongHo Cho 
9312a96536eSKyongHo Cho 	ent = section_entry(priv->pgtable, iova);
9322a96536eSKyongHo Cho 
9332a96536eSKyongHo Cho 	if (lv1ent_section(ent)) {
9340bf4e54dSCho KyongHo 		if (WARN_ON(size < SECT_SIZE)) {
93561128f08SCho KyongHo 			err_pgsize = SECT_SIZE;
93661128f08SCho KyongHo 			goto err;
93761128f08SCho KyongHo 		}
9382a96536eSKyongHo Cho 
9392a96536eSKyongHo Cho 		*ent = 0;
9402a96536eSKyongHo Cho 		pgtable_flush(ent, ent + 1);
9412a96536eSKyongHo Cho 		size = SECT_SIZE;
9422a96536eSKyongHo Cho 		goto done;
9432a96536eSKyongHo Cho 	}
9442a96536eSKyongHo Cho 
9452a96536eSKyongHo Cho 	if (unlikely(lv1ent_fault(ent))) {
9462a96536eSKyongHo Cho 		if (size > SECT_SIZE)
9472a96536eSKyongHo Cho 			size = SECT_SIZE;
9482a96536eSKyongHo Cho 		goto done;
9492a96536eSKyongHo Cho 	}
9502a96536eSKyongHo Cho 
9512a96536eSKyongHo Cho 	/* lv1ent_page(sent) == true here */
9522a96536eSKyongHo Cho 
9532a96536eSKyongHo Cho 	ent = page_entry(ent, iova);
9542a96536eSKyongHo Cho 
9552a96536eSKyongHo Cho 	if (unlikely(lv2ent_fault(ent))) {
9562a96536eSKyongHo Cho 		size = SPAGE_SIZE;
9572a96536eSKyongHo Cho 		goto done;
9582a96536eSKyongHo Cho 	}
9592a96536eSKyongHo Cho 
9602a96536eSKyongHo Cho 	if (lv2ent_small(ent)) {
9612a96536eSKyongHo Cho 		*ent = 0;
9622a96536eSKyongHo Cho 		size = SPAGE_SIZE;
9636cb47ed7SCho KyongHo 		pgtable_flush(ent, ent + 1);
9642a96536eSKyongHo Cho 		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
9652a96536eSKyongHo Cho 		goto done;
9662a96536eSKyongHo Cho 	}
9672a96536eSKyongHo Cho 
9682a96536eSKyongHo Cho 	/* lv2ent_large(ent) == true here */
9690bf4e54dSCho KyongHo 	if (WARN_ON(size < LPAGE_SIZE)) {
97061128f08SCho KyongHo 		err_pgsize = LPAGE_SIZE;
97161128f08SCho KyongHo 		goto err;
97261128f08SCho KyongHo 	}
9732a96536eSKyongHo Cho 
9742a96536eSKyongHo Cho 	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
9756cb47ed7SCho KyongHo 	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);
9762a96536eSKyongHo Cho 
9772a96536eSKyongHo Cho 	size = LPAGE_SIZE;
9782a96536eSKyongHo Cho 	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
9792a96536eSKyongHo Cho done:
9802a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->pgtablelock, flags);
9812a96536eSKyongHo Cho 
9822a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->lock, flags);
9836b21a5dbSCho KyongHo 	list_for_each_entry(owner, &priv->clients, client)
9846b21a5dbSCho KyongHo 		sysmmu_tlb_invalidate_entry(owner->dev, iova, size);
9852a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->lock, flags);
9862a96536eSKyongHo Cho 
9872a96536eSKyongHo Cho 	return size;
98861128f08SCho KyongHo err:
98961128f08SCho KyongHo 	spin_unlock_irqrestore(&priv->pgtablelock, flags);
99061128f08SCho KyongHo 
9910bf4e54dSCho KyongHo 	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
99261128f08SCho KyongHo 		__func__, size, iova, err_pgsize);
99361128f08SCho KyongHo 
99461128f08SCho KyongHo 	return 0;
9952a96536eSKyongHo Cho }
9962a96536eSKyongHo Cho 
9972a96536eSKyongHo Cho static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
998bb5547acSVarun Sethi 					  dma_addr_t iova)
9992a96536eSKyongHo Cho {
10002a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv = domain->priv;
1001d09d78fcSCho KyongHo 	sysmmu_pte_t *entry;
10022a96536eSKyongHo Cho 	unsigned long flags;
10032a96536eSKyongHo Cho 	phys_addr_t phys = 0;
10042a96536eSKyongHo Cho 
10052a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->pgtablelock, flags);
10062a96536eSKyongHo Cho 
10072a96536eSKyongHo Cho 	entry = section_entry(priv->pgtable, iova);
10082a96536eSKyongHo Cho 
10092a96536eSKyongHo Cho 	if (lv1ent_section(entry)) {
10102a96536eSKyongHo Cho 		phys = section_phys(entry) + section_offs(iova);
10112a96536eSKyongHo Cho 	} else if (lv1ent_page(entry)) {
10122a96536eSKyongHo Cho 		entry = page_entry(entry, iova);
10132a96536eSKyongHo Cho 
10142a96536eSKyongHo Cho 		if (lv2ent_large(entry))
10152a96536eSKyongHo Cho 			phys = lpage_phys(entry) + lpage_offs(iova);
10162a96536eSKyongHo Cho 		else if (lv2ent_small(entry))
10172a96536eSKyongHo Cho 			phys = spage_phys(entry) + spage_offs(iova);
10182a96536eSKyongHo Cho 	}
10192a96536eSKyongHo Cho 
10202a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->pgtablelock, flags);
10212a96536eSKyongHo Cho 
10222a96536eSKyongHo Cho 	return phys;
10232a96536eSKyongHo Cho }
10242a96536eSKyongHo Cho 
1025bf4a1c92SAntonios Motakis static int exynos_iommu_add_device(struct device *dev)
1026bf4a1c92SAntonios Motakis {
1027bf4a1c92SAntonios Motakis 	struct iommu_group *group;
1028bf4a1c92SAntonios Motakis 	int ret;
1029bf4a1c92SAntonios Motakis 
1030bf4a1c92SAntonios Motakis 	group = iommu_group_get(dev);
1031bf4a1c92SAntonios Motakis 
1032bf4a1c92SAntonios Motakis 	if (!group) {
1033bf4a1c92SAntonios Motakis 		group = iommu_group_alloc();
1034bf4a1c92SAntonios Motakis 		if (IS_ERR(group)) {
1035bf4a1c92SAntonios Motakis 			dev_err(dev, "Failed to allocate IOMMU group\n");
1036bf4a1c92SAntonios Motakis 			return PTR_ERR(group);
1037bf4a1c92SAntonios Motakis 		}
1038bf4a1c92SAntonios Motakis 	}
1039bf4a1c92SAntonios Motakis 
1040bf4a1c92SAntonios Motakis 	ret = iommu_group_add_device(group, dev);
1041bf4a1c92SAntonios Motakis 	iommu_group_put(group);
1042bf4a1c92SAntonios Motakis 
1043bf4a1c92SAntonios Motakis 	return ret;
1044bf4a1c92SAntonios Motakis }
1045bf4a1c92SAntonios Motakis 
1046bf4a1c92SAntonios Motakis static void exynos_iommu_remove_device(struct device *dev)
1047bf4a1c92SAntonios Motakis {
1048bf4a1c92SAntonios Motakis 	iommu_group_remove_device(dev);
1049bf4a1c92SAntonios Motakis }
1050bf4a1c92SAntonios Motakis 
10512a96536eSKyongHo Cho static struct iommu_ops exynos_iommu_ops = {
10522a96536eSKyongHo Cho 	.domain_init = &exynos_iommu_domain_init,
10532a96536eSKyongHo Cho 	.domain_destroy = &exynos_iommu_domain_destroy,
10542a96536eSKyongHo Cho 	.attach_dev = &exynos_iommu_attach_device,
10552a96536eSKyongHo Cho 	.detach_dev = &exynos_iommu_detach_device,
10562a96536eSKyongHo Cho 	.map = &exynos_iommu_map,
10572a96536eSKyongHo Cho 	.unmap = &exynos_iommu_unmap,
10582a96536eSKyongHo Cho 	.iova_to_phys = &exynos_iommu_iova_to_phys,
1059bf4a1c92SAntonios Motakis 	.add_device = &exynos_iommu_add_device,
1060bf4a1c92SAntonios Motakis 	.remove_device = &exynos_iommu_remove_device,
10612a96536eSKyongHo Cho 	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
10622a96536eSKyongHo Cho };
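
/*
 * Note (added for clarity): the pgsize_bitmap above evaluates to
 * 0x100000 | 0x10000 | 0x1000 = 0x111000, which tells the IOMMU core
 * that map/unmap requests must be expressed in 1MiB sections, 64KiB
 * large pages or 4KiB small pages.
 */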
10632a96536eSKyongHo Cho 
10642a96536eSKyongHo Cho static int __init exynos_iommu_init(void)
10652a96536eSKyongHo Cho {
10662a96536eSKyongHo Cho 	int ret;
10672a96536eSKyongHo Cho 
1068734c3c73SCho KyongHo 	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
1069734c3c73SCho KyongHo 				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
1070734c3c73SCho KyongHo 	if (!lv2table_kmem_cache) {
1071734c3c73SCho KyongHo 		pr_err("%s: Failed to create kmem cache\n", __func__);
1072734c3c73SCho KyongHo 		return -ENOMEM;
1073734c3c73SCho KyongHo 	}
1074734c3c73SCho KyongHo 
10752a96536eSKyongHo Cho 	ret = platform_driver_register(&exynos_sysmmu_driver);
1076734c3c73SCho KyongHo 	if (ret) {
1077734c3c73SCho KyongHo 		pr_err("%s: Failed to register driver\n", __func__);
1078734c3c73SCho KyongHo 		goto err_reg_driver;
1079734c3c73SCho KyongHo 	}
10802a96536eSKyongHo Cho 
1081734c3c73SCho KyongHo 	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
1082734c3c73SCho KyongHo 	if (ret) {
1083734c3c73SCho KyongHo 		pr_err("%s: Failed to register exynos-iommu driver.\n",
1084734c3c73SCho KyongHo 								__func__);
1085734c3c73SCho KyongHo 		goto err_set_iommu;
1086734c3c73SCho KyongHo 	}
10872a96536eSKyongHo Cho 
1088734c3c73SCho KyongHo 	return 0;
1089734c3c73SCho KyongHo err_set_iommu:
1090734c3c73SCho KyongHo 	platform_driver_unregister(&exynos_sysmmu_driver);
1091734c3c73SCho KyongHo err_reg_driver:
1092734c3c73SCho KyongHo 	kmem_cache_destroy(lv2table_kmem_cache);
10932a96536eSKyongHo Cho 	return ret;
10942a96536eSKyongHo Cho }
10952a96536eSKyongHo Cho subsys_initcall(exynos_iommu_init);
1096