xref: /openbmc/linux/drivers/iommu/exynos-iommu.c (revision 6cb47ed7)
12a96536eSKyongHo Cho /* linux/drivers/iommu/exynos_iommu.c
22a96536eSKyongHo Cho  *
32a96536eSKyongHo Cho  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
42a96536eSKyongHo Cho  *		http://www.samsung.com
52a96536eSKyongHo Cho  *
62a96536eSKyongHo Cho  * This program is free software; you can redistribute it and/or modify
72a96536eSKyongHo Cho  * it under the terms of the GNU General Public License version 2 as
82a96536eSKyongHo Cho  * published by the Free Software Foundation.
92a96536eSKyongHo Cho  */
102a96536eSKyongHo Cho 
112a96536eSKyongHo Cho #ifdef CONFIG_EXYNOS_IOMMU_DEBUG
122a96536eSKyongHo Cho #define DEBUG
132a96536eSKyongHo Cho #endif
142a96536eSKyongHo Cho 
152a96536eSKyongHo Cho #include <linux/io.h>
162a96536eSKyongHo Cho #include <linux/interrupt.h>
172a96536eSKyongHo Cho #include <linux/platform_device.h>
182a96536eSKyongHo Cho #include <linux/slab.h>
192a96536eSKyongHo Cho #include <linux/pm_runtime.h>
202a96536eSKyongHo Cho #include <linux/clk.h>
212a96536eSKyongHo Cho #include <linux/err.h>
222a96536eSKyongHo Cho #include <linux/mm.h>
232a96536eSKyongHo Cho #include <linux/iommu.h>
242a96536eSKyongHo Cho #include <linux/errno.h>
252a96536eSKyongHo Cho #include <linux/list.h>
262a96536eSKyongHo Cho #include <linux/memblock.h>
272a96536eSKyongHo Cho #include <linux/export.h>
282a96536eSKyongHo Cho 
292a96536eSKyongHo Cho #include <asm/cacheflush.h>
302a96536eSKyongHo Cho #include <asm/pgtable.h>
312a96536eSKyongHo Cho 
322a96536eSKyongHo Cho /* We do not consider super section mapping (16MB) */
332a96536eSKyongHo Cho #define SECT_ORDER 20
342a96536eSKyongHo Cho #define LPAGE_ORDER 16
352a96536eSKyongHo Cho #define SPAGE_ORDER 12
362a96536eSKyongHo Cho 
372a96536eSKyongHo Cho #define SECT_SIZE (1 << SECT_ORDER)
382a96536eSKyongHo Cho #define LPAGE_SIZE (1 << LPAGE_ORDER)
392a96536eSKyongHo Cho #define SPAGE_SIZE (1 << SPAGE_ORDER)
402a96536eSKyongHo Cho 
412a96536eSKyongHo Cho #define SECT_MASK (~(SECT_SIZE - 1))
422a96536eSKyongHo Cho #define LPAGE_MASK (~(LPAGE_SIZE - 1))
432a96536eSKyongHo Cho #define SPAGE_MASK (~(SPAGE_SIZE - 1))
442a96536eSKyongHo Cho 
452a96536eSKyongHo Cho #define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
462a96536eSKyongHo Cho #define lv1ent_page(sent) ((*(sent) & 3) == 1)
472a96536eSKyongHo Cho #define lv1ent_section(sent) ((*(sent) & 3) == 2)
482a96536eSKyongHo Cho 
492a96536eSKyongHo Cho #define lv2ent_fault(pent) ((*(pent) & 3) == 0)
502a96536eSKyongHo Cho #define lv2ent_small(pent) ((*(pent) & 2) == 2)
512a96536eSKyongHo Cho #define lv2ent_large(pent) ((*(pent) & 3) == 1)
522a96536eSKyongHo Cho 
532a96536eSKyongHo Cho #define section_phys(sent) (*(sent) & SECT_MASK)
542a96536eSKyongHo Cho #define section_offs(iova) ((iova) & 0xFFFFF)
552a96536eSKyongHo Cho #define lpage_phys(pent) (*(pent) & LPAGE_MASK)
562a96536eSKyongHo Cho #define lpage_offs(iova) ((iova) & 0xFFFF)
572a96536eSKyongHo Cho #define spage_phys(pent) (*(pent) & SPAGE_MASK)
582a96536eSKyongHo Cho #define spage_offs(iova) ((iova) & 0xFFF)
592a96536eSKyongHo Cho 
602a96536eSKyongHo Cho #define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
612a96536eSKyongHo Cho #define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)
622a96536eSKyongHo Cho 
632a96536eSKyongHo Cho #define NUM_LV1ENTRIES 4096
642a96536eSKyongHo Cho #define NUM_LV2ENTRIES 256
652a96536eSKyongHo Cho 
662a96536eSKyongHo Cho #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
672a96536eSKyongHo Cho 
682a96536eSKyongHo Cho #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
692a96536eSKyongHo Cho 
702a96536eSKyongHo Cho #define lv2table_base(sent) (*(sent) & 0xFFFFFC00)
712a96536eSKyongHo Cho 
722a96536eSKyongHo Cho #define mk_lv1ent_sect(pa) ((pa) | 2)
732a96536eSKyongHo Cho #define mk_lv1ent_page(pa) ((pa) | 1)
742a96536eSKyongHo Cho #define mk_lv2ent_lpage(pa) ((pa) | 1)
752a96536eSKyongHo Cho #define mk_lv2ent_spage(pa) ((pa) | 2)
762a96536eSKyongHo Cho 
772a96536eSKyongHo Cho #define CTRL_ENABLE	0x5
782a96536eSKyongHo Cho #define CTRL_BLOCK	0x7
792a96536eSKyongHo Cho #define CTRL_DISABLE	0x0
802a96536eSKyongHo Cho 
812a96536eSKyongHo Cho #define REG_MMU_CTRL		0x000
822a96536eSKyongHo Cho #define REG_MMU_CFG		0x004
832a96536eSKyongHo Cho #define REG_MMU_STATUS		0x008
842a96536eSKyongHo Cho #define REG_MMU_FLUSH		0x00C
852a96536eSKyongHo Cho #define REG_MMU_FLUSH_ENTRY	0x010
862a96536eSKyongHo Cho #define REG_PT_BASE_ADDR	0x014
872a96536eSKyongHo Cho #define REG_INT_STATUS		0x018
882a96536eSKyongHo Cho #define REG_INT_CLEAR		0x01C
892a96536eSKyongHo Cho 
902a96536eSKyongHo Cho #define REG_PAGE_FAULT_ADDR	0x024
912a96536eSKyongHo Cho #define REG_AW_FAULT_ADDR	0x028
922a96536eSKyongHo Cho #define REG_AR_FAULT_ADDR	0x02C
932a96536eSKyongHo Cho #define REG_DEFAULT_SLAVE_ADDR	0x030
942a96536eSKyongHo Cho 
952a96536eSKyongHo Cho #define REG_MMU_VERSION		0x034
962a96536eSKyongHo Cho 
972a96536eSKyongHo Cho #define REG_PB0_SADDR		0x04C
982a96536eSKyongHo Cho #define REG_PB0_EADDR		0x050
992a96536eSKyongHo Cho #define REG_PB1_SADDR		0x054
1002a96536eSKyongHo Cho #define REG_PB1_EADDR		0x058
1012a96536eSKyongHo Cho 
102734c3c73SCho KyongHo static struct kmem_cache *lv2table_kmem_cache;
103734c3c73SCho KyongHo 
/* Return a pointer to the lv1 (section) entry that translates @iova. */
static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
{
	return &pgtable[lv1ent_offset(iova)];
}
1082a96536eSKyongHo Cho 
/* Return a pointer to the lv2 (page) entry for @iova inside the lv2 table
 * that the lv1 entry @sent points at. */
static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
{
	unsigned long *lv2table = phys_to_virt(lv2table_base(sent));

	return &lv2table[lv2ent_offset(iova)];
}
1142a96536eSKyongHo Cho 
/*
 * Fault causes reported by the System MMU, in the bit order of the
 * REG_INT_STATUS register (the IRQ handler derives the type via __ffs()).
 */
enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,	/* software fallback, not a hardware bit */
	SYSMMU_FAULTS_NUM	/* size of the name/register tables below */
};
1272a96536eSKyongHo Cho 
/*
 * Fault callback installed with exynos_sysmmu_set_fault_handler().
 *
 * @itype: type of fault.
 * @pgtable_base: the physical address of page table base. This is 0 if @itype
 *                is SYSMMU_BUSERROR.
 * @fault_addr: the device (virtual) address that the System MMU tried to
 *             translate. This is 0 if @itype is SYSMMU_BUSERROR.
 *
 * Returning 0 marks the fault as recovered (the IRQ handler then clears the
 * interrupt); any other value leaves the fault unhandled.
 */
typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
			phys_addr_t pgtable_base, unsigned long fault_addr);
1372a96536eSKyongHo Cho 
1382a96536eSKyongHo Cho static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
1392a96536eSKyongHo Cho 	REG_PAGE_FAULT_ADDR,
1402a96536eSKyongHo Cho 	REG_AR_FAULT_ADDR,
1412a96536eSKyongHo Cho 	REG_AW_FAULT_ADDR,
1422a96536eSKyongHo Cho 	REG_DEFAULT_SLAVE_ADDR,
1432a96536eSKyongHo Cho 	REG_AR_FAULT_ADDR,
1442a96536eSKyongHo Cho 	REG_AR_FAULT_ADDR,
1452a96536eSKyongHo Cho 	REG_AW_FAULT_ADDR,
1462a96536eSKyongHo Cho 	REG_AW_FAULT_ADDR
1472a96536eSKyongHo Cho };
1482a96536eSKyongHo Cho 
1492a96536eSKyongHo Cho static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
1502a96536eSKyongHo Cho 	"PAGE FAULT",
1512a96536eSKyongHo Cho 	"AR MULTI-HIT FAULT",
1522a96536eSKyongHo Cho 	"AW MULTI-HIT FAULT",
1532a96536eSKyongHo Cho 	"BUS ERROR",
1542a96536eSKyongHo Cho 	"AR SECURITY PROTECTION FAULT",
1552a96536eSKyongHo Cho 	"AR ACCESS PROTECTION FAULT",
1562a96536eSKyongHo Cho 	"AW SECURITY PROTECTION FAULT",
1572a96536eSKyongHo Cho 	"AW ACCESS PROTECTION FAULT",
1582a96536eSKyongHo Cho 	"UNKNOWN FAULT"
1592a96536eSKyongHo Cho };
1602a96536eSKyongHo Cho 
/* Driver-private state of one iommu_domain (stored in domain->priv). */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.node */
	unsigned long *pgtable; /* lv1 page table, 16KB */
	short *lv2entcnt; /* free lv2 entry counter for each section */
	spinlock_t lock; /* lock for this structure */
	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
};
1682a96536eSKyongHo Cho 
/* Per-instance state of one System MMU, allocated in exynos_sysmmu_probe(). */
struct sysmmu_drvdata {
	struct list_head node; /* entry of exynos_iommu_domain.clients */
	struct device *sysmmu;	/* System MMU's device descriptor */
	struct device *dev;	/* Owner of system MMU */
	char *dbgname;		/* tag printed in debug/error messages */
	void __iomem *sfrbase;	/* mapped SFR (register) region */
	struct clk *clk;	/* gate clock; may hold an ERR pointer if absent */
	int activations;	/* enable refcount; > 0 means active */
	rwlock_t lock;		/* protects the fields of this structure */
	struct iommu_domain *domain;	/* domain currently attached, or NULL */
	sysmmu_fault_handler_t fault_handler;	/* fallback fault callback */
	phys_addr_t pgtable;	/* physical address of the active lv1 table */
};
1822a96536eSKyongHo Cho 
1832a96536eSKyongHo Cho static bool set_sysmmu_active(struct sysmmu_drvdata *data)
1842a96536eSKyongHo Cho {
1852a96536eSKyongHo Cho 	/* return true if the System MMU was not active previously
1862a96536eSKyongHo Cho 	   and it needs to be initialized */
1872a96536eSKyongHo Cho 	return ++data->activations == 1;
1882a96536eSKyongHo Cho }
1892a96536eSKyongHo Cho 
1902a96536eSKyongHo Cho static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
1912a96536eSKyongHo Cho {
1922a96536eSKyongHo Cho 	/* return true if the System MMU is needed to be disabled */
1932a96536eSKyongHo Cho 	BUG_ON(data->activations < 1);
1942a96536eSKyongHo Cho 	return --data->activations == 0;
1952a96536eSKyongHo Cho }
1962a96536eSKyongHo Cho 
/* True while at least one user holds the System MMU enabled. */
static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}
2012a96536eSKyongHo Cho 
/* Resume translation after sysmmu_block() by re-enabling the MMU. */
static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}
2062a96536eSKyongHo Cho 
2072a96536eSKyongHo Cho static bool sysmmu_block(void __iomem *sfrbase)
2082a96536eSKyongHo Cho {
2092a96536eSKyongHo Cho 	int i = 120;
2102a96536eSKyongHo Cho 
2112a96536eSKyongHo Cho 	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
2122a96536eSKyongHo Cho 	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
2132a96536eSKyongHo Cho 		--i;
2142a96536eSKyongHo Cho 
2152a96536eSKyongHo Cho 	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
2162a96536eSKyongHo Cho 		sysmmu_unblock(sfrbase);
2172a96536eSKyongHo Cho 		return false;
2182a96536eSKyongHo Cho 	}
2192a96536eSKyongHo Cho 
2202a96536eSKyongHo Cho 	return true;
2212a96536eSKyongHo Cho }
2222a96536eSKyongHo Cho 
/* Flush the entire TLB.  Caller must have stopped the MMU (sysmmu_block()). */
static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}
2272a96536eSKyongHo Cho 
2282a96536eSKyongHo Cho static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
2293ad6b7f3SCho KyongHo 				unsigned long iova, unsigned int num_inv)
2302a96536eSKyongHo Cho {
2313ad6b7f3SCho KyongHo 	unsigned int i;
2323ad6b7f3SCho KyongHo 	for (i = 0; i < num_inv; i++) {
2333ad6b7f3SCho KyongHo 		__raw_writel((iova & SPAGE_MASK) | 1,
2343ad6b7f3SCho KyongHo 				sfrbase + REG_MMU_FLUSH_ENTRY);
2353ad6b7f3SCho KyongHo 		iova += SPAGE_SIZE;
2363ad6b7f3SCho KyongHo 	}
2372a96536eSKyongHo Cho }
2382a96536eSKyongHo Cho 
/*
 * Program the lv1 page table base address and flush any stale TLB content.
 * Caller must ensure the MMU is blocked or not yet enabled.
 */
static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				       unsigned long pgd)
{
	__raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}
2472a96536eSKyongHo Cho 
/* Install @handler as the fault callback, under the drvdata write lock. */
static void __set_fault_handler(struct sysmmu_drvdata *data,
					sysmmu_fault_handler_t handler)
{
	unsigned long flags;

	write_lock_irqsave(&data->lock, flags);
	data->fault_handler = handler;
	write_unlock_irqrestore(&data->lock, flags);
}
2572a96536eSKyongHo Cho 
/*
 * Replace the fault handler of @dev's System MMU.
 * @dev: master device; dev->archdata.iommu must point at the sysmmu device.
 */
void exynos_sysmmu_set_fault_handler(struct device *dev,
					sysmmu_fault_handler_t handler)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	__set_fault_handler(data, handler);
}
2652a96536eSKyongHo Cho 
/*
 * Fault handler of last resort: dump the faulting address plus the page
 * table entries that translate it, then BUG().  Never actually returns;
 * the trailing return 0 only satisfies the sysmmu_fault_handler_t
 * prototype.
 */
static int default_fault_handler(enum exynos_sysmmu_inttype itype,
			phys_addr_t pgtable_base, unsigned long fault_addr)
{
	unsigned long *ent;

	/* clamp bogus fault types so the name-table lookup below is safe */
	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at 0x%lx(Page table base: %pa)\n",
			sysmmu_fault_name[itype], fault_addr, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: 0x%lx\n", *ent);

	/* for a page-type lv1 entry, also show the lv2 (page) entry */
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: 0x%lx\n", *ent);
	}

	pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");

	BUG();

	return 0;
}
2912a96536eSKyongHo Cho 
/*
 * Fault interrupt handler.  The System MMU blocks itself when it raises a
 * fault, so this handler identifies the cause, offers the fault first to
 * the attached domain (report_iommu_fault) and then to the driver-level
 * fault handler, acknowledges a recovered fault in hardware and finally
 * unblocks the MMU.
 */
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* System MMU is in the blocked state while this interrupt is serviced */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	unsigned long addr = -1;
	int ret = -ENOSYS;	/* -ENOSYS: no handler consumed the fault yet */

	read_lock(&data->lock);

	WARN_ON(!is_sysmmu_active(data));

	/* lowest set bit of INT_STATUS encodes the fault type */
	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	/* first chance: the IOMMU-API fault callback of the attached domain */
	if (data->domain)
		ret = report_iommu_fault(data->domain, data->dev, addr, itype);

	/* second chance: the driver-level handler (default_fault_handler) */
	if ((ret == -ENOSYS) && data->fault_handler) {
		unsigned long base = data->pgtable;
		if (itype != SYSMMU_FAULT_UNKNOWN)
			base = __raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		ret = data->fault_handler(itype, base, addr);
	}

	/* ret == 0 means the fault was recovered: acknowledge it in hardware */
	if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
		__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);
	else
		dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
				data->dbgname, sysmmu_fault_name[itype]);

	/* let the MMU translate again (it blocked itself on the fault) */
	if (itype != SYSMMU_FAULT_UNKNOWN)
		sysmmu_unblock(data->sfrbase);

	read_unlock(&data->lock);

	return IRQ_HANDLED;
}
3342a96536eSKyongHo Cho 
/*
 * Drop one enable reference and, if it was the last one, actually turn the
 * System MMU off (disable translation, gate the clock, clear the bookkeeping).
 * Returns true when the hardware was really disabled.
 */
static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;
	bool disabled = false;

	write_lock_irqsave(&data->lock, flags);

	if (!set_sysmmu_inactive(data))
		goto finish;

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);

	/* clk holds an ERR pointer when no clock was registered at probe */
	if (!IS_ERR(data->clk))
		clk_disable(data->clk);

	disabled = true;
	data->pgtable = 0;
	data->domain = NULL;
finish:
	write_unlock_irqrestore(&data->lock, flags);

	if (disabled)
		dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
	else
		dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
					data->dbgname, data->activations);

	return disabled;
}
3642a96536eSKyongHo Cho 
/* __exynos_sysmmu_enable: Enables System MMU
 *
 * returns -error if an error occurred and System MMU is not enabled,
 * 0 if the System MMU has been just enabled and 1 if System MMU was already
 * enabled before.
 *
 * Enabling nests (refcounted via set_sysmmu_active()); a nested enable
 * request with a different page table is refused with -EBUSY.
 */
static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
			unsigned long pgtable, struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	write_lock_irqsave(&data->lock, flags);

	if (!set_sysmmu_active(data)) {
		/* already active: only the same page table may be requested */
		if (WARN_ON(pgtable != data->pgtable)) {
			ret = -EBUSY;
			set_sysmmu_inactive(data);	/* undo the refcount bump */
		} else {
			ret = 1;
		}

		dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
		goto finish;
	}

	/* clk holds an ERR pointer when no clock was registered at probe */
	if (!IS_ERR(data->clk))
		clk_enable(data->clk);

	data->pgtable = pgtable;

	__sysmmu_set_ptbase(data->sfrbase, pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	data->domain = domain;

	dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
finish:
	write_unlock_irqrestore(&data->lock, flags);

	return ret;
}
4082a96536eSKyongHo Cho 
4092a96536eSKyongHo Cho int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
4102a96536eSKyongHo Cho {
4112a96536eSKyongHo Cho 	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
4122a96536eSKyongHo Cho 	int ret;
4132a96536eSKyongHo Cho 
4142a96536eSKyongHo Cho 	BUG_ON(!memblock_is_memory(pgtable));
4152a96536eSKyongHo Cho 
4162a96536eSKyongHo Cho 	ret = pm_runtime_get_sync(data->sysmmu);
4172a96536eSKyongHo Cho 	if (ret < 0) {
4182a96536eSKyongHo Cho 		dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
4192a96536eSKyongHo Cho 		return ret;
4202a96536eSKyongHo Cho 	}
4212a96536eSKyongHo Cho 
4222a96536eSKyongHo Cho 	ret = __exynos_sysmmu_enable(data, pgtable, NULL);
4232a96536eSKyongHo Cho 	if (WARN_ON(ret < 0)) {
4242a96536eSKyongHo Cho 		pm_runtime_put(data->sysmmu);
4252a96536eSKyongHo Cho 		dev_err(data->sysmmu,
4267222e8dbSCho KyongHo 			"(%s) Already enabled with page table %#x\n",
4272a96536eSKyongHo Cho 			data->dbgname, data->pgtable);
4282a96536eSKyongHo Cho 	} else {
4292a96536eSKyongHo Cho 		data->dev = dev;
4302a96536eSKyongHo Cho 	}
4312a96536eSKyongHo Cho 
4322a96536eSKyongHo Cho 	return ret;
4332a96536eSKyongHo Cho }
4342a96536eSKyongHo Cho 
/*
 * Counterpart of exynos_sysmmu_enable(): drops one enable reference and
 * the runtime-PM reference taken at enable time.  Returns true when the
 * System MMU hardware was actually turned off.
 */
static bool exynos_sysmmu_disable(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
	bool disabled;

	disabled = __exynos_sysmmu_disable(data);
	pm_runtime_put(data->sysmmu);

	return disabled;
}
4452a96536eSKyongHo Cho 
/*
 * Invalidate the TLB entries covering [iova, iova + size) on @dev's System
 * MMU.  Does nothing (apart from a debug message) while the MMU is disabled,
 * since the whole TLB is flushed again on enable.
 */
static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova,
					size_t size)
{
	unsigned long flags;
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	read_lock_irqsave(&data->lock, flags);

	if (is_sysmmu_active(data)) {
		unsigned int maj;
		unsigned int num_inv = 1;
		maj = __raw_readl(data->sfrbase + REG_MMU_VERSION);
		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidation
		 * 1MB page: 64 invalidation
		 * because it is set-associative TLB
		 * with 8-way and 64 sets.
		 * 1MB page can be cached in one of all sets.
		 * 64KB page can be one of 16 consecutive sets.
		 */
		if ((maj >> 28) == 2) /* major version number */
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		/* per-entry flush requires the MMU to be blocked first */
		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
	} else {
		dev_dbg(data->sysmmu,
			"(%s) Disabled. Skipping invalidating TLB.\n",
			data->dbgname);
	}

	read_unlock_irqrestore(&data->lock, flags);
}
4842a96536eSKyongHo Cho 
/*
 * Flush the whole TLB of @dev's System MMU.  Does nothing (apart from a
 * debug message) while the MMU is disabled.
 */
void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
	unsigned long flags;
	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);

	read_lock_irqsave(&data->lock, flags);

	if (is_sysmmu_active(data)) {
		/* the flush command requires the MMU to be blocked first */
		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate(data->sfrbase);
			sysmmu_unblock(data->sfrbase);
		}
	} else {
		dev_dbg(data->sysmmu,
			"(%s) Disabled. Skipping invalidating TLB.\n",
			data->dbgname);
	}

	read_unlock_irqrestore(&data->lock, flags);
}
5052a96536eSKyongHo Cho 
5062a96536eSKyongHo Cho static int exynos_sysmmu_probe(struct platform_device *pdev)
5072a96536eSKyongHo Cho {
5087222e8dbSCho KyongHo 	int ret;
5097222e8dbSCho KyongHo 	struct device *dev = &pdev->dev;
5102a96536eSKyongHo Cho 	struct sysmmu_drvdata *data;
5117222e8dbSCho KyongHo 	struct resource *res;
5122a96536eSKyongHo Cho 
5132a96536eSKyongHo Cho 	data = kzalloc(sizeof(*data), GFP_KERNEL);
5142a96536eSKyongHo Cho 	if (!data) {
5152a96536eSKyongHo Cho 		dev_dbg(dev, "Not enough memory\n");
5162a96536eSKyongHo Cho 		ret = -ENOMEM;
5172a96536eSKyongHo Cho 		goto err_alloc;
5182a96536eSKyongHo Cho 	}
5192a96536eSKyongHo Cho 
5207222e8dbSCho KyongHo 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5212a96536eSKyongHo Cho 	if (!res) {
5222a96536eSKyongHo Cho 		dev_dbg(dev, "Unable to find IOMEM region\n");
5232a96536eSKyongHo Cho 		ret = -ENOENT;
5247222e8dbSCho KyongHo 		goto err_init;
5252a96536eSKyongHo Cho 	}
5262a96536eSKyongHo Cho 
5277222e8dbSCho KyongHo 	data->sfrbase = ioremap(res->start, resource_size(res));
5287222e8dbSCho KyongHo 	if (!data->sfrbase) {
5297222e8dbSCho KyongHo 		dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n", res->start);
5302a96536eSKyongHo Cho 		ret = -ENOENT;
5312a96536eSKyongHo Cho 		goto err_res;
5322a96536eSKyongHo Cho 	}
5332a96536eSKyongHo Cho 
5347222e8dbSCho KyongHo 	ret = platform_get_irq(pdev, 0);
5352a96536eSKyongHo Cho 	if (ret <= 0) {
5362a96536eSKyongHo Cho 		dev_dbg(dev, "Unable to find IRQ resource\n");
5372a96536eSKyongHo Cho 		goto err_irq;
5382a96536eSKyongHo Cho 	}
5392a96536eSKyongHo Cho 
5402a96536eSKyongHo Cho 	ret = request_irq(ret, exynos_sysmmu_irq, 0,
5412a96536eSKyongHo Cho 				dev_name(dev), data);
5422a96536eSKyongHo Cho 	if (ret) {
5432a96536eSKyongHo Cho 		dev_dbg(dev, "Unabled to register interrupt handler\n");
5442a96536eSKyongHo Cho 		goto err_irq;
5452a96536eSKyongHo Cho 	}
5462a96536eSKyongHo Cho 
5472a96536eSKyongHo Cho 	if (dev_get_platdata(dev)) {
5487222e8dbSCho KyongHo 		data->clk = clk_get(dev, "sysmmu");
5497222e8dbSCho KyongHo 		if (IS_ERR(data->clk))
5502a96536eSKyongHo Cho 			dev_dbg(dev, "No clock descriptor registered\n");
5512a96536eSKyongHo Cho 	}
5522a96536eSKyongHo Cho 
5532a96536eSKyongHo Cho 	data->sysmmu = dev;
5542a96536eSKyongHo Cho 	rwlock_init(&data->lock);
5552a96536eSKyongHo Cho 	INIT_LIST_HEAD(&data->node);
5562a96536eSKyongHo Cho 
5572a96536eSKyongHo Cho 	__set_fault_handler(data, &default_fault_handler);
5582a96536eSKyongHo Cho 
5597222e8dbSCho KyongHo 	platform_set_drvdata(pdev, data);
5607222e8dbSCho KyongHo 
5612a96536eSKyongHo Cho 	if (dev->parent)
5622a96536eSKyongHo Cho 		pm_runtime_enable(dev);
5632a96536eSKyongHo Cho 
5642a96536eSKyongHo Cho 	dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
5652a96536eSKyongHo Cho 	return 0;
5662a96536eSKyongHo Cho err_irq:
5677222e8dbSCho KyongHo 	free_irq(platform_get_irq(pdev, 0), data);
5682a96536eSKyongHo Cho err_res:
5697222e8dbSCho KyongHo 	iounmap(data->sfrbase);
5702a96536eSKyongHo Cho err_init:
5712a96536eSKyongHo Cho 	kfree(data);
5722a96536eSKyongHo Cho err_alloc:
5732a96536eSKyongHo Cho 	dev_err(dev, "Failed to initialize\n");
5742a96536eSKyongHo Cho 	return ret;
5752a96536eSKyongHo Cho }
5762a96536eSKyongHo Cho 
/*
 * NOTE(review): no .remove callback — presumably System MMU instances are
 * never unbound once probed; confirm before adding unbind support.
 */
static struct platform_driver exynos_sysmmu_driver = {
	.probe		= exynos_sysmmu_probe,
	.driver		= {
		.owner		= THIS_MODULE,
		.name		= "exynos-sysmmu",
	}
};
5842a96536eSKyongHo Cho 
/*
 * Write page table updates back from the CPU caches (L1 and outer cache)
 * so the System MMU's table walker sees them in memory.
 */
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}
5912a96536eSKyongHo Cho 
/*
 * Allocate the per-domain private data: a 16KB lv1 page table (order-2
 * pages, NUM_LV1ENTRIES entries) and an order-1 array of per-section free
 * lv2-entry counters, then initialize locks, client list and geometry.
 */
static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* order-2 allocation keeps the 16KB lv1 table physically contiguous */
	priv->pgtable = (unsigned long *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 2);
	if (!priv->pgtable)
		goto err_pgtable;

	priv->lv2entcnt = (short *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 1);
	if (!priv->lv2entcnt)
		goto err_counter;

	/* make the zero-filled table visible to the hardware table walker */
	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->pgtablelock);
	INIT_LIST_HEAD(&priv->clients);

	/* full 32-bit IOVA space */
	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = ~0UL;
	domain->geometry.force_aperture = true;

	domain->priv = priv;
	return 0;

err_counter:
	free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
	kfree(priv);
	return -ENOMEM;
}
6292a96536eSKyongHo Cho 
/*
 * Tear down a domain: force-disable every still-attached System MMU, free
 * all lv2 tables the lv1 table still references, then release the lv1
 * table, the lv2 counters and the private data.
 */
static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct sysmmu_drvdata *data;
	unsigned long flags;
	int i;

	/* clients should have been detached before the domain is destroyed */
	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(data, &priv->clients, node) {
		/* NOTE(review): exynos_sysmmu_disable() calls pm_runtime_put()
		 * while this spinlock is held — verify that cannot sleep here. */
		while (!exynos_sysmmu_disable(data->dev))
			; /* until System MMU is actually disabled */
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	/* free every lv2 table still referenced by a page-type lv1 entry */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
	kfree(domain->priv);
	domain->priv = NULL;
}
6582a96536eSKyongHo Cho 
6592a96536eSKyongHo Cho static int exynos_iommu_attach_device(struct iommu_domain *domain,
6602a96536eSKyongHo Cho 				   struct device *dev)
6612a96536eSKyongHo Cho {
6622a96536eSKyongHo Cho 	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
6632a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv = domain->priv;
6647222e8dbSCho KyongHo 	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
6652a96536eSKyongHo Cho 	unsigned long flags;
6662a96536eSKyongHo Cho 	int ret;
6672a96536eSKyongHo Cho 
6682a96536eSKyongHo Cho 	ret = pm_runtime_get_sync(data->sysmmu);
6692a96536eSKyongHo Cho 	if (ret < 0)
6702a96536eSKyongHo Cho 		return ret;
6712a96536eSKyongHo Cho 
6722a96536eSKyongHo Cho 	ret = 0;
6732a96536eSKyongHo Cho 
6742a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->lock, flags);
6752a96536eSKyongHo Cho 
6767222e8dbSCho KyongHo 	ret = __exynos_sysmmu_enable(data, pagetable, domain);
6772a96536eSKyongHo Cho 
6782a96536eSKyongHo Cho 	if (ret == 0) {
6792a96536eSKyongHo Cho 		/* 'data->node' must not be appeared in priv->clients */
6802a96536eSKyongHo Cho 		BUG_ON(!list_empty(&data->node));
6812a96536eSKyongHo Cho 		data->dev = dev;
6822a96536eSKyongHo Cho 		list_add_tail(&data->node, &priv->clients);
6832a96536eSKyongHo Cho 	}
6842a96536eSKyongHo Cho 
6852a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->lock, flags);
6862a96536eSKyongHo Cho 
6872a96536eSKyongHo Cho 	if (ret < 0) {
6887222e8dbSCho KyongHo 		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
6897222e8dbSCho KyongHo 					__func__, &pagetable);
6902a96536eSKyongHo Cho 		pm_runtime_put(data->sysmmu);
6917222e8dbSCho KyongHo 		return ret;
6922a96536eSKyongHo Cho 	}
6932a96536eSKyongHo Cho 
6947222e8dbSCho KyongHo 	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
6957222e8dbSCho KyongHo 		__func__, &pagetable, (ret == 0) ? "" : ", again");
6967222e8dbSCho KyongHo 
6972a96536eSKyongHo Cho 	return ret;
6982a96536eSKyongHo Cho }
6992a96536eSKyongHo Cho 
7002a96536eSKyongHo Cho static void exynos_iommu_detach_device(struct iommu_domain *domain,
7012a96536eSKyongHo Cho 				    struct device *dev)
7022a96536eSKyongHo Cho {
7032a96536eSKyongHo Cho 	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
7042a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv = domain->priv;
7052a96536eSKyongHo Cho 	struct list_head *pos;
7067222e8dbSCho KyongHo 	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
7072a96536eSKyongHo Cho 	unsigned long flags;
7082a96536eSKyongHo Cho 	bool found = false;
7092a96536eSKyongHo Cho 
7102a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->lock, flags);
7112a96536eSKyongHo Cho 
7122a96536eSKyongHo Cho 	list_for_each(pos, &priv->clients) {
7132a96536eSKyongHo Cho 		if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
7142a96536eSKyongHo Cho 			found = true;
7152a96536eSKyongHo Cho 			break;
7162a96536eSKyongHo Cho 		}
7172a96536eSKyongHo Cho 	}
7182a96536eSKyongHo Cho 
7192a96536eSKyongHo Cho 	if (!found)
7202a96536eSKyongHo Cho 		goto finish;
7212a96536eSKyongHo Cho 
7222a96536eSKyongHo Cho 	if (__exynos_sysmmu_disable(data)) {
7237222e8dbSCho KyongHo 		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
7247222e8dbSCho KyongHo 					__func__, &pagetable);
725f8ffcc92SWei Yongjun 		list_del_init(&data->node);
7262a96536eSKyongHo Cho 
7272a96536eSKyongHo Cho 	} else {
7287222e8dbSCho KyongHo 		dev_dbg(dev, "%s: Detaching IOMMU with pgtable %pa delayed",
7297222e8dbSCho KyongHo 					__func__, &pagetable);
7302a96536eSKyongHo Cho 	}
7312a96536eSKyongHo Cho 
7322a96536eSKyongHo Cho finish:
7332a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->lock, flags);
7342a96536eSKyongHo Cho 
7352a96536eSKyongHo Cho 	if (found)
7362a96536eSKyongHo Cho 		pm_runtime_put(data->sysmmu);
7372a96536eSKyongHo Cho }
7382a96536eSKyongHo Cho 
7392a96536eSKyongHo Cho static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
7402a96536eSKyongHo Cho 					short *pgcounter)
7412a96536eSKyongHo Cho {
74261128f08SCho KyongHo 	if (lv1ent_section(sent)) {
74361128f08SCho KyongHo 		WARN(1, "Trying mapping on %#08lx mapped with 1MiB page", iova);
74461128f08SCho KyongHo 		return ERR_PTR(-EADDRINUSE);
74561128f08SCho KyongHo 	}
74661128f08SCho KyongHo 
7472a96536eSKyongHo Cho 	if (lv1ent_fault(sent)) {
7482a96536eSKyongHo Cho 		unsigned long *pent;
7492a96536eSKyongHo Cho 
750734c3c73SCho KyongHo 		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
7512a96536eSKyongHo Cho 		BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
7522a96536eSKyongHo Cho 		if (!pent)
75361128f08SCho KyongHo 			return ERR_PTR(-ENOMEM);
7542a96536eSKyongHo Cho 
7557222e8dbSCho KyongHo 		*sent = mk_lv1ent_page(virt_to_phys(pent));
7562a96536eSKyongHo Cho 		*pgcounter = NUM_LV2ENTRIES;
7572a96536eSKyongHo Cho 		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
7582a96536eSKyongHo Cho 		pgtable_flush(sent, sent + 1);
7592a96536eSKyongHo Cho 	}
7602a96536eSKyongHo Cho 
7612a96536eSKyongHo Cho 	return page_entry(sent, iova);
7622a96536eSKyongHo Cho }
7632a96536eSKyongHo Cho 
76461128f08SCho KyongHo static int lv1set_section(unsigned long *sent, unsigned long iova,
76561128f08SCho KyongHo 			  phys_addr_t paddr, short *pgcnt)
7662a96536eSKyongHo Cho {
76761128f08SCho KyongHo 	if (lv1ent_section(sent)) {
76861128f08SCho KyongHo 		WARN(1, "Trying mapping on 1MiB@%#08lx that is mapped",
76961128f08SCho KyongHo 			iova);
7702a96536eSKyongHo Cho 		return -EADDRINUSE;
77161128f08SCho KyongHo 	}
7722a96536eSKyongHo Cho 
7732a96536eSKyongHo Cho 	if (lv1ent_page(sent)) {
77461128f08SCho KyongHo 		if (*pgcnt != NUM_LV2ENTRIES) {
77561128f08SCho KyongHo 			WARN(1, "Trying mapping on 1MiB@%#08lx that is mapped",
77661128f08SCho KyongHo 				iova);
7772a96536eSKyongHo Cho 			return -EADDRINUSE;
77861128f08SCho KyongHo 		}
7792a96536eSKyongHo Cho 
780734c3c73SCho KyongHo 		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
7812a96536eSKyongHo Cho 		*pgcnt = 0;
7822a96536eSKyongHo Cho 	}
7832a96536eSKyongHo Cho 
7842a96536eSKyongHo Cho 	*sent = mk_lv1ent_sect(paddr);
7852a96536eSKyongHo Cho 
7862a96536eSKyongHo Cho 	pgtable_flush(sent, sent + 1);
7872a96536eSKyongHo Cho 
7882a96536eSKyongHo Cho 	return 0;
7892a96536eSKyongHo Cho }
7902a96536eSKyongHo Cho 
7912a96536eSKyongHo Cho static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
7922a96536eSKyongHo Cho 								short *pgcnt)
7932a96536eSKyongHo Cho {
7942a96536eSKyongHo Cho 	if (size == SPAGE_SIZE) {
79561128f08SCho KyongHo 		if (!lv2ent_fault(pent)) {
79661128f08SCho KyongHo 			WARN(1, "Trying mapping on 4KiB where mapping exists");
7972a96536eSKyongHo Cho 			return -EADDRINUSE;
79861128f08SCho KyongHo 		}
7992a96536eSKyongHo Cho 
8002a96536eSKyongHo Cho 		*pent = mk_lv2ent_spage(paddr);
8012a96536eSKyongHo Cho 		pgtable_flush(pent, pent + 1);
8022a96536eSKyongHo Cho 		*pgcnt -= 1;
8032a96536eSKyongHo Cho 	} else { /* size == LPAGE_SIZE */
8042a96536eSKyongHo Cho 		int i;
8052a96536eSKyongHo Cho 		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
8062a96536eSKyongHo Cho 			if (!lv2ent_fault(pent)) {
80761128f08SCho KyongHo 				WARN(1,
80861128f08SCho KyongHo 				"Trying mapping on 64KiB where mapping exists");
80961128f08SCho KyongHo 				if (i > 0)
81061128f08SCho KyongHo 					memset(pent - i, 0, sizeof(*pent) * i);
8112a96536eSKyongHo Cho 				return -EADDRINUSE;
8122a96536eSKyongHo Cho 			}
8132a96536eSKyongHo Cho 
8142a96536eSKyongHo Cho 			*pent = mk_lv2ent_lpage(paddr);
8152a96536eSKyongHo Cho 		}
8162a96536eSKyongHo Cho 		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
8172a96536eSKyongHo Cho 		*pgcnt -= SPAGES_PER_LPAGE;
8182a96536eSKyongHo Cho 	}
8192a96536eSKyongHo Cho 
8202a96536eSKyongHo Cho 	return 0;
8212a96536eSKyongHo Cho }
8222a96536eSKyongHo Cho 
8232a96536eSKyongHo Cho static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
8242a96536eSKyongHo Cho 			 phys_addr_t paddr, size_t size, int prot)
8252a96536eSKyongHo Cho {
8262a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv = domain->priv;
8272a96536eSKyongHo Cho 	unsigned long *entry;
8282a96536eSKyongHo Cho 	unsigned long flags;
8292a96536eSKyongHo Cho 	int ret = -ENOMEM;
8302a96536eSKyongHo Cho 
8312a96536eSKyongHo Cho 	BUG_ON(priv->pgtable == NULL);
8322a96536eSKyongHo Cho 
8332a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->pgtablelock, flags);
8342a96536eSKyongHo Cho 
8352a96536eSKyongHo Cho 	entry = section_entry(priv->pgtable, iova);
8362a96536eSKyongHo Cho 
8372a96536eSKyongHo Cho 	if (size == SECT_SIZE) {
83861128f08SCho KyongHo 		ret = lv1set_section(entry, iova, paddr,
8392a96536eSKyongHo Cho 					&priv->lv2entcnt[lv1ent_offset(iova)]);
8402a96536eSKyongHo Cho 	} else {
8412a96536eSKyongHo Cho 		unsigned long *pent;
8422a96536eSKyongHo Cho 
8432a96536eSKyongHo Cho 		pent = alloc_lv2entry(entry, iova,
8442a96536eSKyongHo Cho 					&priv->lv2entcnt[lv1ent_offset(iova)]);
8452a96536eSKyongHo Cho 
84661128f08SCho KyongHo 		if (IS_ERR(pent))
84761128f08SCho KyongHo 			ret = PTR_ERR(pent);
8482a96536eSKyongHo Cho 		else
8492a96536eSKyongHo Cho 			ret = lv2set_page(pent, paddr, size,
8502a96536eSKyongHo Cho 					&priv->lv2entcnt[lv1ent_offset(iova)]);
8512a96536eSKyongHo Cho 	}
8522a96536eSKyongHo Cho 
85361128f08SCho KyongHo 	if (ret)
8542a96536eSKyongHo Cho 		pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
8552a96536eSKyongHo Cho 							__func__, iova, size);
8562a96536eSKyongHo Cho 
8572a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->pgtablelock, flags);
8582a96536eSKyongHo Cho 
8592a96536eSKyongHo Cho 	return ret;
8602a96536eSKyongHo Cho }
8612a96536eSKyongHo Cho 
8622a96536eSKyongHo Cho static size_t exynos_iommu_unmap(struct iommu_domain *domain,
8632a96536eSKyongHo Cho 					       unsigned long iova, size_t size)
8642a96536eSKyongHo Cho {
8652a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv = domain->priv;
8662a96536eSKyongHo Cho 	struct sysmmu_drvdata *data;
8672a96536eSKyongHo Cho 	unsigned long flags;
8682a96536eSKyongHo Cho 	unsigned long *ent;
86961128f08SCho KyongHo 	size_t err_pgsize;
8702a96536eSKyongHo Cho 
8712a96536eSKyongHo Cho 	BUG_ON(priv->pgtable == NULL);
8722a96536eSKyongHo Cho 
8732a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->pgtablelock, flags);
8742a96536eSKyongHo Cho 
8752a96536eSKyongHo Cho 	ent = section_entry(priv->pgtable, iova);
8762a96536eSKyongHo Cho 
8772a96536eSKyongHo Cho 	if (lv1ent_section(ent)) {
87861128f08SCho KyongHo 		if (size < SECT_SIZE) {
87961128f08SCho KyongHo 			err_pgsize = SECT_SIZE;
88061128f08SCho KyongHo 			goto err;
88161128f08SCho KyongHo 		}
8822a96536eSKyongHo Cho 
8832a96536eSKyongHo Cho 		*ent = 0;
8842a96536eSKyongHo Cho 		pgtable_flush(ent, ent + 1);
8852a96536eSKyongHo Cho 		size = SECT_SIZE;
8862a96536eSKyongHo Cho 		goto done;
8872a96536eSKyongHo Cho 	}
8882a96536eSKyongHo Cho 
8892a96536eSKyongHo Cho 	if (unlikely(lv1ent_fault(ent))) {
8902a96536eSKyongHo Cho 		if (size > SECT_SIZE)
8912a96536eSKyongHo Cho 			size = SECT_SIZE;
8922a96536eSKyongHo Cho 		goto done;
8932a96536eSKyongHo Cho 	}
8942a96536eSKyongHo Cho 
8952a96536eSKyongHo Cho 	/* lv1ent_page(sent) == true here */
8962a96536eSKyongHo Cho 
8972a96536eSKyongHo Cho 	ent = page_entry(ent, iova);
8982a96536eSKyongHo Cho 
8992a96536eSKyongHo Cho 	if (unlikely(lv2ent_fault(ent))) {
9002a96536eSKyongHo Cho 		size = SPAGE_SIZE;
9012a96536eSKyongHo Cho 		goto done;
9022a96536eSKyongHo Cho 	}
9032a96536eSKyongHo Cho 
9042a96536eSKyongHo Cho 	if (lv2ent_small(ent)) {
9052a96536eSKyongHo Cho 		*ent = 0;
9062a96536eSKyongHo Cho 		size = SPAGE_SIZE;
9076cb47ed7SCho KyongHo 		pgtable_flush(ent, ent + 1);
9082a96536eSKyongHo Cho 		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
9092a96536eSKyongHo Cho 		goto done;
9102a96536eSKyongHo Cho 	}
9112a96536eSKyongHo Cho 
9122a96536eSKyongHo Cho 	/* lv1ent_large(ent) == true here */
91361128f08SCho KyongHo 	if (size < LPAGE_SIZE) {
91461128f08SCho KyongHo 		err_pgsize = LPAGE_SIZE;
91561128f08SCho KyongHo 		goto err;
91661128f08SCho KyongHo 	}
9172a96536eSKyongHo Cho 
9182a96536eSKyongHo Cho 	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
9196cb47ed7SCho KyongHo 	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);
9202a96536eSKyongHo Cho 
9212a96536eSKyongHo Cho 	size = LPAGE_SIZE;
9222a96536eSKyongHo Cho 	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
9232a96536eSKyongHo Cho done:
9242a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->pgtablelock, flags);
9252a96536eSKyongHo Cho 
9262a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->lock, flags);
9272a96536eSKyongHo Cho 	list_for_each_entry(data, &priv->clients, node)
9283ad6b7f3SCho KyongHo 		sysmmu_tlb_invalidate_entry(data->dev, iova, size);
9292a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->lock, flags);
9302a96536eSKyongHo Cho 
9312a96536eSKyongHo Cho 	return size;
93261128f08SCho KyongHo err:
93361128f08SCho KyongHo 	spin_unlock_irqrestore(&priv->pgtablelock, flags);
93461128f08SCho KyongHo 
93561128f08SCho KyongHo 	WARN(1,
93661128f08SCho KyongHo 	"%s: Failed due to size(%#x) @ %#08lx is smaller than page size %#x\n",
93761128f08SCho KyongHo 	__func__, size, iova, err_pgsize);
93861128f08SCho KyongHo 
93961128f08SCho KyongHo 	return 0;
9402a96536eSKyongHo Cho }
9412a96536eSKyongHo Cho 
9422a96536eSKyongHo Cho static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
943bb5547acSVarun Sethi 					  dma_addr_t iova)
9442a96536eSKyongHo Cho {
9452a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv = domain->priv;
9462a96536eSKyongHo Cho 	unsigned long *entry;
9472a96536eSKyongHo Cho 	unsigned long flags;
9482a96536eSKyongHo Cho 	phys_addr_t phys = 0;
9492a96536eSKyongHo Cho 
9502a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->pgtablelock, flags);
9512a96536eSKyongHo Cho 
9522a96536eSKyongHo Cho 	entry = section_entry(priv->pgtable, iova);
9532a96536eSKyongHo Cho 
9542a96536eSKyongHo Cho 	if (lv1ent_section(entry)) {
9552a96536eSKyongHo Cho 		phys = section_phys(entry) + section_offs(iova);
9562a96536eSKyongHo Cho 	} else if (lv1ent_page(entry)) {
9572a96536eSKyongHo Cho 		entry = page_entry(entry, iova);
9582a96536eSKyongHo Cho 
9592a96536eSKyongHo Cho 		if (lv2ent_large(entry))
9602a96536eSKyongHo Cho 			phys = lpage_phys(entry) + lpage_offs(iova);
9612a96536eSKyongHo Cho 		else if (lv2ent_small(entry))
9622a96536eSKyongHo Cho 			phys = spage_phys(entry) + spage_offs(iova);
9632a96536eSKyongHo Cho 	}
9642a96536eSKyongHo Cho 
9652a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->pgtablelock, flags);
9662a96536eSKyongHo Cho 
9672a96536eSKyongHo Cho 	return phys;
9682a96536eSKyongHo Cho }
9692a96536eSKyongHo Cho 
9702a96536eSKyongHo Cho static struct iommu_ops exynos_iommu_ops = {
9712a96536eSKyongHo Cho 	.domain_init = &exynos_iommu_domain_init,
9722a96536eSKyongHo Cho 	.domain_destroy = &exynos_iommu_domain_destroy,
9732a96536eSKyongHo Cho 	.attach_dev = &exynos_iommu_attach_device,
9742a96536eSKyongHo Cho 	.detach_dev = &exynos_iommu_detach_device,
9752a96536eSKyongHo Cho 	.map = &exynos_iommu_map,
9762a96536eSKyongHo Cho 	.unmap = &exynos_iommu_unmap,
9772a96536eSKyongHo Cho 	.iova_to_phys = &exynos_iommu_iova_to_phys,
9782a96536eSKyongHo Cho 	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
9792a96536eSKyongHo Cho };
9802a96536eSKyongHo Cho 
9812a96536eSKyongHo Cho static int __init exynos_iommu_init(void)
9822a96536eSKyongHo Cho {
9832a96536eSKyongHo Cho 	int ret;
9842a96536eSKyongHo Cho 
985734c3c73SCho KyongHo 	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
986734c3c73SCho KyongHo 				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
987734c3c73SCho KyongHo 	if (!lv2table_kmem_cache) {
988734c3c73SCho KyongHo 		pr_err("%s: Failed to create kmem cache\n", __func__);
989734c3c73SCho KyongHo 		return -ENOMEM;
990734c3c73SCho KyongHo 	}
991734c3c73SCho KyongHo 
9922a96536eSKyongHo Cho 	ret = platform_driver_register(&exynos_sysmmu_driver);
993734c3c73SCho KyongHo 	if (ret) {
994734c3c73SCho KyongHo 		pr_err("%s: Failed to register driver\n", __func__);
995734c3c73SCho KyongHo 		goto err_reg_driver;
996734c3c73SCho KyongHo 	}
9972a96536eSKyongHo Cho 
998734c3c73SCho KyongHo 	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
999734c3c73SCho KyongHo 	if (ret) {
1000734c3c73SCho KyongHo 		pr_err("%s: Failed to register exynos-iommu driver.\n",
1001734c3c73SCho KyongHo 								__func__);
1002734c3c73SCho KyongHo 		goto err_set_iommu;
1003734c3c73SCho KyongHo 	}
10042a96536eSKyongHo Cho 
1005734c3c73SCho KyongHo 	return 0;
1006734c3c73SCho KyongHo err_set_iommu:
1007734c3c73SCho KyongHo 	platform_driver_unregister(&exynos_sysmmu_driver);
1008734c3c73SCho KyongHo err_reg_driver:
1009734c3c73SCho KyongHo 	kmem_cache_destroy(lv2table_kmem_cache);
10102a96536eSKyongHo Cho 	return ret;
10112a96536eSKyongHo Cho }
10122a96536eSKyongHo Cho subsys_initcall(exynos_iommu_init);
1013