xref: /openbmc/linux/drivers/iommu/exynos-iommu.c (revision 3ad6b7f3)
12a96536eSKyongHo Cho /* linux/drivers/iommu/exynos_iommu.c
22a96536eSKyongHo Cho  *
32a96536eSKyongHo Cho  * Copyright (c) 2011 Samsung Electronics Co., Ltd.
42a96536eSKyongHo Cho  *		http://www.samsung.com
52a96536eSKyongHo Cho  *
62a96536eSKyongHo Cho  * This program is free software; you can redistribute it and/or modify
72a96536eSKyongHo Cho  * it under the terms of the GNU General Public License version 2 as
82a96536eSKyongHo Cho  * published by the Free Software Foundation.
92a96536eSKyongHo Cho  */
102a96536eSKyongHo Cho 
112a96536eSKyongHo Cho #ifdef CONFIG_EXYNOS_IOMMU_DEBUG
122a96536eSKyongHo Cho #define DEBUG
132a96536eSKyongHo Cho #endif
142a96536eSKyongHo Cho 
152a96536eSKyongHo Cho #include <linux/io.h>
162a96536eSKyongHo Cho #include <linux/interrupt.h>
172a96536eSKyongHo Cho #include <linux/platform_device.h>
182a96536eSKyongHo Cho #include <linux/slab.h>
192a96536eSKyongHo Cho #include <linux/pm_runtime.h>
202a96536eSKyongHo Cho #include <linux/clk.h>
212a96536eSKyongHo Cho #include <linux/err.h>
222a96536eSKyongHo Cho #include <linux/mm.h>
232a96536eSKyongHo Cho #include <linux/iommu.h>
242a96536eSKyongHo Cho #include <linux/errno.h>
252a96536eSKyongHo Cho #include <linux/list.h>
262a96536eSKyongHo Cho #include <linux/memblock.h>
272a96536eSKyongHo Cho #include <linux/export.h>
282a96536eSKyongHo Cho 
292a96536eSKyongHo Cho #include <asm/cacheflush.h>
302a96536eSKyongHo Cho #include <asm/pgtable.h>
312a96536eSKyongHo Cho 
322a96536eSKyongHo Cho /* We do not consider super section mapping (16MB) */
332a96536eSKyongHo Cho #define SECT_ORDER 20
342a96536eSKyongHo Cho #define LPAGE_ORDER 16
352a96536eSKyongHo Cho #define SPAGE_ORDER 12
362a96536eSKyongHo Cho 
372a96536eSKyongHo Cho #define SECT_SIZE (1 << SECT_ORDER)
382a96536eSKyongHo Cho #define LPAGE_SIZE (1 << LPAGE_ORDER)
392a96536eSKyongHo Cho #define SPAGE_SIZE (1 << SPAGE_ORDER)
402a96536eSKyongHo Cho 
412a96536eSKyongHo Cho #define SECT_MASK (~(SECT_SIZE - 1))
422a96536eSKyongHo Cho #define LPAGE_MASK (~(LPAGE_SIZE - 1))
432a96536eSKyongHo Cho #define SPAGE_MASK (~(SPAGE_SIZE - 1))
442a96536eSKyongHo Cho 
452a96536eSKyongHo Cho #define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
462a96536eSKyongHo Cho #define lv1ent_page(sent) ((*(sent) & 3) == 1)
472a96536eSKyongHo Cho #define lv1ent_section(sent) ((*(sent) & 3) == 2)
482a96536eSKyongHo Cho 
492a96536eSKyongHo Cho #define lv2ent_fault(pent) ((*(pent) & 3) == 0)
502a96536eSKyongHo Cho #define lv2ent_small(pent) ((*(pent) & 2) == 2)
512a96536eSKyongHo Cho #define lv2ent_large(pent) ((*(pent) & 3) == 1)
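/*
 * Note (added for clarity; not in the original source): the test macros above
 * follow the ARM short-descriptor encoding in bits [1:0] of each entry.
 * Lv1: 0b00/0b11 = fault, 0b01 = pointer to an lv2 table, 0b10 = 1MB section.
 * Lv2: 0b00 = fault, 0b01 = 64KB large page, 0b1x = 4KB small page.
 */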
522a96536eSKyongHo Cho 
532a96536eSKyongHo Cho #define section_phys(sent) (*(sent) & SECT_MASK)
542a96536eSKyongHo Cho #define section_offs(iova) ((iova) & 0xFFFFF)
552a96536eSKyongHo Cho #define lpage_phys(pent) (*(pent) & LPAGE_MASK)
562a96536eSKyongHo Cho #define lpage_offs(iova) ((iova) & 0xFFFF)
572a96536eSKyongHo Cho #define spage_phys(pent) (*(pent) & SPAGE_MASK)
582a96536eSKyongHo Cho #define spage_offs(iova) ((iova) & 0xFFF)
592a96536eSKyongHo Cho 
602a96536eSKyongHo Cho #define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
612a96536eSKyongHo Cho #define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)
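/*
 * Worked example (added for illustration; the IOVA value is arbitrary):
 * decomposing iova = 0x12345678 with the macros above gives
 *   lv1ent_offset(iova) = 0x123   (index into the 4096-entry lv1 table)
 *   lv2ent_offset(iova) = 0x45    (index into the 256-entry lv2 table)
 *   section_offs(iova)  = 0x45678 (offset within a 1MB section)
 *   lpage_offs(iova)    = 0x5678  (offset within a 64KB large page)
 *   spage_offs(iova)    = 0x678   (offset within a 4KB small page)
 */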
622a96536eSKyongHo Cho 
632a96536eSKyongHo Cho #define NUM_LV1ENTRIES 4096
642a96536eSKyongHo Cho #define NUM_LV2ENTRIES 256
652a96536eSKyongHo Cho 
662a96536eSKyongHo Cho #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
672a96536eSKyongHo Cho 
682a96536eSKyongHo Cho #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
692a96536eSKyongHo Cho 
702a96536eSKyongHo Cho #define lv2table_base(sent) (*(sent) & 0xFFFFFC00)
712a96536eSKyongHo Cho 
722a96536eSKyongHo Cho #define mk_lv1ent_sect(pa) ((pa) | 2)
732a96536eSKyongHo Cho #define mk_lv1ent_page(pa) ((pa) | 1)
742a96536eSKyongHo Cho #define mk_lv2ent_lpage(pa) ((pa) | 1)
752a96536eSKyongHo Cho #define mk_lv2ent_spage(pa) ((pa) | 2)
762a96536eSKyongHo Cho 
772a96536eSKyongHo Cho #define CTRL_ENABLE	0x5
782a96536eSKyongHo Cho #define CTRL_BLOCK	0x7
792a96536eSKyongHo Cho #define CTRL_DISABLE	0x0
802a96536eSKyongHo Cho 
812a96536eSKyongHo Cho #define REG_MMU_CTRL		0x000
822a96536eSKyongHo Cho #define REG_MMU_CFG		0x004
832a96536eSKyongHo Cho #define REG_MMU_STATUS		0x008
842a96536eSKyongHo Cho #define REG_MMU_FLUSH		0x00C
852a96536eSKyongHo Cho #define REG_MMU_FLUSH_ENTRY	0x010
862a96536eSKyongHo Cho #define REG_PT_BASE_ADDR	0x014
872a96536eSKyongHo Cho #define REG_INT_STATUS		0x018
882a96536eSKyongHo Cho #define REG_INT_CLEAR		0x01C
892a96536eSKyongHo Cho 
902a96536eSKyongHo Cho #define REG_PAGE_FAULT_ADDR	0x024
912a96536eSKyongHo Cho #define REG_AW_FAULT_ADDR	0x028
922a96536eSKyongHo Cho #define REG_AR_FAULT_ADDR	0x02C
932a96536eSKyongHo Cho #define REG_DEFAULT_SLAVE_ADDR	0x030
942a96536eSKyongHo Cho 
952a96536eSKyongHo Cho #define REG_MMU_VERSION		0x034
962a96536eSKyongHo Cho 
972a96536eSKyongHo Cho #define REG_PB0_SADDR		0x04C
982a96536eSKyongHo Cho #define REG_PB0_EADDR		0x050
992a96536eSKyongHo Cho #define REG_PB1_SADDR		0x054
1002a96536eSKyongHo Cho #define REG_PB1_EADDR		0x058
1012a96536eSKyongHo Cho 
102734c3c73SCho KyongHo static struct kmem_cache *lv2table_kmem_cache;
103734c3c73SCho KyongHo 
1042a96536eSKyongHo Cho static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
1052a96536eSKyongHo Cho {
1062a96536eSKyongHo Cho 	return pgtable + lv1ent_offset(iova);
1072a96536eSKyongHo Cho }
1082a96536eSKyongHo Cho 
1092a96536eSKyongHo Cho static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
1102a96536eSKyongHo Cho {
1117222e8dbSCho KyongHo 	return (unsigned long *)phys_to_virt(
1127222e8dbSCho KyongHo 				lv2table_base(sent)) + lv2ent_offset(iova);
1132a96536eSKyongHo Cho }
1142a96536eSKyongHo Cho 
1152a96536eSKyongHo Cho enum exynos_sysmmu_inttype {
1162a96536eSKyongHo Cho 	SYSMMU_PAGEFAULT,
1172a96536eSKyongHo Cho 	SYSMMU_AR_MULTIHIT,
1182a96536eSKyongHo Cho 	SYSMMU_AW_MULTIHIT,
1192a96536eSKyongHo Cho 	SYSMMU_BUSERROR,
1202a96536eSKyongHo Cho 	SYSMMU_AR_SECURITY,
1212a96536eSKyongHo Cho 	SYSMMU_AR_ACCESS,
1222a96536eSKyongHo Cho 	SYSMMU_AW_SECURITY,
1232a96536eSKyongHo Cho 	SYSMMU_AW_PROTECTION, /* 7 */
1242a96536eSKyongHo Cho 	SYSMMU_FAULT_UNKNOWN,
1252a96536eSKyongHo Cho 	SYSMMU_FAULTS_NUM
1262a96536eSKyongHo Cho };
1272a96536eSKyongHo Cho 
1282a96536eSKyongHo Cho /*
1292a96536eSKyongHo Cho  * @itype: type of fault.
1302a96536eSKyongHo Cho  * @pgtable_base: the physical address of page table base. This is 0 if @itype
1312a96536eSKyongHo Cho  *                is SYSMMU_BUSERROR.
1322a96536eSKyongHo Cho  * @fault_addr: the device (virtual) address that the System MMU tried to
1332a96536eSKyongHo Cho  *             translate. This is 0 if @itype is SYSMMU_BUSERROR.
1342a96536eSKyongHo Cho  */
1352a96536eSKyongHo Cho typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
1367222e8dbSCho KyongHo 			phys_addr_t pgtable_base, unsigned long fault_addr);
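/*
 * Minimal sketch (illustrative only, not part of the original driver) of a
 * handler matching the prototype above. A client driver could register it
 * with exynos_sysmmu_set_fault_handler(); returning 0 tells
 * exynos_sysmmu_irq() that the fault was handled, so the interrupt is cleared
 * and the System MMU is unblocked.
 */
#if 0	/* example only, not built */
static int example_fault_handler(enum exynos_sysmmu_inttype itype,
			phys_addr_t pgtable_base, unsigned long fault_addr)
{
	pr_warn("sysmmu fault %d at %#lx (page table base %pa)\n",
			itype, fault_addr, &pgtable_base);
	return 0;	/* 0 means handled; exynos_sysmmu_irq() clears the IRQ */
}
#endif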
1372a96536eSKyongHo Cho 
1382a96536eSKyongHo Cho static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
1392a96536eSKyongHo Cho 	REG_PAGE_FAULT_ADDR,
1402a96536eSKyongHo Cho 	REG_AR_FAULT_ADDR,
1412a96536eSKyongHo Cho 	REG_AW_FAULT_ADDR,
1422a96536eSKyongHo Cho 	REG_DEFAULT_SLAVE_ADDR,
1432a96536eSKyongHo Cho 	REG_AR_FAULT_ADDR,
1442a96536eSKyongHo Cho 	REG_AR_FAULT_ADDR,
1452a96536eSKyongHo Cho 	REG_AW_FAULT_ADDR,
1462a96536eSKyongHo Cho 	REG_AW_FAULT_ADDR
1472a96536eSKyongHo Cho };
1482a96536eSKyongHo Cho 
1492a96536eSKyongHo Cho static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
1502a96536eSKyongHo Cho 	"PAGE FAULT",
1512a96536eSKyongHo Cho 	"AR MULTI-HIT FAULT",
1522a96536eSKyongHo Cho 	"AW MULTI-HIT FAULT",
1532a96536eSKyongHo Cho 	"BUS ERROR",
1542a96536eSKyongHo Cho 	"AR SECURITY PROTECTION FAULT",
1552a96536eSKyongHo Cho 	"AR ACCESS PROTECTION FAULT",
1562a96536eSKyongHo Cho 	"AW SECURITY PROTECTION FAULT",
1572a96536eSKyongHo Cho 	"AW ACCESS PROTECTION FAULT",
1582a96536eSKyongHo Cho 	"UNKNOWN FAULT"
1592a96536eSKyongHo Cho };
1602a96536eSKyongHo Cho 
1612a96536eSKyongHo Cho struct exynos_iommu_domain {
1622a96536eSKyongHo Cho 	struct list_head clients; /* list of sysmmu_drvdata.node */
1632a96536eSKyongHo Cho 	unsigned long *pgtable; /* lv1 page table, 16KB */
1642a96536eSKyongHo Cho 	short *lv2entcnt; /* free lv2 entry counter for each section */
1652a96536eSKyongHo Cho 	spinlock_t lock; /* lock for this structure */
1662a96536eSKyongHo Cho 	spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
1672a96536eSKyongHo Cho };
1682a96536eSKyongHo Cho 
1692a96536eSKyongHo Cho struct sysmmu_drvdata {
1702a96536eSKyongHo Cho 	struct list_head node; /* entry of exynos_iommu_domain.clients */
1712a96536eSKyongHo Cho 	struct device *sysmmu;	/* System MMU's device descriptor */
1722a96536eSKyongHo Cho 	struct device *dev;	/* Owner of system MMU */
1732a96536eSKyongHo Cho 	char *dbgname;
1747222e8dbSCho KyongHo 	void __iomem *sfrbase;
1757222e8dbSCho KyongHo 	struct clk *clk;
1762a96536eSKyongHo Cho 	int activations;
1772a96536eSKyongHo Cho 	rwlock_t lock;
1782a96536eSKyongHo Cho 	struct iommu_domain *domain;
1792a96536eSKyongHo Cho 	sysmmu_fault_handler_t fault_handler;
1807222e8dbSCho KyongHo 	phys_addr_t pgtable;
1812a96536eSKyongHo Cho };
1822a96536eSKyongHo Cho 
1832a96536eSKyongHo Cho static bool set_sysmmu_active(struct sysmmu_drvdata *data)
1842a96536eSKyongHo Cho {
1852a96536eSKyongHo Cho 	/* return true if the System MMU was not active previously
1862a96536eSKyongHo Cho 	   and it needs to be initialized */
1872a96536eSKyongHo Cho 	return ++data->activations == 1;
1882a96536eSKyongHo Cho }
1892a96536eSKyongHo Cho 
1902a96536eSKyongHo Cho static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
1912a96536eSKyongHo Cho {
1922a96536eSKyongHo Cho 	/* return true if the System MMU needs to be disabled */
1932a96536eSKyongHo Cho 	BUG_ON(data->activations < 1);
1942a96536eSKyongHo Cho 	return --data->activations == 0;
1952a96536eSKyongHo Cho }
1962a96536eSKyongHo Cho 
1972a96536eSKyongHo Cho static bool is_sysmmu_active(struct sysmmu_drvdata *data)
1982a96536eSKyongHo Cho {
1992a96536eSKyongHo Cho 	return data->activations > 0;
2002a96536eSKyongHo Cho }
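/*
 * Note (added for clarity): enable/disable requests are reference counted
 * through 'activations'. set_sysmmu_active() returns true only on the
 * 0 -> 1 transition and set_sysmmu_inactive() only on the 1 -> 0 transition,
 * so the hardware is programmed on the first enable and turned off on the
 * last matching disable.
 */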
2012a96536eSKyongHo Cho 
2022a96536eSKyongHo Cho static void sysmmu_unblock(void __iomem *sfrbase)
2032a96536eSKyongHo Cho {
2042a96536eSKyongHo Cho 	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
2052a96536eSKyongHo Cho }
2062a96536eSKyongHo Cho 
2072a96536eSKyongHo Cho static bool sysmmu_block(void __iomem *sfrbase)
2082a96536eSKyongHo Cho {
2092a96536eSKyongHo Cho 	int i = 120;
2102a96536eSKyongHo Cho 
2112a96536eSKyongHo Cho 	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
2122a96536eSKyongHo Cho 	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
2132a96536eSKyongHo Cho 		--i;
2142a96536eSKyongHo Cho 
2152a96536eSKyongHo Cho 	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
2162a96536eSKyongHo Cho 		sysmmu_unblock(sfrbase);
2172a96536eSKyongHo Cho 		return false;
2182a96536eSKyongHo Cho 	}
2192a96536eSKyongHo Cho 
2202a96536eSKyongHo Cho 	return true;
2212a96536eSKyongHo Cho }
2222a96536eSKyongHo Cho 
2232a96536eSKyongHo Cho static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
2242a96536eSKyongHo Cho {
2252a96536eSKyongHo Cho 	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
2262a96536eSKyongHo Cho }
2272a96536eSKyongHo Cho 
2282a96536eSKyongHo Cho static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
2293ad6b7f3SCho KyongHo 				unsigned long iova, unsigned int num_inv)
2302a96536eSKyongHo Cho {
2313ad6b7f3SCho KyongHo 	unsigned int i;
2323ad6b7f3SCho KyongHo 	for (i = 0; i < num_inv; i++) {
2333ad6b7f3SCho KyongHo 		__raw_writel((iova & SPAGE_MASK) | 1,
2343ad6b7f3SCho KyongHo 				sfrbase + REG_MMU_FLUSH_ENTRY);
2353ad6b7f3SCho KyongHo 		iova += SPAGE_SIZE;
2363ad6b7f3SCho KyongHo 	}
2372a96536eSKyongHo Cho }
2382a96536eSKyongHo Cho 
2392a96536eSKyongHo Cho static void __sysmmu_set_ptbase(void __iomem *sfrbase,
2402a96536eSKyongHo Cho 				       unsigned long pgd)
2412a96536eSKyongHo Cho {
2422a96536eSKyongHo Cho 	__raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
2432a96536eSKyongHo Cho 	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);
2442a96536eSKyongHo Cho 
2452a96536eSKyongHo Cho 	__sysmmu_tlb_invalidate(sfrbase);
2462a96536eSKyongHo Cho }
2472a96536eSKyongHo Cho 
2482a96536eSKyongHo Cho static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
2492a96536eSKyongHo Cho 						unsigned long size, int idx)
2502a96536eSKyongHo Cho {
2512a96536eSKyongHo Cho 	__raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
2522a96536eSKyongHo Cho 	__raw_writel(size - 1 + base,  sfrbase + REG_PB0_EADDR + idx * 8);
2532a96536eSKyongHo Cho }
2542a96536eSKyongHo Cho 
2552a96536eSKyongHo Cho static void __set_fault_handler(struct sysmmu_drvdata *data,
2562a96536eSKyongHo Cho 					sysmmu_fault_handler_t handler)
2572a96536eSKyongHo Cho {
2582a96536eSKyongHo Cho 	unsigned long flags;
2592a96536eSKyongHo Cho 
2602a96536eSKyongHo Cho 	write_lock_irqsave(&data->lock, flags);
2612a96536eSKyongHo Cho 	data->fault_handler = handler;
2622a96536eSKyongHo Cho 	write_unlock_irqrestore(&data->lock, flags);
2632a96536eSKyongHo Cho }
2642a96536eSKyongHo Cho 
2652a96536eSKyongHo Cho void exynos_sysmmu_set_fault_handler(struct device *dev,
2662a96536eSKyongHo Cho 					sysmmu_fault_handler_t handler)
2672a96536eSKyongHo Cho {
2682a96536eSKyongHo Cho 	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
2692a96536eSKyongHo Cho 
2702a96536eSKyongHo Cho 	__set_fault_handler(data, handler);
2712a96536eSKyongHo Cho }
2722a96536eSKyongHo Cho 
2732a96536eSKyongHo Cho static int default_fault_handler(enum exynos_sysmmu_inttype itype,
2747222e8dbSCho KyongHo 			phys_addr_t pgtable_base, unsigned long fault_addr)
2752a96536eSKyongHo Cho {
2762a96536eSKyongHo Cho 	unsigned long *ent;
2772a96536eSKyongHo Cho 
2782a96536eSKyongHo Cho 	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
2792a96536eSKyongHo Cho 		itype = SYSMMU_FAULT_UNKNOWN;
2802a96536eSKyongHo Cho 
2817222e8dbSCho KyongHo 	pr_err("%s occurred at 0x%lx(Page table base: %pa)\n",
2827222e8dbSCho KyongHo 			sysmmu_fault_name[itype], fault_addr, &pgtable_base);
2832a96536eSKyongHo Cho 
2847222e8dbSCho KyongHo 	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
2852a96536eSKyongHo Cho 	pr_err("\tLv1 entry: 0x%lx\n", *ent);
2862a96536eSKyongHo Cho 
2872a96536eSKyongHo Cho 	if (lv1ent_page(ent)) {
2882a96536eSKyongHo Cho 		ent = page_entry(ent, fault_addr);
2892a96536eSKyongHo Cho 		pr_err("\t Lv2 entry: 0x%lx\n", *ent);
2902a96536eSKyongHo Cho 	}
2912a96536eSKyongHo Cho 
2922a96536eSKyongHo Cho 	pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
2932a96536eSKyongHo Cho 
2942a96536eSKyongHo Cho 	BUG();
2952a96536eSKyongHo Cho 
2962a96536eSKyongHo Cho 	return 0;
2972a96536eSKyongHo Cho }
2982a96536eSKyongHo Cho 
2992a96536eSKyongHo Cho static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
3002a96536eSKyongHo Cho {
3012a96536eSKyongHo Cho 	/* The SYSMMU is in a blocked state when an interrupt occurs. */
3022a96536eSKyongHo Cho 	struct sysmmu_drvdata *data = dev_id;
3032a96536eSKyongHo Cho 	enum exynos_sysmmu_inttype itype;
3042a96536eSKyongHo Cho 	unsigned long addr = -1;
3057222e8dbSCho KyongHo 	int ret = -ENOSYS;
3062a96536eSKyongHo Cho 
3072a96536eSKyongHo Cho 	read_lock(&data->lock);
3082a96536eSKyongHo Cho 
3092a96536eSKyongHo Cho 	WARN_ON(!is_sysmmu_active(data));
3102a96536eSKyongHo Cho 
3112a96536eSKyongHo Cho 	itype = (enum exynos_sysmmu_inttype)
3127222e8dbSCho KyongHo 		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
3132a96536eSKyongHo Cho 	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
3142a96536eSKyongHo Cho 		itype = SYSMMU_FAULT_UNKNOWN;
3152a96536eSKyongHo Cho 	else
3167222e8dbSCho KyongHo 		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);
3172a96536eSKyongHo Cho 
3182a96536eSKyongHo Cho 	if (data->domain)
3197222e8dbSCho KyongHo 		ret = report_iommu_fault(data->domain, data->dev, addr, itype);
3202a96536eSKyongHo Cho 
3212a96536eSKyongHo Cho 	if ((ret == -ENOSYS) && data->fault_handler) {
3222a96536eSKyongHo Cho 		unsigned long base = data->pgtable;
3232a96536eSKyongHo Cho 		if (itype != SYSMMU_FAULT_UNKNOWN)
3247222e8dbSCho KyongHo 			base = __raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
3252a96536eSKyongHo Cho 		ret = data->fault_handler(itype, base, addr);
3262a96536eSKyongHo Cho 	}
3272a96536eSKyongHo Cho 
3282a96536eSKyongHo Cho 	if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
3297222e8dbSCho KyongHo 		__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);
3302a96536eSKyongHo Cho 	else
3312a96536eSKyongHo Cho 		dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
3322a96536eSKyongHo Cho 				data->dbgname, sysmmu_fault_name[itype]);
3332a96536eSKyongHo Cho 
3342a96536eSKyongHo Cho 	if (itype != SYSMMU_FAULT_UNKNOWN)
3357222e8dbSCho KyongHo 		sysmmu_unblock(data->sfrbase);
3362a96536eSKyongHo Cho 
3372a96536eSKyongHo Cho 	read_unlock(&data->lock);
3382a96536eSKyongHo Cho 
3392a96536eSKyongHo Cho 	return IRQ_HANDLED;
3402a96536eSKyongHo Cho }
3412a96536eSKyongHo Cho 
3422a96536eSKyongHo Cho static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
3432a96536eSKyongHo Cho {
3442a96536eSKyongHo Cho 	unsigned long flags;
3452a96536eSKyongHo Cho 	bool disabled = false;
3462a96536eSKyongHo Cho 
3472a96536eSKyongHo Cho 	write_lock_irqsave(&data->lock, flags);
3482a96536eSKyongHo Cho 
3492a96536eSKyongHo Cho 	if (!set_sysmmu_inactive(data))
3502a96536eSKyongHo Cho 		goto finish;
3512a96536eSKyongHo Cho 
3527222e8dbSCho KyongHo 	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
3532a96536eSKyongHo Cho 
3547222e8dbSCho KyongHo 	if (!IS_ERR(data->clk))
3557222e8dbSCho KyongHo 		clk_disable(data->clk);
3562a96536eSKyongHo Cho 
3572a96536eSKyongHo Cho 	disabled = true;
3582a96536eSKyongHo Cho 	data->pgtable = 0;
3592a96536eSKyongHo Cho 	data->domain = NULL;
3602a96536eSKyongHo Cho finish:
3612a96536eSKyongHo Cho 	write_unlock_irqrestore(&data->lock, flags);
3622a96536eSKyongHo Cho 
3632a96536eSKyongHo Cho 	if (disabled)
3642a96536eSKyongHo Cho 		dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
3652a96536eSKyongHo Cho 	else
3662a96536eSKyongHo Cho 		dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
3672a96536eSKyongHo Cho 					data->dbgname, data->activations);
3682a96536eSKyongHo Cho 
3692a96536eSKyongHo Cho 	return disabled;
3702a96536eSKyongHo Cho }
3712a96536eSKyongHo Cho 
3722a96536eSKyongHo Cho /* __exynos_sysmmu_enable: Enables System MMU
3732a96536eSKyongHo Cho  *
3742a96536eSKyongHo Cho  * Returns a negative error value if an error occurred and the System MMU was
3752a96536eSKyongHo Cho  * not enabled, 0 if the System MMU has just been enabled, and 1 if the System
3762a96536eSKyongHo Cho  * MMU was already enabled before.
3772a96536eSKyongHo Cho  */
3782a96536eSKyongHo Cho static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
3792a96536eSKyongHo Cho 			unsigned long pgtable, struct iommu_domain *domain)
3802a96536eSKyongHo Cho {
3817222e8dbSCho KyongHo 	int ret = 0;
3822a96536eSKyongHo Cho 	unsigned long flags;
3832a96536eSKyongHo Cho 
3842a96536eSKyongHo Cho 	write_lock_irqsave(&data->lock, flags);
3852a96536eSKyongHo Cho 
3862a96536eSKyongHo Cho 	if (!set_sysmmu_active(data)) {
3872a96536eSKyongHo Cho 		if (WARN_ON(pgtable != data->pgtable)) {
3882a96536eSKyongHo Cho 			ret = -EBUSY;
3892a96536eSKyongHo Cho 			set_sysmmu_inactive(data);
3902a96536eSKyongHo Cho 		} else {
3912a96536eSKyongHo Cho 			ret = 1;
3922a96536eSKyongHo Cho 		}
3932a96536eSKyongHo Cho 
3942a96536eSKyongHo Cho 		dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
3952a96536eSKyongHo Cho 		goto finish;
3962a96536eSKyongHo Cho 	}
3972a96536eSKyongHo Cho 
3987222e8dbSCho KyongHo 	if (!IS_ERR(data->clk))
3997222e8dbSCho KyongHo 		clk_enable(data->clk);
4002a96536eSKyongHo Cho 
4012a96536eSKyongHo Cho 	data->pgtable = pgtable;
4022a96536eSKyongHo Cho 
4037222e8dbSCho KyongHo 	__sysmmu_set_ptbase(data->sfrbase, pgtable);
4047222e8dbSCho KyongHo 	if ((readl(data->sfrbase + REG_MMU_VERSION) >> 28) == 3) {
4052a96536eSKyongHo Cho 		/* System MMU version is 3.x */
4062a96536eSKyongHo Cho 		__raw_writel((1 << 12) | (2 << 28),
4077222e8dbSCho KyongHo 				data->sfrbase + REG_MMU_CFG);
4087222e8dbSCho KyongHo 		__sysmmu_set_prefbuf(data->sfrbase, 0, -1, 0);
4097222e8dbSCho KyongHo 		__sysmmu_set_prefbuf(data->sfrbase, 0, -1, 1);
4102a96536eSKyongHo Cho 	}
4112a96536eSKyongHo Cho 
4127222e8dbSCho KyongHo 	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
4132a96536eSKyongHo Cho 
4142a96536eSKyongHo Cho 	data->domain = domain;
4152a96536eSKyongHo Cho 
4162a96536eSKyongHo Cho 	dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
4172a96536eSKyongHo Cho finish:
4182a96536eSKyongHo Cho 	write_unlock_irqrestore(&data->lock, flags);
4192a96536eSKyongHo Cho 
4202a96536eSKyongHo Cho 	return ret;
4212a96536eSKyongHo Cho }
4222a96536eSKyongHo Cho 
4232a96536eSKyongHo Cho int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
4242a96536eSKyongHo Cho {
4252a96536eSKyongHo Cho 	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
4262a96536eSKyongHo Cho 	int ret;
4272a96536eSKyongHo Cho 
4282a96536eSKyongHo Cho 	BUG_ON(!memblock_is_memory(pgtable));
4292a96536eSKyongHo Cho 
4302a96536eSKyongHo Cho 	ret = pm_runtime_get_sync(data->sysmmu);
4312a96536eSKyongHo Cho 	if (ret < 0) {
4322a96536eSKyongHo Cho 		dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
4332a96536eSKyongHo Cho 		return ret;
4342a96536eSKyongHo Cho 	}
4352a96536eSKyongHo Cho 
4362a96536eSKyongHo Cho 	ret = __exynos_sysmmu_enable(data, pgtable, NULL);
4372a96536eSKyongHo Cho 	if (WARN_ON(ret < 0)) {
4382a96536eSKyongHo Cho 		pm_runtime_put(data->sysmmu);
4392a96536eSKyongHo Cho 		dev_err(data->sysmmu,
4407222e8dbSCho KyongHo 			"(%s) Already enabled with page table %#x\n",
4412a96536eSKyongHo Cho 			data->dbgname, data->pgtable);
4422a96536eSKyongHo Cho 	} else {
4432a96536eSKyongHo Cho 		data->dev = dev;
4442a96536eSKyongHo Cho 	}
4452a96536eSKyongHo Cho 
4462a96536eSKyongHo Cho 	return ret;
4472a96536eSKyongHo Cho }
4482a96536eSKyongHo Cho 
44977e38350SSachin Kamat static bool exynos_sysmmu_disable(struct device *dev)
4502a96536eSKyongHo Cho {
4512a96536eSKyongHo Cho 	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
4522a96536eSKyongHo Cho 	bool disabled;
4532a96536eSKyongHo Cho 
4542a96536eSKyongHo Cho 	disabled = __exynos_sysmmu_disable(data);
4552a96536eSKyongHo Cho 	pm_runtime_put(data->sysmmu);
4562a96536eSKyongHo Cho 
4572a96536eSKyongHo Cho 	return disabled;
4582a96536eSKyongHo Cho }
4592a96536eSKyongHo Cho 
4603ad6b7f3SCho KyongHo static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova,
4613ad6b7f3SCho KyongHo 					size_t size)
4622a96536eSKyongHo Cho {
4632a96536eSKyongHo Cho 	unsigned long flags;
4642a96536eSKyongHo Cho 	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
4652a96536eSKyongHo Cho 
4662a96536eSKyongHo Cho 	read_lock_irqsave(&data->lock, flags);
4672a96536eSKyongHo Cho 
4682a96536eSKyongHo Cho 	if (is_sysmmu_active(data)) {
4693ad6b7f3SCho KyongHo 		unsigned int maj;
4703ad6b7f3SCho KyongHo 		unsigned int num_inv = 1;
4713ad6b7f3SCho KyongHo 		maj = __raw_readl(data->sfrbase + REG_MMU_VERSION);
4723ad6b7f3SCho KyongHo 		/*
4733ad6b7f3SCho KyongHo 		 * L2TLB invalidations required:
4743ad6b7f3SCho KyongHo 		 * 4KB page: 1 invalidation
4753ad6b7f3SCho KyongHo 		 * 64KB page: 16 invalidations
4763ad6b7f3SCho KyongHo 		 * 1MB page: 64 invalidations
4773ad6b7f3SCho KyongHo 		 * because the TLB is set-associative
4783ad6b7f3SCho KyongHo 		 * with 8 ways and 64 sets.
4793ad6b7f3SCho KyongHo 		 * A 1MB page can be cached in any of the sets.
4803ad6b7f3SCho KyongHo 		 * A 64KB page can be cached in one of 16 consecutive sets.
4813ad6b7f3SCho KyongHo 		 */
4823ad6b7f3SCho KyongHo 		if ((maj >> 28) == 2) /* major version number */
4833ad6b7f3SCho KyongHo 			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
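		/*
		 * Added note: e.g. on v2.x hardware, unmapping a 4KB page
		 * gives num_inv = 1, a 64KB large page gives 16, and a 1MB
		 * section gives 256, clamped to 64 by the min_t() above.
		 */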
4843ad6b7f3SCho KyongHo 
4857222e8dbSCho KyongHo 		if (sysmmu_block(data->sfrbase)) {
4862a96536eSKyongHo Cho 			__sysmmu_tlb_invalidate_entry(
4873ad6b7f3SCho KyongHo 				data->sfrbase, iova, num_inv);
4887222e8dbSCho KyongHo 			sysmmu_unblock(data->sfrbase);
4892a96536eSKyongHo Cho 		}
4902a96536eSKyongHo Cho 	} else {
4912a96536eSKyongHo Cho 		dev_dbg(data->sysmmu,
4922a96536eSKyongHo Cho 			"(%s) Disabled. Skipping invalidating TLB.\n",
4932a96536eSKyongHo Cho 			data->dbgname);
4942a96536eSKyongHo Cho 	}
4952a96536eSKyongHo Cho 
4962a96536eSKyongHo Cho 	read_unlock_irqrestore(&data->lock, flags);
4972a96536eSKyongHo Cho }
4982a96536eSKyongHo Cho 
4992a96536eSKyongHo Cho void exynos_sysmmu_tlb_invalidate(struct device *dev)
5002a96536eSKyongHo Cho {
5012a96536eSKyongHo Cho 	unsigned long flags;
5022a96536eSKyongHo Cho 	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
5032a96536eSKyongHo Cho 
5042a96536eSKyongHo Cho 	read_lock_irqsave(&data->lock, flags);
5052a96536eSKyongHo Cho 
5062a96536eSKyongHo Cho 	if (is_sysmmu_active(data)) {
5077222e8dbSCho KyongHo 		if (sysmmu_block(data->sfrbase)) {
5087222e8dbSCho KyongHo 			__sysmmu_tlb_invalidate(data->sfrbase);
5097222e8dbSCho KyongHo 			sysmmu_unblock(data->sfrbase);
5102a96536eSKyongHo Cho 		}
5112a96536eSKyongHo Cho 	} else {
5122a96536eSKyongHo Cho 		dev_dbg(data->sysmmu,
5132a96536eSKyongHo Cho 			"(%s) Disabled. Skipping invalidating TLB.\n",
5142a96536eSKyongHo Cho 			data->dbgname);
5152a96536eSKyongHo Cho 	}
5162a96536eSKyongHo Cho 
5172a96536eSKyongHo Cho 	read_unlock_irqrestore(&data->lock, flags);
5182a96536eSKyongHo Cho }
5192a96536eSKyongHo Cho 
5202a96536eSKyongHo Cho static int exynos_sysmmu_probe(struct platform_device *pdev)
5212a96536eSKyongHo Cho {
5227222e8dbSCho KyongHo 	int ret;
5237222e8dbSCho KyongHo 	struct device *dev = &pdev->dev;
5242a96536eSKyongHo Cho 	struct sysmmu_drvdata *data;
5257222e8dbSCho KyongHo 	struct resource *res;
5262a96536eSKyongHo Cho 
5272a96536eSKyongHo Cho 	data = kzalloc(sizeof(*data), GFP_KERNEL);
5282a96536eSKyongHo Cho 	if (!data) {
5292a96536eSKyongHo Cho 		dev_dbg(dev, "Not enough memory\n");
5302a96536eSKyongHo Cho 		ret = -ENOMEM;
5312a96536eSKyongHo Cho 		goto err_alloc;
5322a96536eSKyongHo Cho 	}
5332a96536eSKyongHo Cho 
5347222e8dbSCho KyongHo 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5352a96536eSKyongHo Cho 	if (!res) {
5362a96536eSKyongHo Cho 		dev_dbg(dev, "Unable to find IOMEM region\n");
5372a96536eSKyongHo Cho 		ret = -ENOENT;
5387222e8dbSCho KyongHo 		goto err_init;
5392a96536eSKyongHo Cho 	}
5402a96536eSKyongHo Cho 
5417222e8dbSCho KyongHo 	data->sfrbase = ioremap(res->start, resource_size(res));
5427222e8dbSCho KyongHo 	if (!data->sfrbase) {
5437222e8dbSCho KyongHo 		dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n", res->start);
5442a96536eSKyongHo Cho 		ret = -ENOENT;
5452a96536eSKyongHo Cho 		goto err_res;
5462a96536eSKyongHo Cho 	}
5472a96536eSKyongHo Cho 
5487222e8dbSCho KyongHo 	ret = platform_get_irq(pdev, 0);
5492a96536eSKyongHo Cho 	if (ret <= 0) {
5502a96536eSKyongHo Cho 		dev_dbg(dev, "Unable to find IRQ resource\n");
5512a96536eSKyongHo Cho 		goto err_irq;
5522a96536eSKyongHo Cho 	}
5532a96536eSKyongHo Cho 
5542a96536eSKyongHo Cho 	ret = request_irq(ret, exynos_sysmmu_irq, 0,
5552a96536eSKyongHo Cho 				dev_name(dev), data);
5562a96536eSKyongHo Cho 	if (ret) {
5572a96536eSKyongHo Cho 		dev_dbg(dev, "Unable to register interrupt handler\n");
5582a96536eSKyongHo Cho 		goto err_irq;
5592a96536eSKyongHo Cho 	}
5602a96536eSKyongHo Cho 
5612a96536eSKyongHo Cho 	if (dev_get_platdata(dev)) {
5627222e8dbSCho KyongHo 		data->clk = clk_get(dev, "sysmmu");
5637222e8dbSCho KyongHo 		if (IS_ERR(data->clk))
5642a96536eSKyongHo Cho 			dev_dbg(dev, "No clock descriptor registered\n");
5652a96536eSKyongHo Cho 	}
5662a96536eSKyongHo Cho 
5672a96536eSKyongHo Cho 	data->sysmmu = dev;
5682a96536eSKyongHo Cho 	rwlock_init(&data->lock);
5692a96536eSKyongHo Cho 	INIT_LIST_HEAD(&data->node);
5702a96536eSKyongHo Cho 
5712a96536eSKyongHo Cho 	__set_fault_handler(data, &default_fault_handler);
5722a96536eSKyongHo Cho 
5737222e8dbSCho KyongHo 	platform_set_drvdata(pdev, data);
5747222e8dbSCho KyongHo 
5752a96536eSKyongHo Cho 	if (dev->parent)
5762a96536eSKyongHo Cho 		pm_runtime_enable(dev);
5772a96536eSKyongHo Cho 
5782a96536eSKyongHo Cho 	dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
5792a96536eSKyongHo Cho 	return 0;
5802a96536eSKyongHo Cho err_irq:
5817222e8dbSCho KyongHo 	free_irq(platform_get_irq(pdev, 0), data);
5822a96536eSKyongHo Cho err_res:
5837222e8dbSCho KyongHo 	iounmap(data->sfrbase);
5842a96536eSKyongHo Cho err_init:
5852a96536eSKyongHo Cho 	kfree(data);
5862a96536eSKyongHo Cho err_alloc:
5872a96536eSKyongHo Cho 	dev_err(dev, "Failed to initialize\n");
5882a96536eSKyongHo Cho 	return ret;
5892a96536eSKyongHo Cho }
5902a96536eSKyongHo Cho 
5912a96536eSKyongHo Cho static struct platform_driver exynos_sysmmu_driver = {
5922a96536eSKyongHo Cho 	.probe		= exynos_sysmmu_probe,
5932a96536eSKyongHo Cho 	.driver		= {
5942a96536eSKyongHo Cho 		.owner		= THIS_MODULE,
5952a96536eSKyongHo Cho 		.name		= "exynos-sysmmu",
5962a96536eSKyongHo Cho 	}
5972a96536eSKyongHo Cho };
5982a96536eSKyongHo Cho 
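/*
 * Note (added for clarity): pgtable_flush() cleans the CPU caches for a range
 * of page table entries, the L1 data cache via dmac_flush_range() and the
 * outer (L2) cache via outer_flush_range(), so that the System MMU, which
 * fetches page tables from memory, observes updates made by the CPU.
 */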
5992a96536eSKyongHo Cho static inline void pgtable_flush(void *vastart, void *vaend)
6002a96536eSKyongHo Cho {
6012a96536eSKyongHo Cho 	dmac_flush_range(vastart, vaend);
6022a96536eSKyongHo Cho 	outer_flush_range(virt_to_phys(vastart),
6032a96536eSKyongHo Cho 				virt_to_phys(vaend));
6042a96536eSKyongHo Cho }
6052a96536eSKyongHo Cho 
6062a96536eSKyongHo Cho static int exynos_iommu_domain_init(struct iommu_domain *domain)
6072a96536eSKyongHo Cho {
6082a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv;
6092a96536eSKyongHo Cho 
6102a96536eSKyongHo Cho 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
6112a96536eSKyongHo Cho 	if (!priv)
6122a96536eSKyongHo Cho 		return -ENOMEM;
6132a96536eSKyongHo Cho 
6142a96536eSKyongHo Cho 	priv->pgtable = (unsigned long *)__get_free_pages(
6152a96536eSKyongHo Cho 						GFP_KERNEL | __GFP_ZERO, 2);
6162a96536eSKyongHo Cho 	if (!priv->pgtable)
6172a96536eSKyongHo Cho 		goto err_pgtable;
6182a96536eSKyongHo Cho 
6192a96536eSKyongHo Cho 	priv->lv2entcnt = (short *)__get_free_pages(
6202a96536eSKyongHo Cho 						GFP_KERNEL | __GFP_ZERO, 1);
6212a96536eSKyongHo Cho 	if (!priv->lv2entcnt)
6222a96536eSKyongHo Cho 		goto err_counter;
6232a96536eSKyongHo Cho 
6242a96536eSKyongHo Cho 	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
6252a96536eSKyongHo Cho 
6262a96536eSKyongHo Cho 	spin_lock_init(&priv->lock);
6272a96536eSKyongHo Cho 	spin_lock_init(&priv->pgtablelock);
6282a96536eSKyongHo Cho 	INIT_LIST_HEAD(&priv->clients);
6292a96536eSKyongHo Cho 
630eb51637bSSachin Kamat 	domain->geometry.aperture_start = 0;
631eb51637bSSachin Kamat 	domain->geometry.aperture_end   = ~0UL;
632eb51637bSSachin Kamat 	domain->geometry.force_aperture = true;
6333177bb76SJoerg Roedel 
6342a96536eSKyongHo Cho 	domain->priv = priv;
6352a96536eSKyongHo Cho 	return 0;
6362a96536eSKyongHo Cho 
6372a96536eSKyongHo Cho err_counter:
6382a96536eSKyongHo Cho 	free_pages((unsigned long)priv->pgtable, 2);
6392a96536eSKyongHo Cho err_pgtable:
6402a96536eSKyongHo Cho 	kfree(priv);
6412a96536eSKyongHo Cho 	return -ENOMEM;
6422a96536eSKyongHo Cho }
6432a96536eSKyongHo Cho 
6442a96536eSKyongHo Cho static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
6452a96536eSKyongHo Cho {
6462a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv = domain->priv;
6472a96536eSKyongHo Cho 	struct sysmmu_drvdata *data;
6482a96536eSKyongHo Cho 	unsigned long flags;
6492a96536eSKyongHo Cho 	int i;
6502a96536eSKyongHo Cho 
6512a96536eSKyongHo Cho 	WARN_ON(!list_empty(&priv->clients));
6522a96536eSKyongHo Cho 
6532a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->lock, flags);
6542a96536eSKyongHo Cho 
6552a96536eSKyongHo Cho 	list_for_each_entry(data, &priv->clients, node) {
6562a96536eSKyongHo Cho 		while (!exynos_sysmmu_disable(data->dev))
6572a96536eSKyongHo Cho 			; /* until System MMU is actually disabled */
6582a96536eSKyongHo Cho 	}
6592a96536eSKyongHo Cho 
6602a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->lock, flags);
6612a96536eSKyongHo Cho 
6622a96536eSKyongHo Cho 	for (i = 0; i < NUM_LV1ENTRIES; i++)
6632a96536eSKyongHo Cho 		if (lv1ent_page(priv->pgtable + i))
664734c3c73SCho KyongHo 			kmem_cache_free(lv2table_kmem_cache,
665734c3c73SCho KyongHo 				phys_to_virt(lv2table_base(priv->pgtable + i)));
6662a96536eSKyongHo Cho 
6672a96536eSKyongHo Cho 	free_pages((unsigned long)priv->pgtable, 2);
6682a96536eSKyongHo Cho 	free_pages((unsigned long)priv->lv2entcnt, 1);
6692a96536eSKyongHo Cho 	kfree(domain->priv);
6702a96536eSKyongHo Cho 	domain->priv = NULL;
6712a96536eSKyongHo Cho }
6722a96536eSKyongHo Cho 
6732a96536eSKyongHo Cho static int exynos_iommu_attach_device(struct iommu_domain *domain,
6742a96536eSKyongHo Cho 				   struct device *dev)
6752a96536eSKyongHo Cho {
6762a96536eSKyongHo Cho 	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
6772a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv = domain->priv;
6787222e8dbSCho KyongHo 	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
6792a96536eSKyongHo Cho 	unsigned long flags;
6802a96536eSKyongHo Cho 	int ret;
6812a96536eSKyongHo Cho 
6822a96536eSKyongHo Cho 	ret = pm_runtime_get_sync(data->sysmmu);
6832a96536eSKyongHo Cho 	if (ret < 0)
6842a96536eSKyongHo Cho 		return ret;
6852a96536eSKyongHo Cho 
6862a96536eSKyongHo Cho 	ret = 0;
6872a96536eSKyongHo Cho 
6882a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->lock, flags);
6892a96536eSKyongHo Cho 
6907222e8dbSCho KyongHo 	ret = __exynos_sysmmu_enable(data, pagetable, domain);
6912a96536eSKyongHo Cho 
6922a96536eSKyongHo Cho 	if (ret == 0) {
6932a96536eSKyongHo Cho 		/* 'data->node' must not appear in priv->clients */
6942a96536eSKyongHo Cho 		BUG_ON(!list_empty(&data->node));
6952a96536eSKyongHo Cho 		data->dev = dev;
6962a96536eSKyongHo Cho 		list_add_tail(&data->node, &priv->clients);
6972a96536eSKyongHo Cho 	}
6982a96536eSKyongHo Cho 
6992a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->lock, flags);
7002a96536eSKyongHo Cho 
7012a96536eSKyongHo Cho 	if (ret < 0) {
7027222e8dbSCho KyongHo 		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
7037222e8dbSCho KyongHo 					__func__, &pagetable);
7042a96536eSKyongHo Cho 		pm_runtime_put(data->sysmmu);
7057222e8dbSCho KyongHo 		return ret;
7062a96536eSKyongHo Cho 	}
7072a96536eSKyongHo Cho 
7087222e8dbSCho KyongHo 	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
7097222e8dbSCho KyongHo 		__func__, &pagetable, (ret == 0) ? "" : ", again");
7107222e8dbSCho KyongHo 
7112a96536eSKyongHo Cho 	return ret;
7122a96536eSKyongHo Cho }
7132a96536eSKyongHo Cho 
7142a96536eSKyongHo Cho static void exynos_iommu_detach_device(struct iommu_domain *domain,
7152a96536eSKyongHo Cho 				    struct device *dev)
7162a96536eSKyongHo Cho {
7172a96536eSKyongHo Cho 	struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
7182a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv = domain->priv;
7192a96536eSKyongHo Cho 	struct list_head *pos;
7207222e8dbSCho KyongHo 	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
7212a96536eSKyongHo Cho 	unsigned long flags;
7222a96536eSKyongHo Cho 	bool found = false;
7232a96536eSKyongHo Cho 
7242a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->lock, flags);
7252a96536eSKyongHo Cho 
7262a96536eSKyongHo Cho 	list_for_each(pos, &priv->clients) {
7272a96536eSKyongHo Cho 		if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
7282a96536eSKyongHo Cho 			found = true;
7292a96536eSKyongHo Cho 			break;
7302a96536eSKyongHo Cho 		}
7312a96536eSKyongHo Cho 	}
7322a96536eSKyongHo Cho 
7332a96536eSKyongHo Cho 	if (!found)
7342a96536eSKyongHo Cho 		goto finish;
7352a96536eSKyongHo Cho 
7362a96536eSKyongHo Cho 	if (__exynos_sysmmu_disable(data)) {
7377222e8dbSCho KyongHo 		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
7387222e8dbSCho KyongHo 					__func__, &pagetable);
739f8ffcc92SWei Yongjun 		list_del_init(&data->node);
7402a96536eSKyongHo Cho 
7412a96536eSKyongHo Cho 	} else {
7427222e8dbSCho KyongHo 		dev_dbg(dev, "%s: Detaching IOMMU with pgtable %pa delayed",
7437222e8dbSCho KyongHo 					__func__, &pagetable);
7442a96536eSKyongHo Cho 	}
7452a96536eSKyongHo Cho 
7462a96536eSKyongHo Cho finish:
7472a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->lock, flags);
7482a96536eSKyongHo Cho 
7492a96536eSKyongHo Cho 	if (found)
7502a96536eSKyongHo Cho 		pm_runtime_put(data->sysmmu);
7512a96536eSKyongHo Cho }
7522a96536eSKyongHo Cho 
7532a96536eSKyongHo Cho static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
7542a96536eSKyongHo Cho 					short *pgcounter)
7552a96536eSKyongHo Cho {
75661128f08SCho KyongHo 	if (lv1ent_section(sent)) {
75761128f08SCho KyongHo 		WARN(1, "Trying to map on %#08lx mapped with 1MiB page", iova);
75861128f08SCho KyongHo 		return ERR_PTR(-EADDRINUSE);
75961128f08SCho KyongHo 	}
76061128f08SCho KyongHo 
7612a96536eSKyongHo Cho 	if (lv1ent_fault(sent)) {
7622a96536eSKyongHo Cho 		unsigned long *pent;
7632a96536eSKyongHo Cho 
764734c3c73SCho KyongHo 		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
7652a96536eSKyongHo Cho 		BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
7662a96536eSKyongHo Cho 		if (!pent)
76761128f08SCho KyongHo 			return ERR_PTR(-ENOMEM);
7682a96536eSKyongHo Cho 
7697222e8dbSCho KyongHo 		*sent = mk_lv1ent_page(virt_to_phys(pent));
7702a96536eSKyongHo Cho 		*pgcounter = NUM_LV2ENTRIES;
7712a96536eSKyongHo Cho 		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
7722a96536eSKyongHo Cho 		pgtable_flush(sent, sent + 1);
7732a96536eSKyongHo Cho 	}
7742a96536eSKyongHo Cho 
7752a96536eSKyongHo Cho 	return page_entry(sent, iova);
7762a96536eSKyongHo Cho }
7772a96536eSKyongHo Cho 
77861128f08SCho KyongHo static int lv1set_section(unsigned long *sent, unsigned long iova,
77961128f08SCho KyongHo 			  phys_addr_t paddr, short *pgcnt)
7802a96536eSKyongHo Cho {
78161128f08SCho KyongHo 	if (lv1ent_section(sent)) {
78261128f08SCho KyongHo 		WARN(1, "Trying to map 1MiB@%#08lx that is already mapped",
78361128f08SCho KyongHo 			iova);
7842a96536eSKyongHo Cho 		return -EADDRINUSE;
78561128f08SCho KyongHo 	}
7862a96536eSKyongHo Cho 
7872a96536eSKyongHo Cho 	if (lv1ent_page(sent)) {
78861128f08SCho KyongHo 		if (*pgcnt != NUM_LV2ENTRIES) {
78961128f08SCho KyongHo 			WARN(1, "Trying to map 1MiB@%#08lx that is already mapped",
79061128f08SCho KyongHo 				iova);
7912a96536eSKyongHo Cho 			return -EADDRINUSE;
79261128f08SCho KyongHo 		}
7932a96536eSKyongHo Cho 
794734c3c73SCho KyongHo 		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
7952a96536eSKyongHo Cho 		*pgcnt = 0;
7962a96536eSKyongHo Cho 	}
7972a96536eSKyongHo Cho 
7982a96536eSKyongHo Cho 	*sent = mk_lv1ent_sect(paddr);
7992a96536eSKyongHo Cho 
8002a96536eSKyongHo Cho 	pgtable_flush(sent, sent + 1);
8012a96536eSKyongHo Cho 
8022a96536eSKyongHo Cho 	return 0;
8032a96536eSKyongHo Cho }
8042a96536eSKyongHo Cho 
8052a96536eSKyongHo Cho static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
8062a96536eSKyongHo Cho 								short *pgcnt)
8072a96536eSKyongHo Cho {
8082a96536eSKyongHo Cho 	if (size == SPAGE_SIZE) {
80961128f08SCho KyongHo 		if (!lv2ent_fault(pent)) {
81061128f08SCho KyongHo 			WARN(1, "Trying to map 4KiB where a mapping already exists");
8112a96536eSKyongHo Cho 			return -EADDRINUSE;
81261128f08SCho KyongHo 		}
8132a96536eSKyongHo Cho 
8142a96536eSKyongHo Cho 		*pent = mk_lv2ent_spage(paddr);
8152a96536eSKyongHo Cho 		pgtable_flush(pent, pent + 1);
8162a96536eSKyongHo Cho 		*pgcnt -= 1;
8172a96536eSKyongHo Cho 	} else { /* size == LPAGE_SIZE */
8182a96536eSKyongHo Cho 		int i;
8192a96536eSKyongHo Cho 		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
8202a96536eSKyongHo Cho 			if (!lv2ent_fault(pent)) {
82161128f08SCho KyongHo 				WARN(1,
82261128f08SCho KyongHo 				"Trying to map 64KiB where a mapping already exists");
82361128f08SCho KyongHo 				if (i > 0)
82461128f08SCho KyongHo 					memset(pent - i, 0, sizeof(*pent) * i);
8252a96536eSKyongHo Cho 				return -EADDRINUSE;
8262a96536eSKyongHo Cho 			}
8272a96536eSKyongHo Cho 
8282a96536eSKyongHo Cho 			*pent = mk_lv2ent_lpage(paddr);
8292a96536eSKyongHo Cho 		}
8302a96536eSKyongHo Cho 		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
8312a96536eSKyongHo Cho 		*pgcnt -= SPAGES_PER_LPAGE;
8322a96536eSKyongHo Cho 	}
8332a96536eSKyongHo Cho 
8342a96536eSKyongHo Cho 	return 0;
8352a96536eSKyongHo Cho }
8362a96536eSKyongHo Cho 
8372a96536eSKyongHo Cho static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
8382a96536eSKyongHo Cho 			 phys_addr_t paddr, size_t size, int prot)
8392a96536eSKyongHo Cho {
8402a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv = domain->priv;
8412a96536eSKyongHo Cho 	unsigned long *entry;
8422a96536eSKyongHo Cho 	unsigned long flags;
8432a96536eSKyongHo Cho 	int ret = -ENOMEM;
8442a96536eSKyongHo Cho 
8452a96536eSKyongHo Cho 	BUG_ON(priv->pgtable == NULL);
8462a96536eSKyongHo Cho 
8472a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->pgtablelock, flags);
8482a96536eSKyongHo Cho 
8492a96536eSKyongHo Cho 	entry = section_entry(priv->pgtable, iova);
8502a96536eSKyongHo Cho 
8512a96536eSKyongHo Cho 	if (size == SECT_SIZE) {
85261128f08SCho KyongHo 		ret = lv1set_section(entry, iova, paddr,
8532a96536eSKyongHo Cho 					&priv->lv2entcnt[lv1ent_offset(iova)]);
8542a96536eSKyongHo Cho 	} else {
8552a96536eSKyongHo Cho 		unsigned long *pent;
8562a96536eSKyongHo Cho 
8572a96536eSKyongHo Cho 		pent = alloc_lv2entry(entry, iova,
8582a96536eSKyongHo Cho 					&priv->lv2entcnt[lv1ent_offset(iova)]);
8592a96536eSKyongHo Cho 
86061128f08SCho KyongHo 		if (IS_ERR(pent))
86161128f08SCho KyongHo 			ret = PTR_ERR(pent);
8622a96536eSKyongHo Cho 		else
8632a96536eSKyongHo Cho 			ret = lv2set_page(pent, paddr, size,
8642a96536eSKyongHo Cho 					&priv->lv2entcnt[lv1ent_offset(iova)]);
8652a96536eSKyongHo Cho 	}
8662a96536eSKyongHo Cho 
86761128f08SCho KyongHo 	if (ret)
8682a96536eSKyongHo Cho 		pr_debug("%s: Failed to map iova 0x%lx/0x%zx bytes\n",
8692a96536eSKyongHo Cho 							__func__, iova, size);
8702a96536eSKyongHo Cho 
8712a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->pgtablelock, flags);
8722a96536eSKyongHo Cho 
8732a96536eSKyongHo Cho 	return ret;
8742a96536eSKyongHo Cho }
8752a96536eSKyongHo Cho 
8762a96536eSKyongHo Cho static size_t exynos_iommu_unmap(struct iommu_domain *domain,
8772a96536eSKyongHo Cho 					       unsigned long iova, size_t size)
8782a96536eSKyongHo Cho {
8792a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv = domain->priv;
8802a96536eSKyongHo Cho 	struct sysmmu_drvdata *data;
8812a96536eSKyongHo Cho 	unsigned long flags;
8822a96536eSKyongHo Cho 	unsigned long *ent;
88361128f08SCho KyongHo 	size_t err_pgsize;
8842a96536eSKyongHo Cho 
8852a96536eSKyongHo Cho 	BUG_ON(priv->pgtable == NULL);
8862a96536eSKyongHo Cho 
8872a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->pgtablelock, flags);
8882a96536eSKyongHo Cho 
8892a96536eSKyongHo Cho 	ent = section_entry(priv->pgtable, iova);
8902a96536eSKyongHo Cho 
8912a96536eSKyongHo Cho 	if (lv1ent_section(ent)) {
89261128f08SCho KyongHo 		if (size < SECT_SIZE) {
89361128f08SCho KyongHo 			err_pgsize = SECT_SIZE;
89461128f08SCho KyongHo 			goto err;
89561128f08SCho KyongHo 		}
8962a96536eSKyongHo Cho 
8972a96536eSKyongHo Cho 		*ent = 0;
8982a96536eSKyongHo Cho 		pgtable_flush(ent, ent + 1);
8992a96536eSKyongHo Cho 		size = SECT_SIZE;
9002a96536eSKyongHo Cho 		goto done;
9012a96536eSKyongHo Cho 	}
9022a96536eSKyongHo Cho 
9032a96536eSKyongHo Cho 	if (unlikely(lv1ent_fault(ent))) {
9042a96536eSKyongHo Cho 		if (size > SECT_SIZE)
9052a96536eSKyongHo Cho 			size = SECT_SIZE;
9062a96536eSKyongHo Cho 		goto done;
9072a96536eSKyongHo Cho 	}
9082a96536eSKyongHo Cho 
9092a96536eSKyongHo Cho 	/* lv1ent_page(ent) == true here */
9102a96536eSKyongHo Cho 
9112a96536eSKyongHo Cho 	ent = page_entry(ent, iova);
9122a96536eSKyongHo Cho 
9132a96536eSKyongHo Cho 	if (unlikely(lv2ent_fault(ent))) {
9142a96536eSKyongHo Cho 		size = SPAGE_SIZE;
9152a96536eSKyongHo Cho 		goto done;
9162a96536eSKyongHo Cho 	}
9172a96536eSKyongHo Cho 
9182a96536eSKyongHo Cho 	if (lv2ent_small(ent)) {
9192a96536eSKyongHo Cho 		*ent = 0;
9202a96536eSKyongHo Cho 		size = SPAGE_SIZE;
9212a96536eSKyongHo Cho 		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
9222a96536eSKyongHo Cho 		goto done;
9232a96536eSKyongHo Cho 	}
9242a96536eSKyongHo Cho 
9252a96536eSKyongHo Cho 	/* lv2ent_large(ent) == true here */
92661128f08SCho KyongHo 	if (size < LPAGE_SIZE) {
92761128f08SCho KyongHo 		err_pgsize = LPAGE_SIZE;
92861128f08SCho KyongHo 		goto err;
92961128f08SCho KyongHo 	}
9302a96536eSKyongHo Cho 
9312a96536eSKyongHo Cho 	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
9322a96536eSKyongHo Cho 
9332a96536eSKyongHo Cho 	size = LPAGE_SIZE;
9342a96536eSKyongHo Cho 	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
9352a96536eSKyongHo Cho done:
9362a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->pgtablelock, flags);
9372a96536eSKyongHo Cho 
9382a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->lock, flags);
9392a96536eSKyongHo Cho 	list_for_each_entry(data, &priv->clients, node)
9403ad6b7f3SCho KyongHo 		sysmmu_tlb_invalidate_entry(data->dev, iova, size);
9412a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->lock, flags);
9422a96536eSKyongHo Cho 
9432a96536eSKyongHo Cho 	return size;
94461128f08SCho KyongHo err:
94561128f08SCho KyongHo 	spin_unlock_irqrestore(&priv->pgtablelock, flags);
94661128f08SCho KyongHo 
94761128f08SCho KyongHo 	WARN(1,
94861128f08SCho KyongHo 	"%s: Failed: size (%#zx) @ %#08lx is smaller than page size %#zx\n",
94961128f08SCho KyongHo 	__func__, size, iova, err_pgsize);
95061128f08SCho KyongHo 
95161128f08SCho KyongHo 	return 0;
9522a96536eSKyongHo Cho }
9532a96536eSKyongHo Cho 
9542a96536eSKyongHo Cho static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
955bb5547acSVarun Sethi 					  dma_addr_t iova)
9562a96536eSKyongHo Cho {
9572a96536eSKyongHo Cho 	struct exynos_iommu_domain *priv = domain->priv;
9582a96536eSKyongHo Cho 	unsigned long *entry;
9592a96536eSKyongHo Cho 	unsigned long flags;
9602a96536eSKyongHo Cho 	phys_addr_t phys = 0;
9612a96536eSKyongHo Cho 
9622a96536eSKyongHo Cho 	spin_lock_irqsave(&priv->pgtablelock, flags);
9632a96536eSKyongHo Cho 
9642a96536eSKyongHo Cho 	entry = section_entry(priv->pgtable, iova);
9652a96536eSKyongHo Cho 
9662a96536eSKyongHo Cho 	if (lv1ent_section(entry)) {
9672a96536eSKyongHo Cho 		phys = section_phys(entry) + section_offs(iova);
9682a96536eSKyongHo Cho 	} else if (lv1ent_page(entry)) {
9692a96536eSKyongHo Cho 		entry = page_entry(entry, iova);
9702a96536eSKyongHo Cho 
9712a96536eSKyongHo Cho 		if (lv2ent_large(entry))
9722a96536eSKyongHo Cho 			phys = lpage_phys(entry) + lpage_offs(iova);
9732a96536eSKyongHo Cho 		else if (lv2ent_small(entry))
9742a96536eSKyongHo Cho 			phys = spage_phys(entry) + spage_offs(iova);
9752a96536eSKyongHo Cho 	}
9762a96536eSKyongHo Cho 
9772a96536eSKyongHo Cho 	spin_unlock_irqrestore(&priv->pgtablelock, flags);
9782a96536eSKyongHo Cho 
9792a96536eSKyongHo Cho 	return phys;
9802a96536eSKyongHo Cho }
9812a96536eSKyongHo Cho 
9822a96536eSKyongHo Cho static struct iommu_ops exynos_iommu_ops = {
9832a96536eSKyongHo Cho 	.domain_init = &exynos_iommu_domain_init,
9842a96536eSKyongHo Cho 	.domain_destroy = &exynos_iommu_domain_destroy,
9852a96536eSKyongHo Cho 	.attach_dev = &exynos_iommu_attach_device,
9862a96536eSKyongHo Cho 	.detach_dev = &exynos_iommu_detach_device,
9872a96536eSKyongHo Cho 	.map = &exynos_iommu_map,
9882a96536eSKyongHo Cho 	.unmap = &exynos_iommu_unmap,
9892a96536eSKyongHo Cho 	.iova_to_phys = &exynos_iommu_iova_to_phys,
9902a96536eSKyongHo Cho 	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
9912a96536eSKyongHo Cho };
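/*
 * Illustrative sketch (not part of the original driver) of how a client on
 * the platform bus uses this driver through the generic IOMMU API declared in
 * <linux/iommu.h>. The device pointer and the addresses below are
 * hypothetical.
 */
#if 0	/* example only, not built */
static int example_use_exynos_iommu(struct device *client_dev)
{
	struct iommu_domain *dom;
	int ret;

	dom = iommu_domain_alloc(&platform_bus_type); /* -> domain_init */
	if (!dom)
		return -ENOMEM;

	ret = iommu_attach_device(dom, client_dev); /* enables the System MMU */
	if (ret)
		goto err_attach;

	/* map 64KB at IOVA 0x20000000 to physical address 0x40000000 */
	ret = iommu_map(dom, 0x20000000, 0x40000000, 0x10000,
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto err_map;

	return 0;
err_map:
	iommu_detach_device(dom, client_dev);
err_attach:
	iommu_domain_free(dom);
	return ret;
}
#endif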
9922a96536eSKyongHo Cho 
9932a96536eSKyongHo Cho static int __init exynos_iommu_init(void)
9942a96536eSKyongHo Cho {
9952a96536eSKyongHo Cho 	int ret;
9962a96536eSKyongHo Cho 
997734c3c73SCho KyongHo 	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
998734c3c73SCho KyongHo 				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
999734c3c73SCho KyongHo 	if (!lv2table_kmem_cache) {
1000734c3c73SCho KyongHo 		pr_err("%s: Failed to create kmem cache\n", __func__);
1001734c3c73SCho KyongHo 		return -ENOMEM;
1002734c3c73SCho KyongHo 	}
1003734c3c73SCho KyongHo 
10042a96536eSKyongHo Cho 	ret = platform_driver_register(&exynos_sysmmu_driver);
1005734c3c73SCho KyongHo 	if (ret) {
1006734c3c73SCho KyongHo 		pr_err("%s: Failed to register driver\n", __func__);
1007734c3c73SCho KyongHo 		goto err_reg_driver;
1008734c3c73SCho KyongHo 	}
10092a96536eSKyongHo Cho 
1010734c3c73SCho KyongHo 	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
1011734c3c73SCho KyongHo 	if (ret) {
1012734c3c73SCho KyongHo 		pr_err("%s: Failed to register exynos-iommu driver.\n",
1013734c3c73SCho KyongHo 								__func__);
1014734c3c73SCho KyongHo 		goto err_set_iommu;
1015734c3c73SCho KyongHo 	}
10162a96536eSKyongHo Cho 
1017734c3c73SCho KyongHo 	return 0;
1018734c3c73SCho KyongHo err_set_iommu:
1019734c3c73SCho KyongHo 	platform_driver_unregister(&exynos_sysmmu_driver);
1020734c3c73SCho KyongHo err_reg_driver:
1021734c3c73SCho KyongHo 	kmem_cache_destroy(lv2table_kmem_cache);
10222a96536eSKyongHo Cho 	return ret;
10232a96536eSKyongHo Cho }
10242a96536eSKyongHo Cho subsys_initcall(exynos_iommu_init);
1025