1740a01eeSMarek Szyprowski /* 2740a01eeSMarek Szyprowski * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd. 32a96536eSKyongHo Cho * http://www.samsung.com 42a96536eSKyongHo Cho * 52a96536eSKyongHo Cho * This program is free software; you can redistribute it and/or modify 62a96536eSKyongHo Cho * it under the terms of the GNU General Public License version 2 as 72a96536eSKyongHo Cho * published by the Free Software Foundation. 82a96536eSKyongHo Cho */ 92a96536eSKyongHo Cho 102a96536eSKyongHo Cho #ifdef CONFIG_EXYNOS_IOMMU_DEBUG 112a96536eSKyongHo Cho #define DEBUG 122a96536eSKyongHo Cho #endif 132a96536eSKyongHo Cho 142a96536eSKyongHo Cho #include <linux/clk.h> 158ed55c81SMarek Szyprowski #include <linux/dma-mapping.h> 162a96536eSKyongHo Cho #include <linux/err.h> 17312900c6SMarek Szyprowski #include <linux/io.h> 182a96536eSKyongHo Cho #include <linux/iommu.h> 19312900c6SMarek Szyprowski #include <linux/interrupt.h> 202a96536eSKyongHo Cho #include <linux/list.h> 218ed55c81SMarek Szyprowski #include <linux/of.h> 228ed55c81SMarek Szyprowski #include <linux/of_iommu.h> 238ed55c81SMarek Szyprowski #include <linux/of_platform.h> 24312900c6SMarek Szyprowski #include <linux/platform_device.h> 25312900c6SMarek Szyprowski #include <linux/pm_runtime.h> 26312900c6SMarek Szyprowski #include <linux/slab.h> 2758c6f6a3SMarek Szyprowski #include <linux/dma-iommu.h> 282a96536eSKyongHo Cho 29d09d78fcSCho KyongHo typedef u32 sysmmu_iova_t; 30d09d78fcSCho KyongHo typedef u32 sysmmu_pte_t; 31d09d78fcSCho KyongHo 32f171ababSSachin Kamat /* We do not consider super section mapping (16MB) */ 332a96536eSKyongHo Cho #define SECT_ORDER 20 342a96536eSKyongHo Cho #define LPAGE_ORDER 16 352a96536eSKyongHo Cho #define SPAGE_ORDER 12 362a96536eSKyongHo Cho 372a96536eSKyongHo Cho #define SECT_SIZE (1 << SECT_ORDER) 382a96536eSKyongHo Cho #define LPAGE_SIZE (1 << LPAGE_ORDER) 392a96536eSKyongHo Cho #define SPAGE_SIZE (1 << SPAGE_ORDER) 402a96536eSKyongHo Cho 412a96536eSKyongHo Cho #define 
SECT_MASK (~(SECT_SIZE - 1)) 422a96536eSKyongHo Cho #define LPAGE_MASK (~(LPAGE_SIZE - 1)) 432a96536eSKyongHo Cho #define SPAGE_MASK (~(SPAGE_SIZE - 1)) 442a96536eSKyongHo Cho 4566a7ed84SCho KyongHo #define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \ 4666a7ed84SCho KyongHo ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3)) 4766a7ed84SCho KyongHo #define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK) 4866a7ed84SCho KyongHo #define lv1ent_page_zero(sent) ((*(sent) & 3) == 1) 4966a7ed84SCho KyongHo #define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \ 5066a7ed84SCho KyongHo ((*(sent) & 3) == 1)) 512a96536eSKyongHo Cho #define lv1ent_section(sent) ((*(sent) & 3) == 2) 522a96536eSKyongHo Cho 532a96536eSKyongHo Cho #define lv2ent_fault(pent) ((*(pent) & 3) == 0) 542a96536eSKyongHo Cho #define lv2ent_small(pent) ((*(pent) & 2) == 2) 552a96536eSKyongHo Cho #define lv2ent_large(pent) ((*(pent) & 3) == 1) 562a96536eSKyongHo Cho 57740a01eeSMarek Szyprowski /* 58740a01eeSMarek Szyprowski * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces 59740a01eeSMarek Szyprowski * v5.0 introduced support for 36bit physical address space by shifting 60740a01eeSMarek Szyprowski * all page entry values by 4 bits. 61740a01eeSMarek Szyprowski * All SYSMMU controllers in the system support the address spaces of the same 62740a01eeSMarek Szyprowski * size, so PG_ENT_SHIFT can be initialized on first SYSMMU probe to proper 63740a01eeSMarek Szyprowski * value (0 or 4). 
64740a01eeSMarek Szyprowski */ 65740a01eeSMarek Szyprowski static short PG_ENT_SHIFT = -1; 66740a01eeSMarek Szyprowski #define SYSMMU_PG_ENT_SHIFT 0 67740a01eeSMarek Szyprowski #define SYSMMU_V5_PG_ENT_SHIFT 4 682a96536eSKyongHo Cho 69740a01eeSMarek Szyprowski #define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT) 70740a01eeSMarek Szyprowski #define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK) 71740a01eeSMarek Szyprowski #define section_offs(iova) (iova & (SECT_SIZE - 1)) 72740a01eeSMarek Szyprowski #define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK) 73740a01eeSMarek Szyprowski #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1)) 74740a01eeSMarek Szyprowski #define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK) 75740a01eeSMarek Szyprowski #define spage_offs(iova) (iova & (SPAGE_SIZE - 1)) 762a96536eSKyongHo Cho 772a96536eSKyongHo Cho #define NUM_LV1ENTRIES 4096 78d09d78fcSCho KyongHo #define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE) 792a96536eSKyongHo Cho 80d09d78fcSCho KyongHo static u32 lv1ent_offset(sysmmu_iova_t iova) 81d09d78fcSCho KyongHo { 82d09d78fcSCho KyongHo return iova >> SECT_ORDER; 83d09d78fcSCho KyongHo } 84d09d78fcSCho KyongHo 85d09d78fcSCho KyongHo static u32 lv2ent_offset(sysmmu_iova_t iova) 86d09d78fcSCho KyongHo { 87d09d78fcSCho KyongHo return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1); 88d09d78fcSCho KyongHo } 89d09d78fcSCho KyongHo 905e3435ebSMarek Szyprowski #define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t)) 91d09d78fcSCho KyongHo #define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t)) 922a96536eSKyongHo Cho 932a96536eSKyongHo Cho #define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE) 94740a01eeSMarek Szyprowski #define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0)) 952a96536eSKyongHo Cho 96740a01eeSMarek Szyprowski #define mk_lv1ent_sect(pa) ((pa >> PG_ENT_SHIFT) | 2) 97740a01eeSMarek Szyprowski #define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1) 98740a01eeSMarek Szyprowski #define 
mk_lv2ent_lpage(pa) ((pa >> PG_ENT_SHIFT) | 1) 99740a01eeSMarek Szyprowski #define mk_lv2ent_spage(pa) ((pa >> PG_ENT_SHIFT) | 2) 1002a96536eSKyongHo Cho 1012a96536eSKyongHo Cho #define CTRL_ENABLE 0x5 1022a96536eSKyongHo Cho #define CTRL_BLOCK 0x7 1032a96536eSKyongHo Cho #define CTRL_DISABLE 0x0 1042a96536eSKyongHo Cho 105eeb5184bSCho KyongHo #define CFG_LRU 0x1 106eeb5184bSCho KyongHo #define CFG_QOS(n) ((n & 0xF) << 7) 107eeb5184bSCho KyongHo #define CFG_ACGEN (1 << 24) /* System MMU 3.3 only */ 108eeb5184bSCho KyongHo #define CFG_SYSSEL (1 << 22) /* System MMU 3.2 only */ 109eeb5184bSCho KyongHo #define CFG_FLPDCACHE (1 << 20) /* System MMU 3.2+ only */ 110eeb5184bSCho KyongHo 111740a01eeSMarek Szyprowski /* common registers */ 1122a96536eSKyongHo Cho #define REG_MMU_CTRL 0x000 1132a96536eSKyongHo Cho #define REG_MMU_CFG 0x004 1142a96536eSKyongHo Cho #define REG_MMU_STATUS 0x008 115740a01eeSMarek Szyprowski #define REG_MMU_VERSION 0x034 116740a01eeSMarek Szyprowski 117740a01eeSMarek Szyprowski #define MMU_MAJ_VER(val) ((val) >> 7) 118740a01eeSMarek Szyprowski #define MMU_MIN_VER(val) ((val) & 0x7F) 119740a01eeSMarek Szyprowski #define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */ 120740a01eeSMarek Szyprowski 121740a01eeSMarek Szyprowski #define MAKE_MMU_VER(maj, min) ((((maj) & 0xF) << 7) | ((min) & 0x7F)) 122740a01eeSMarek Szyprowski 123740a01eeSMarek Szyprowski /* v1.x - v3.x registers */ 1242a96536eSKyongHo Cho #define REG_MMU_FLUSH 0x00C 1252a96536eSKyongHo Cho #define REG_MMU_FLUSH_ENTRY 0x010 1262a96536eSKyongHo Cho #define REG_PT_BASE_ADDR 0x014 1272a96536eSKyongHo Cho #define REG_INT_STATUS 0x018 1282a96536eSKyongHo Cho #define REG_INT_CLEAR 0x01C 1292a96536eSKyongHo Cho 1302a96536eSKyongHo Cho #define REG_PAGE_FAULT_ADDR 0x024 1312a96536eSKyongHo Cho #define REG_AW_FAULT_ADDR 0x028 1322a96536eSKyongHo Cho #define REG_AR_FAULT_ADDR 0x02C 1332a96536eSKyongHo Cho #define REG_DEFAULT_SLAVE_ADDR 0x030 1342a96536eSKyongHo Cho 
135740a01eeSMarek Szyprowski /* v5.x registers */ 136740a01eeSMarek Szyprowski #define REG_V5_PT_BASE_PFN 0x00C 137740a01eeSMarek Szyprowski #define REG_V5_MMU_FLUSH_ALL 0x010 138740a01eeSMarek Szyprowski #define REG_V5_MMU_FLUSH_ENTRY 0x014 139740a01eeSMarek Szyprowski #define REG_V5_INT_STATUS 0x060 140740a01eeSMarek Szyprowski #define REG_V5_INT_CLEAR 0x064 141740a01eeSMarek Szyprowski #define REG_V5_FAULT_AR_VA 0x070 142740a01eeSMarek Szyprowski #define REG_V5_FAULT_AW_VA 0x080 1432a96536eSKyongHo Cho 1446b21a5dbSCho KyongHo #define has_sysmmu(dev) (dev->archdata.iommu != NULL) 1456b21a5dbSCho KyongHo 1465e3435ebSMarek Szyprowski static struct device *dma_dev; 147734c3c73SCho KyongHo static struct kmem_cache *lv2table_kmem_cache; 14866a7ed84SCho KyongHo static sysmmu_pte_t *zero_lv2_table; 14966a7ed84SCho KyongHo #define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table)) 150734c3c73SCho KyongHo 151d09d78fcSCho KyongHo static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova) 1522a96536eSKyongHo Cho { 1532a96536eSKyongHo Cho return pgtable + lv1ent_offset(iova); 1542a96536eSKyongHo Cho } 1552a96536eSKyongHo Cho 156d09d78fcSCho KyongHo static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova) 1572a96536eSKyongHo Cho { 158d09d78fcSCho KyongHo return (sysmmu_pte_t *)phys_to_virt( 1597222e8dbSCho KyongHo lv2table_base(sent)) + lv2ent_offset(iova); 1602a96536eSKyongHo Cho } 1612a96536eSKyongHo Cho 162d093fc7eSMarek Szyprowski /* 163d093fc7eSMarek Szyprowski * IOMMU fault information register 164d093fc7eSMarek Szyprowski */ 165d093fc7eSMarek Szyprowski struct sysmmu_fault_info { 166d093fc7eSMarek Szyprowski unsigned int bit; /* bit number in STATUS register */ 167d093fc7eSMarek Szyprowski unsigned short addr_reg; /* register to read VA fault address */ 168d093fc7eSMarek Szyprowski const char *name; /* human readable fault name */ 169d093fc7eSMarek Szyprowski unsigned int type; /* fault type for report_iommu_fault */ 
1702a96536eSKyongHo Cho }; 1712a96536eSKyongHo Cho 172d093fc7eSMarek Szyprowski static const struct sysmmu_fault_info sysmmu_faults[] = { 173d093fc7eSMarek Szyprowski { 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ }, 174d093fc7eSMarek Szyprowski { 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ }, 175d093fc7eSMarek Szyprowski { 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE }, 176d093fc7eSMarek Szyprowski { 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ }, 177d093fc7eSMarek Szyprowski { 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ }, 178d093fc7eSMarek Szyprowski { 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ }, 179d093fc7eSMarek Szyprowski { 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE }, 180d093fc7eSMarek Szyprowski { 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE }, 1812a96536eSKyongHo Cho }; 1822a96536eSKyongHo Cho 183740a01eeSMarek Szyprowski static const struct sysmmu_fault_info sysmmu_v5_faults[] = { 184740a01eeSMarek Szyprowski { 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ }, 185740a01eeSMarek Szyprowski { 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ }, 186740a01eeSMarek Szyprowski { 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ }, 187740a01eeSMarek Szyprowski { 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ }, 188740a01eeSMarek Szyprowski { 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ }, 189740a01eeSMarek Szyprowski { 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE }, 190740a01eeSMarek Szyprowski { 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE }, 191740a01eeSMarek Szyprowski { 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE }, 192740a01eeSMarek Szyprowski { 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE }, 193740a01eeSMarek Szyprowski { 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE }, 194740a01eeSMarek Szyprowski }; 195740a01eeSMarek 
/*
 * NOTE(review): this region is a git-blame capture (commit-hash + author
 * prefixes are interleaved with the code and lines are re-wrapped). The
 * captured text is preserved byte-identical below; only review comments are
 * inserted between the captured lines. Re-extract the file from git before
 * attempting any code change here.
 */
/* Owner/domain/drvdata structures and activation refcount helpers follow. */
Szyprowski 1962860af3cSMarek Szyprowski /* 1972860af3cSMarek Szyprowski * This structure is attached to dev.archdata.iommu of the master device 1982860af3cSMarek Szyprowski * on device add, contains a list of SYSMMU controllers defined by device tree, 1992860af3cSMarek Szyprowski * which are bound to given master device. It is usually referenced by 'owner' 2002860af3cSMarek Szyprowski * pointer. 2012860af3cSMarek Szyprowski */ 2026b21a5dbSCho KyongHo struct exynos_iommu_owner { 2031b092054SMarek Szyprowski struct list_head controllers; /* list of sysmmu_drvdata.owner_node */ 2045fa61cbfSMarek Szyprowski struct iommu_domain *domain; /* domain this device is attached */ 2056b21a5dbSCho KyongHo }; 2066b21a5dbSCho KyongHo 2072860af3cSMarek Szyprowski /* 2082860af3cSMarek Szyprowski * This structure exynos specific generalization of struct iommu_domain. 2092860af3cSMarek Szyprowski * It contains list of SYSMMU controllers from all master devices, which has 2102860af3cSMarek Szyprowski * been attached to this domain and page tables of IO address space defined by 2112860af3cSMarek Szyprowski * it. It is usually referenced by 'domain' pointer.
/*
 * exynos_iommu_domain: embeds the generic iommu_domain plus the lv1 page
 * table, per-section lv2 entry counters and two spinlocks (clients list vs.
 * page table updates).
 */
2122860af3cSMarek Szyprowski */ 2132a96536eSKyongHo Cho struct exynos_iommu_domain { 2142860af3cSMarek Szyprowski struct list_head clients; /* list of sysmmu_drvdata.domain_node */ 215d09d78fcSCho KyongHo sysmmu_pte_t *pgtable; /* lv1 page table, 16KB */ 2162a96536eSKyongHo Cho short *lv2entcnt; /* free lv2 entry counter for each section */ 2172860af3cSMarek Szyprowski spinlock_t lock; /* lock for modyfying list of clients */ 2182a96536eSKyongHo Cho spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */ 219e1fd1eaaSJoerg Roedel struct iommu_domain domain; /* generic domain data structure */ 2202a96536eSKyongHo Cho }; 2212a96536eSKyongHo Cho 2222860af3cSMarek Szyprowski /* 2232860af3cSMarek Szyprowski * This structure hold all data of a single SYSMMU controller, this includes 2242860af3cSMarek Szyprowski * hw resources like registers and clocks, pointers and list nodes to connect 2252860af3cSMarek Szyprowski * it to all other structures, internal state and parameters read from device 2262860af3cSMarek Szyprowski * tree. It is usually referenced by 'data' pointer.
/*
 * sysmmu_drvdata: per-controller state. 'activations' is a plain refcount
 * protected by 'lock'; set_sysmmu_active/inactive below return true on the
 * 0->1 / 1->0 transitions respectively.
 */
2272860af3cSMarek Szyprowski */ 2282a96536eSKyongHo Cho struct sysmmu_drvdata { 2292860af3cSMarek Szyprowski struct device *sysmmu; /* SYSMMU controller device */ 2302860af3cSMarek Szyprowski struct device *master; /* master device (owner) */ 2312860af3cSMarek Szyprowski void __iomem *sfrbase; /* our registers */ 2322860af3cSMarek Szyprowski struct clk *clk; /* SYSMMU's clock */ 233740a01eeSMarek Szyprowski struct clk *aclk; /* SYSMMU's aclk clock */ 234740a01eeSMarek Szyprowski struct clk *pclk; /* SYSMMU's pclk clock */ 2352860af3cSMarek Szyprowski struct clk *clk_master; /* master's device clock */ 2362860af3cSMarek Szyprowski int activations; /* number of calls to sysmmu_enable */ 2372860af3cSMarek Szyprowski spinlock_t lock; /* lock for modyfying state */ 2382860af3cSMarek Szyprowski struct exynos_iommu_domain *domain; /* domain we belong to */ 2392860af3cSMarek Szyprowski struct list_head domain_node; /* node for domain clients list */ 2401b092054SMarek Szyprowski struct list_head owner_node; /* node for owner controllers list */ 2412860af3cSMarek Szyprowski phys_addr_t pgtable; /* assigned page table structure */ 2422860af3cSMarek Szyprowski unsigned int version; /* our version */ 2432a96536eSKyongHo Cho }; 2442a96536eSKyongHo Cho 245e1fd1eaaSJoerg Roedel static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom) 246e1fd1eaaSJoerg Roedel { 247e1fd1eaaSJoerg Roedel return container_of(dom, struct exynos_iommu_domain, domain); 248e1fd1eaaSJoerg Roedel } 249e1fd1eaaSJoerg Roedel 2502a96536eSKyongHo Cho static bool set_sysmmu_active(struct sysmmu_drvdata *data) 2512a96536eSKyongHo Cho { 2522a96536eSKyongHo Cho /* return true if the System MMU was not active previously 2532a96536eSKyongHo Cho and it needs to be initialized */ 2542a96536eSKyongHo Cho return ++data->activations == 1; 2552a96536eSKyongHo Cho } 2562a96536eSKyongHo Cho 2572a96536eSKyongHo Cho static bool set_sysmmu_inactive(struct sysmmu_drvdata *data) 2582a96536eSKyongHo Cho {
/*
 * Register-level helpers: sysmmu_block()/sysmmu_unblock() stop/restart
 * address translation via REG_MMU_CTRL (block polls REG_MMU_STATUS bit 0
 * up to 120 times and backs out by unblocking on timeout).
 * __sysmmu_tlb_invalidate* pick the v1-v3 or v5 flush register based on
 * MMU_MAJ_VER(data->version).
 */
2592a96536eSKyongHo Cho /* return true if the System MMU is needed to be disabled */ 2602a96536eSKyongHo Cho BUG_ON(data->activations < 1); 2612a96536eSKyongHo Cho return --data->activations == 0; 2622a96536eSKyongHo Cho } 2632a96536eSKyongHo Cho 2642a96536eSKyongHo Cho static bool is_sysmmu_active(struct sysmmu_drvdata *data) 2652a96536eSKyongHo Cho { 2662a96536eSKyongHo Cho return data->activations > 0; 2672a96536eSKyongHo Cho } 2682a96536eSKyongHo Cho 26902cdc365SMarek Szyprowski static void sysmmu_unblock(struct sysmmu_drvdata *data) 2702a96536eSKyongHo Cho { 27184bd0428SMarek Szyprowski writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL); 2722a96536eSKyongHo Cho } 2732a96536eSKyongHo Cho 27402cdc365SMarek Szyprowski static bool sysmmu_block(struct sysmmu_drvdata *data) 2752a96536eSKyongHo Cho { 2762a96536eSKyongHo Cho int i = 120; 2772a96536eSKyongHo Cho 27884bd0428SMarek Szyprowski writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL); 27984bd0428SMarek Szyprowski while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1)) 2802a96536eSKyongHo Cho --i; 2812a96536eSKyongHo Cho 28284bd0428SMarek Szyprowski if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) { 28302cdc365SMarek Szyprowski sysmmu_unblock(data); 2842a96536eSKyongHo Cho return false; 2852a96536eSKyongHo Cho } 2862a96536eSKyongHo Cho 2872a96536eSKyongHo Cho return true; 2882a96536eSKyongHo Cho } 2892a96536eSKyongHo Cho 29002cdc365SMarek Szyprowski static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data) 2912a96536eSKyongHo Cho { 292740a01eeSMarek Szyprowski if (MMU_MAJ_VER(data->version) < 5) 29384bd0428SMarek Szyprowski writel(0x1, data->sfrbase + REG_MMU_FLUSH); 294740a01eeSMarek Szyprowski else 29584bd0428SMarek Szyprowski writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL); 2962a96536eSKyongHo Cho } 2972a96536eSKyongHo Cho 29802cdc365SMarek Szyprowski static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data, 299d09d78fcSCho KyongHo sysmmu_iova_t iova, unsigned int num_inv)
/* Per-entry flush loop: one write per SPAGE, advancing iova each pass. */
3002a96536eSKyongHo Cho { 3013ad6b7f3SCho KyongHo unsigned int i; 302365409dbSSachin Kamat 3033ad6b7f3SCho KyongHo for (i = 0; i < num_inv; i++) { 304740a01eeSMarek Szyprowski if (MMU_MAJ_VER(data->version) < 5) 30584bd0428SMarek Szyprowski writel((iova & SPAGE_MASK) | 1, 30602cdc365SMarek Szyprowski data->sfrbase + REG_MMU_FLUSH_ENTRY); 307740a01eeSMarek Szyprowski else 30884bd0428SMarek Szyprowski writel((iova & SPAGE_MASK) | 1, 309740a01eeSMarek Szyprowski data->sfrbase + REG_V5_MMU_FLUSH_ENTRY); 3103ad6b7f3SCho KyongHo iova += SPAGE_SIZE; 3113ad6b7f3SCho KyongHo } 3122a96536eSKyongHo Cho } 3132a96536eSKyongHo Cho 31402cdc365SMarek Szyprowski static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd) 3152a96536eSKyongHo Cho { 316740a01eeSMarek Szyprowski if (MMU_MAJ_VER(data->version) < 5) 31784bd0428SMarek Szyprowski writel(pgd, data->sfrbase + REG_PT_BASE_ADDR); 318740a01eeSMarek Szyprowski else 31984bd0428SMarek Szyprowski writel(pgd >> PAGE_SHIFT, 320740a01eeSMarek Szyprowski data->sfrbase + REG_V5_PT_BASE_PFN); 3212a96536eSKyongHo Cho 32202cdc365SMarek Szyprowski __sysmmu_tlb_invalidate(data); 3232a96536eSKyongHo Cho } 3242a96536eSKyongHo Cho 325fecc49dbSMarek Szyprowski static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data) 326fecc49dbSMarek Szyprowski { 327fecc49dbSMarek Szyprowski BUG_ON(clk_prepare_enable(data->clk_master)); 328fecc49dbSMarek Szyprowski BUG_ON(clk_prepare_enable(data->clk)); 329fecc49dbSMarek Szyprowski BUG_ON(clk_prepare_enable(data->pclk)); 330fecc49dbSMarek Szyprowski BUG_ON(clk_prepare_enable(data->aclk)); 331fecc49dbSMarek Szyprowski } 332fecc49dbSMarek Szyprowski 333fecc49dbSMarek Szyprowski static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data) 334fecc49dbSMarek Szyprowski { 335fecc49dbSMarek Szyprowski clk_disable_unprepare(data->aclk); 336fecc49dbSMarek Szyprowski clk_disable_unprepare(data->pclk); 337fecc49dbSMarek Szyprowski clk_disable_unprepare(data->clk); 338fecc49dbSMarek Szyprowski
/*
 * Clocks are released in reverse order of acquisition. __sysmmu_get_version
 * below briefly powers the block to read REG_MMU_VERSION; 0x80000001 is a
 * known bogus readout treated as v1.0. show_fault_information walks the
 * lv1/lv2 tables for the faulting IOVA and logs the raw entries.
 */
clk_disable_unprepare(data->clk_master); 339fecc49dbSMarek Szyprowski } 340fecc49dbSMarek Szyprowski 341850d313eSMarek Szyprowski static void __sysmmu_get_version(struct sysmmu_drvdata *data) 342850d313eSMarek Szyprowski { 343850d313eSMarek Szyprowski u32 ver; 344850d313eSMarek Szyprowski 345fecc49dbSMarek Szyprowski __sysmmu_enable_clocks(data); 346850d313eSMarek Szyprowski 34784bd0428SMarek Szyprowski ver = readl(data->sfrbase + REG_MMU_VERSION); 348850d313eSMarek Szyprowski 349850d313eSMarek Szyprowski /* controllers on some SoCs don't report proper version */ 350850d313eSMarek Szyprowski if (ver == 0x80000001u) 351850d313eSMarek Szyprowski data->version = MAKE_MMU_VER(1, 0); 352850d313eSMarek Szyprowski else 353850d313eSMarek Szyprowski data->version = MMU_RAW_VER(ver); 354850d313eSMarek Szyprowski 355850d313eSMarek Szyprowski dev_dbg(data->sysmmu, "hardware version: %d.%d\n", 356850d313eSMarek Szyprowski MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version)); 357850d313eSMarek Szyprowski 358fecc49dbSMarek Szyprowski __sysmmu_disable_clocks(data); 359850d313eSMarek Szyprowski } 360850d313eSMarek Szyprowski 361d093fc7eSMarek Szyprowski static void show_fault_information(struct sysmmu_drvdata *data, 362d093fc7eSMarek Szyprowski const struct sysmmu_fault_info *finfo, 363d093fc7eSMarek Szyprowski sysmmu_iova_t fault_addr) 3642a96536eSKyongHo Cho { 365d09d78fcSCho KyongHo sysmmu_pte_t *ent; 3662a96536eSKyongHo Cho 367d093fc7eSMarek Szyprowski dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n", 368d093fc7eSMarek Szyprowski finfo->name, fault_addr, &data->pgtable); 369d093fc7eSMarek Szyprowski ent = section_entry(phys_to_virt(data->pgtable), fault_addr); 370d093fc7eSMarek Szyprowski dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent); 3712a96536eSKyongHo Cho if (lv1ent_page(ent)) { 3722a96536eSKyongHo Cho ent = page_entry(ent, fault_addr); 373d093fc7eSMarek Szyprowski dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent); 3742a96536eSKyongHo Cho }
/*
 * IRQ handler: selects the fault table/registers by hardware version,
 * decodes the lowest set status bit via __ffs(), and BUG()s on an unknown
 * fault type. Runs under data->lock with the master clock enabled.
 */
3752a96536eSKyongHo Cho } 3762a96536eSKyongHo Cho 3772a96536eSKyongHo Cho static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id) 3782a96536eSKyongHo Cho { 379f171ababSSachin Kamat /* SYSMMU is in blocked state when interrupt occurred. */ 3802a96536eSKyongHo Cho struct sysmmu_drvdata *data = dev_id; 381740a01eeSMarek Szyprowski const struct sysmmu_fault_info *finfo; 382740a01eeSMarek Szyprowski unsigned int i, n, itype; 383d093fc7eSMarek Szyprowski sysmmu_iova_t fault_addr = -1; 384740a01eeSMarek Szyprowski unsigned short reg_status, reg_clear; 3857222e8dbSCho KyongHo int ret = -ENOSYS; 3862a96536eSKyongHo Cho 3872a96536eSKyongHo Cho WARN_ON(!is_sysmmu_active(data)); 3882a96536eSKyongHo Cho 389740a01eeSMarek Szyprowski if (MMU_MAJ_VER(data->version) < 5) { 390740a01eeSMarek Szyprowski reg_status = REG_INT_STATUS; 391740a01eeSMarek Szyprowski reg_clear = REG_INT_CLEAR; 392740a01eeSMarek Szyprowski finfo = sysmmu_faults; 393740a01eeSMarek Szyprowski n = ARRAY_SIZE(sysmmu_faults); 394740a01eeSMarek Szyprowski } else { 395740a01eeSMarek Szyprowski reg_status = REG_V5_INT_STATUS; 396740a01eeSMarek Szyprowski reg_clear = REG_V5_INT_CLEAR; 397740a01eeSMarek Szyprowski finfo = sysmmu_v5_faults; 398740a01eeSMarek Szyprowski n = ARRAY_SIZE(sysmmu_v5_faults); 399740a01eeSMarek Szyprowski } 400740a01eeSMarek Szyprowski 4019d4e7a24SCho KyongHo spin_lock(&data->lock); 4029d4e7a24SCho KyongHo 40370605870SCho KyongHo clk_enable(data->clk_master); 4049d4e7a24SCho KyongHo 40584bd0428SMarek Szyprowski itype = __ffs(readl(data->sfrbase + reg_status)); 406d093fc7eSMarek Szyprowski for (i = 0; i < n; i++, finfo++) 407d093fc7eSMarek Szyprowski if (finfo->bit == itype) 408d093fc7eSMarek Szyprowski break; 409d093fc7eSMarek Szyprowski /* unknown/unsupported fault */ 410d093fc7eSMarek Szyprowski BUG_ON(i == n); 4112a96536eSKyongHo Cho 412d093fc7eSMarek Szyprowski /* print debug message */ 41384bd0428SMarek Szyprowski fault_addr = readl(data->sfrbase + finfo->addr_reg); 414d093fc7eSMarek
/*
 * NOTE(review): continuation of the git-blame capture; lines are preserved
 * byte-identical, only review comments are inserted. The tail of
 * exynos_sysmmu_resume() is cut off at the end of this capture — do not
 * infer its remaining body from here.
 */
/*
 * Tail of the IRQ handler: reports the fault, BUG()s if the handler did not
 * recover it (ret != 0), clears the status bit and unblocks the MMU.
 */
Szyprowski show_fault_information(data, finfo, fault_addr); 415d093fc7eSMarek Szyprowski 4162a96536eSKyongHo Cho if (data->domain) 417a9133b99SMarek Szyprowski ret = report_iommu_fault(&data->domain->domain, 418d093fc7eSMarek Szyprowski data->master, fault_addr, finfo->type); 4191fab7fa7SCho KyongHo /* fault is not recovered by fault handler */ 4201fab7fa7SCho KyongHo BUG_ON(ret != 0); 4212a96536eSKyongHo Cho 42284bd0428SMarek Szyprowski writel(1 << itype, data->sfrbase + reg_clear); 4231fab7fa7SCho KyongHo 42402cdc365SMarek Szyprowski sysmmu_unblock(data); 4252a96536eSKyongHo Cho 42670605870SCho KyongHo clk_disable(data->clk_master); 42770605870SCho KyongHo 4289d4e7a24SCho KyongHo spin_unlock(&data->lock); 4292a96536eSKyongHo Cho 4302a96536eSKyongHo Cho return IRQ_HANDLED; 4312a96536eSKyongHo Cho } 4322a96536eSKyongHo Cho 4336b21a5dbSCho KyongHo static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data) 4342a96536eSKyongHo Cho { 43570605870SCho KyongHo clk_enable(data->clk_master); 43670605870SCho KyongHo 43784bd0428SMarek Szyprowski writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL); 43884bd0428SMarek Szyprowski writel(0, data->sfrbase + REG_MMU_CFG); 4392a96536eSKyongHo Cho 440fecc49dbSMarek Szyprowski __sysmmu_disable_clocks(data); 4416b21a5dbSCho KyongHo } 4422a96536eSKyongHo Cho 4436b21a5dbSCho KyongHo static bool __sysmmu_disable(struct sysmmu_drvdata *data) 4446b21a5dbSCho KyongHo { 4456b21a5dbSCho KyongHo bool disabled; 4466b21a5dbSCho KyongHo unsigned long flags; 4476b21a5dbSCho KyongHo 4486b21a5dbSCho KyongHo spin_lock_irqsave(&data->lock, flags); 4496b21a5dbSCho KyongHo 4506b21a5dbSCho KyongHo disabled = set_sysmmu_inactive(data); 4516b21a5dbSCho KyongHo 4526b21a5dbSCho KyongHo if (disabled) { 4532a96536eSKyongHo Cho data->pgtable = 0; 4542a96536eSKyongHo Cho data->domain = NULL; 4556b21a5dbSCho KyongHo 4566b21a5dbSCho KyongHo __sysmmu_disable_nocount(data); 4576b21a5dbSCho KyongHo 4586b21a5dbSCho KyongHo dev_dbg(data->sysmmu, "Disabled\n");
/*
 * __sysmmu_init_config chooses REG_MMU_CFG bits by hardware version
 * (<=3.1, <=3.2, newer). __sysmmu_enable_nocount programs the page table
 * base and enables translation while the controller is blocked.
 */
4596b21a5dbSCho KyongHo } else { 4606b21a5dbSCho KyongHo dev_dbg(data->sysmmu, "%d times left to disable\n", 4616b21a5dbSCho KyongHo data->activations); 4626b21a5dbSCho KyongHo } 4636b21a5dbSCho KyongHo 4649d4e7a24SCho KyongHo spin_unlock_irqrestore(&data->lock, flags); 4652a96536eSKyongHo Cho 4662a96536eSKyongHo Cho return disabled; 4672a96536eSKyongHo Cho } 4682a96536eSKyongHo Cho 4696b21a5dbSCho KyongHo static void __sysmmu_init_config(struct sysmmu_drvdata *data) 4706b21a5dbSCho KyongHo { 47183addecdSMarek Szyprowski unsigned int cfg; 472eeb5184bSCho KyongHo 47383addecdSMarek Szyprowski if (data->version <= MAKE_MMU_VER(3, 1)) 47483addecdSMarek Szyprowski cfg = CFG_LRU | CFG_QOS(15); 47583addecdSMarek Szyprowski else if (data->version <= MAKE_MMU_VER(3, 2)) 47683addecdSMarek Szyprowski cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL; 47783addecdSMarek Szyprowski else 47883addecdSMarek Szyprowski cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN; 4796b21a5dbSCho KyongHo 48084bd0428SMarek Szyprowski writel(cfg, data->sfrbase + REG_MMU_CFG); 4816b21a5dbSCho KyongHo } 4826b21a5dbSCho KyongHo 4836b21a5dbSCho KyongHo static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data) 4846b21a5dbSCho KyongHo { 485fecc49dbSMarek Szyprowski __sysmmu_enable_clocks(data); 4866b21a5dbSCho KyongHo 48784bd0428SMarek Szyprowski writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL); 4886b21a5dbSCho KyongHo 4896b21a5dbSCho KyongHo __sysmmu_init_config(data); 4906b21a5dbSCho KyongHo 49102cdc365SMarek Szyprowski __sysmmu_set_ptbase(data, data->pgtable); 4926b21a5dbSCho KyongHo 49384bd0428SMarek Szyprowski writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL); 4946b21a5dbSCho KyongHo 495fecc49dbSMarek Szyprowski /* 496fecc49dbSMarek Szyprowski * SYSMMU driver keeps master's clock enabled only for the short 497fecc49dbSMarek Szyprowski * time, while accessing the registers.
/*
 * __sysmmu_enable: refcounted enable; on a re-enable with a different page
 * table it returns -EBUSY and the WARN_ON path undoes the count bump.
 */
For performing address 498fecc49dbSMarek Szyprowski * translation during DMA transaction it relies on the client 499fecc49dbSMarek Szyprowski * driver to enable it. 500fecc49dbSMarek Szyprowski */ 5016b21a5dbSCho KyongHo clk_disable(data->clk_master); 5026b21a5dbSCho KyongHo } 5036b21a5dbSCho KyongHo 504bfa00489SMarek Szyprowski static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable, 505a9133b99SMarek Szyprowski struct exynos_iommu_domain *domain) 5066b21a5dbSCho KyongHo { 5076b21a5dbSCho KyongHo int ret = 0; 5086b21a5dbSCho KyongHo unsigned long flags; 5096b21a5dbSCho KyongHo 5106b21a5dbSCho KyongHo spin_lock_irqsave(&data->lock, flags); 5116b21a5dbSCho KyongHo if (set_sysmmu_active(data)) { 5126b21a5dbSCho KyongHo data->pgtable = pgtable; 513a9133b99SMarek Szyprowski data->domain = domain; 5146b21a5dbSCho KyongHo 5156b21a5dbSCho KyongHo __sysmmu_enable_nocount(data); 5166b21a5dbSCho KyongHo 5176b21a5dbSCho KyongHo dev_dbg(data->sysmmu, "Enabled\n"); 5186b21a5dbSCho KyongHo } else { 5196b21a5dbSCho KyongHo ret = (pgtable == data->pgtable) ?
/*
 * TLB invalidation entry points. The flpdcache variant only acts on v3.3+
 * hardware; the generic variant scales num_inv on v2 hardware per the
 * set-associativity comment below, and skips work (with a debug message)
 * when the controller is inactive.
 */
1 : -EBUSY; 5206b21a5dbSCho KyongHo 5216b21a5dbSCho KyongHo dev_dbg(data->sysmmu, "already enabled\n"); 5226b21a5dbSCho KyongHo } 5236b21a5dbSCho KyongHo 5246b21a5dbSCho KyongHo if (WARN_ON(ret < 0)) 5256b21a5dbSCho KyongHo set_sysmmu_inactive(data); /* decrement count */ 5266b21a5dbSCho KyongHo 5276b21a5dbSCho KyongHo spin_unlock_irqrestore(&data->lock, flags); 5286b21a5dbSCho KyongHo 5296b21a5dbSCho KyongHo return ret; 5306b21a5dbSCho KyongHo } 5316b21a5dbSCho KyongHo 532469acebeSMarek Szyprowski static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data, 53366a7ed84SCho KyongHo sysmmu_iova_t iova) 53466a7ed84SCho KyongHo { 53566a7ed84SCho KyongHo unsigned long flags; 53666a7ed84SCho KyongHo 53766a7ed84SCho KyongHo 53866a7ed84SCho KyongHo spin_lock_irqsave(&data->lock, flags); 53901324ab2SMarek Szyprowski if (is_sysmmu_active(data) && data->version >= MAKE_MMU_VER(3, 3)) { 54001324ab2SMarek Szyprowski clk_enable(data->clk_master); 541d631ea98SMarek Szyprowski __sysmmu_tlb_invalidate_entry(data, iova, 1); 54201324ab2SMarek Szyprowski clk_disable(data->clk_master); 543d631ea98SMarek Szyprowski } 54466a7ed84SCho KyongHo spin_unlock_irqrestore(&data->lock, flags); 54566a7ed84SCho KyongHo 54666a7ed84SCho KyongHo } 54766a7ed84SCho KyongHo 548469acebeSMarek Szyprowski static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data, 549469acebeSMarek Szyprowski sysmmu_iova_t iova, size_t size) 5502a96536eSKyongHo Cho { 5512a96536eSKyongHo Cho unsigned long flags; 5522a96536eSKyongHo Cho 5539d4e7a24SCho KyongHo spin_lock_irqsave(&data->lock, flags); 5542a96536eSKyongHo Cho if (is_sysmmu_active(data)) { 5553ad6b7f3SCho KyongHo unsigned int num_inv = 1; 55670605870SCho KyongHo 55770605870SCho KyongHo clk_enable(data->clk_master); 55870605870SCho KyongHo 5593ad6b7f3SCho KyongHo /* 5603ad6b7f3SCho KyongHo * L2TLB invalidation required 5613ad6b7f3SCho KyongHo * 4KB page: 1 invalidation 562f171ababSSachin Kamat * 64KB page: 16 invalidations 563f171ababSSachin
/*
 * Probe: maps registers, requests the IRQ, and fetches up to four
 * optional clocks ("sysmmu", "aclk", "pclk", "master") — -ENOENT is
 * tolerated per clock, but either "sysmmu" or the aclk+pclk pair must
 * exist. PG_ENT_SHIFT is latched on the first probed controller.
 */
Kamat * 1MB page: 64 invalidations 5643ad6b7f3SCho KyongHo * because it is set-associative TLB 5653ad6b7f3SCho KyongHo * with 8-way and 64 sets. 5663ad6b7f3SCho KyongHo * 1MB page can be cached in one of all sets. 5673ad6b7f3SCho KyongHo * 64KB page can be one of 16 consecutive sets. 5683ad6b7f3SCho KyongHo */ 569512bd0c6SMarek Szyprowski if (MMU_MAJ_VER(data->version) == 2) 5703ad6b7f3SCho KyongHo num_inv = min_t(unsigned int, size / PAGE_SIZE, 64); 5713ad6b7f3SCho KyongHo 57202cdc365SMarek Szyprowski if (sysmmu_block(data)) { 57302cdc365SMarek Szyprowski __sysmmu_tlb_invalidate_entry(data, iova, num_inv); 57402cdc365SMarek Szyprowski sysmmu_unblock(data); 5752a96536eSKyongHo Cho } 57670605870SCho KyongHo clk_disable(data->clk_master); 5772a96536eSKyongHo Cho } else { 578469acebeSMarek Szyprowski dev_dbg(data->master, 579469acebeSMarek Szyprowski "disabled. Skipping TLB invalidation @ %#x\n", iova); 5802a96536eSKyongHo Cho } 5819d4e7a24SCho KyongHo spin_unlock_irqrestore(&data->lock, flags); 5822a96536eSKyongHo Cho } 5832a96536eSKyongHo Cho 58496f66557SMarek Szyprowski static struct iommu_ops exynos_iommu_ops; 58596f66557SMarek Szyprowski 5866b21a5dbSCho KyongHo static int __init exynos_sysmmu_probe(struct platform_device *pdev) 5872a96536eSKyongHo Cho { 58846c16d1eSCho KyongHo int irq, ret; 5897222e8dbSCho KyongHo struct device *dev = &pdev->dev; 5902a96536eSKyongHo Cho struct sysmmu_drvdata *data; 5917222e8dbSCho KyongHo struct resource *res; 5922a96536eSKyongHo Cho 59346c16d1eSCho KyongHo data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); 59446c16d1eSCho KyongHo if (!data) 59546c16d1eSCho KyongHo return -ENOMEM; 5962a96536eSKyongHo Cho 5977222e8dbSCho KyongHo res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 59846c16d1eSCho KyongHo data->sfrbase = devm_ioremap_resource(dev, res); 59946c16d1eSCho KyongHo if (IS_ERR(data->sfrbase)) 60046c16d1eSCho KyongHo return PTR_ERR(data->sfrbase); 6012a96536eSKyongHo Cho 60246c16d1eSCho KyongHo irq =
platform_get_irq(pdev, 0); 60346c16d1eSCho KyongHo if (irq <= 0) { 6040bf4e54dSCho KyongHo dev_err(dev, "Unable to find IRQ resource\n"); 60546c16d1eSCho KyongHo return irq; 6062a96536eSKyongHo Cho } 6072a96536eSKyongHo Cho 60846c16d1eSCho KyongHo ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0, 6092a96536eSKyongHo Cho dev_name(dev), data); 6102a96536eSKyongHo Cho if (ret) { 61146c16d1eSCho KyongHo dev_err(dev, "Unabled to register handler of irq %d\n", irq); 61246c16d1eSCho KyongHo return ret; 6132a96536eSKyongHo Cho } 6142a96536eSKyongHo Cho 61546c16d1eSCho KyongHo data->clk = devm_clk_get(dev, "sysmmu"); 6160c2b063fSMarek Szyprowski if (PTR_ERR(data->clk) == -ENOENT) 617740a01eeSMarek Szyprowski data->clk = NULL; 6180c2b063fSMarek Szyprowski else if (IS_ERR(data->clk)) 6190c2b063fSMarek Szyprowski return PTR_ERR(data->clk); 620740a01eeSMarek Szyprowski 621740a01eeSMarek Szyprowski data->aclk = devm_clk_get(dev, "aclk"); 6220c2b063fSMarek Szyprowski if (PTR_ERR(data->aclk) == -ENOENT) 623740a01eeSMarek Szyprowski data->aclk = NULL; 6240c2b063fSMarek Szyprowski else if (IS_ERR(data->aclk)) 6250c2b063fSMarek Szyprowski return PTR_ERR(data->aclk); 626740a01eeSMarek Szyprowski 627740a01eeSMarek Szyprowski data->pclk = devm_clk_get(dev, "pclk"); 6280c2b063fSMarek Szyprowski if (PTR_ERR(data->pclk) == -ENOENT) 629740a01eeSMarek Szyprowski data->pclk = NULL; 6300c2b063fSMarek Szyprowski else if (IS_ERR(data->pclk)) 6310c2b063fSMarek Szyprowski return PTR_ERR(data->pclk); 632740a01eeSMarek Szyprowski 633740a01eeSMarek Szyprowski if (!data->clk && (!data->aclk || !data->pclk)) { 634740a01eeSMarek Szyprowski dev_err(dev, "Failed to get device clock(s)!\n"); 635740a01eeSMarek Szyprowski return -ENOSYS; 6362a96536eSKyongHo Cho } 6372a96536eSKyongHo Cho 63870605870SCho KyongHo data->clk_master = devm_clk_get(dev, "master"); 6390c2b063fSMarek Szyprowski if (PTR_ERR(data->clk_master) == -ENOENT) 640b398af21SMarek Szyprowski data->clk_master = NULL; 6410c2b063fSMarek
/*
 * System PM callbacks: suspend/resume only touch hardware when the
 * controller is active. NOTE(review): exynos_sysmmu_resume() is truncated
 * by this capture — its remaining body is not visible here.
 */
Szyprowski else if (IS_ERR(data->clk_master)) 6420c2b063fSMarek Szyprowski return PTR_ERR(data->clk_master); 64370605870SCho KyongHo 6442a96536eSKyongHo Cho data->sysmmu = dev; 6459d4e7a24SCho KyongHo spin_lock_init(&data->lock); 6462a96536eSKyongHo Cho 6477222e8dbSCho KyongHo platform_set_drvdata(pdev, data); 6487222e8dbSCho KyongHo 649850d313eSMarek Szyprowski __sysmmu_get_version(data); 650740a01eeSMarek Szyprowski if (PG_ENT_SHIFT < 0) { 651740a01eeSMarek Szyprowski if (MMU_MAJ_VER(data->version) < 5) 652740a01eeSMarek Szyprowski PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT; 653740a01eeSMarek Szyprowski else 654740a01eeSMarek Szyprowski PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT; 655740a01eeSMarek Szyprowski } 656740a01eeSMarek Szyprowski 6572a96536eSKyongHo Cho pm_runtime_enable(dev); 6582a96536eSKyongHo Cho 65996f66557SMarek Szyprowski of_iommu_set_ops(dev->of_node, &exynos_iommu_ops); 66096f66557SMarek Szyprowski 6612a96536eSKyongHo Cho return 0; 6622a96536eSKyongHo Cho } 6632a96536eSKyongHo Cho 664622015e4SMarek Szyprowski #ifdef CONFIG_PM_SLEEP 665622015e4SMarek Szyprowski static int exynos_sysmmu_suspend(struct device *dev) 666622015e4SMarek Szyprowski { 667622015e4SMarek Szyprowski struct sysmmu_drvdata *data = dev_get_drvdata(dev); 668622015e4SMarek Szyprowski 669622015e4SMarek Szyprowski dev_dbg(dev, "suspend\n"); 670622015e4SMarek Szyprowski if (is_sysmmu_active(data)) { 671622015e4SMarek Szyprowski __sysmmu_disable_nocount(data); 672622015e4SMarek Szyprowski pm_runtime_put(dev); 673622015e4SMarek Szyprowski } 674622015e4SMarek Szyprowski return 0; 675622015e4SMarek Szyprowski } 676622015e4SMarek Szyprowski 677622015e4SMarek Szyprowski static int exynos_sysmmu_resume(struct device *dev) 678622015e4SMarek Szyprowski { 679622015e4SMarek Szyprowski struct sysmmu_drvdata *data = dev_get_drvdata(dev); 680622015e4SMarek Szyprowski 681622015e4SMarek Szyprowski dev_dbg(dev, "resume\n"); 682622015e4SMarek Szyprowski if (is_sysmmu_active(data)) { 683622015e4SMarek Szyprowski
pm_runtime_get_sync(dev); 684622015e4SMarek Szyprowski __sysmmu_enable_nocount(data); 685622015e4SMarek Szyprowski } 686622015e4SMarek Szyprowski return 0; 687622015e4SMarek Szyprowski } 688622015e4SMarek Szyprowski #endif 689622015e4SMarek Szyprowski 690622015e4SMarek Szyprowski static const struct dev_pm_ops sysmmu_pm_ops = { 691622015e4SMarek Szyprowski SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume) 692622015e4SMarek Szyprowski }; 693622015e4SMarek Szyprowski 6946b21a5dbSCho KyongHo static const struct of_device_id sysmmu_of_match[] __initconst = { 6956b21a5dbSCho KyongHo { .compatible = "samsung,exynos-sysmmu", }, 6966b21a5dbSCho KyongHo { }, 6976b21a5dbSCho KyongHo }; 6986b21a5dbSCho KyongHo 6996b21a5dbSCho KyongHo static struct platform_driver exynos_sysmmu_driver __refdata = { 7002a96536eSKyongHo Cho .probe = exynos_sysmmu_probe, 7012a96536eSKyongHo Cho .driver = { 7022a96536eSKyongHo Cho .name = "exynos-sysmmu", 7036b21a5dbSCho KyongHo .of_match_table = sysmmu_of_match, 704622015e4SMarek Szyprowski .pm = &sysmmu_pm_ops, 705b54b874fSMarek Szyprowski .suppress_bind_attrs = true, 7062a96536eSKyongHo Cho } 7072a96536eSKyongHo Cho }; 7082a96536eSKyongHo Cho 7095e3435ebSMarek Szyprowski static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val) 7102a96536eSKyongHo Cho { 7115e3435ebSMarek Szyprowski dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent), 7125e3435ebSMarek Szyprowski DMA_TO_DEVICE); 7135e3435ebSMarek Szyprowski *ent = val; 7145e3435ebSMarek Szyprowski dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent), 7155e3435ebSMarek Szyprowski DMA_TO_DEVICE); 7162a96536eSKyongHo Cho } 7172a96536eSKyongHo Cho 718e1fd1eaaSJoerg Roedel static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type) 7192a96536eSKyongHo Cho { 720bfa00489SMarek Szyprowski struct exynos_iommu_domain *domain; 7215e3435ebSMarek Szyprowski dma_addr_t handle; 72266a7ed84SCho KyongHo int i; 7232a96536eSKyongHo Cho 
724740a01eeSMarek Szyprowski /* Check if correct PTE offsets are initialized */ 725740a01eeSMarek Szyprowski BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev); 7262a96536eSKyongHo Cho 727bfa00489SMarek Szyprowski domain = kzalloc(sizeof(*domain), GFP_KERNEL); 728bfa00489SMarek Szyprowski if (!domain) 729e1fd1eaaSJoerg Roedel return NULL; 730e1fd1eaaSJoerg Roedel 73158c6f6a3SMarek Szyprowski if (type == IOMMU_DOMAIN_DMA) { 73258c6f6a3SMarek Szyprowski if (iommu_get_dma_cookie(&domain->domain) != 0) 73358c6f6a3SMarek Szyprowski goto err_pgtable; 73458c6f6a3SMarek Szyprowski } else if (type != IOMMU_DOMAIN_UNMANAGED) { 73558c6f6a3SMarek Szyprowski goto err_pgtable; 73658c6f6a3SMarek Szyprowski } 73758c6f6a3SMarek Szyprowski 738bfa00489SMarek Szyprowski domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2); 739bfa00489SMarek Szyprowski if (!domain->pgtable) 74058c6f6a3SMarek Szyprowski goto err_dma_cookie; 7412a96536eSKyongHo Cho 742bfa00489SMarek Szyprowski domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1); 743bfa00489SMarek Szyprowski if (!domain->lv2entcnt) 7442a96536eSKyongHo Cho goto err_counter; 7452a96536eSKyongHo Cho 746f171ababSSachin Kamat /* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */ 74766a7ed84SCho KyongHo for (i = 0; i < NUM_LV1ENTRIES; i += 8) { 748bfa00489SMarek Szyprowski domain->pgtable[i + 0] = ZERO_LV2LINK; 749bfa00489SMarek Szyprowski domain->pgtable[i + 1] = ZERO_LV2LINK; 750bfa00489SMarek Szyprowski domain->pgtable[i + 2] = ZERO_LV2LINK; 751bfa00489SMarek Szyprowski domain->pgtable[i + 3] = ZERO_LV2LINK; 752bfa00489SMarek Szyprowski domain->pgtable[i + 4] = ZERO_LV2LINK; 753bfa00489SMarek Szyprowski domain->pgtable[i + 5] = ZERO_LV2LINK; 754bfa00489SMarek Szyprowski domain->pgtable[i + 6] = ZERO_LV2LINK; 755bfa00489SMarek Szyprowski domain->pgtable[i + 7] = ZERO_LV2LINK; 75666a7ed84SCho KyongHo } 75766a7ed84SCho KyongHo 7585e3435ebSMarek Szyprowski handle = dma_map_single(dma_dev, domain->pgtable, 
LV1TABLE_SIZE, 7595e3435ebSMarek Szyprowski DMA_TO_DEVICE); 7605e3435ebSMarek Szyprowski /* For mapping page table entries we rely on dma == phys */ 7615e3435ebSMarek Szyprowski BUG_ON(handle != virt_to_phys(domain->pgtable)); 7622a96536eSKyongHo Cho 763bfa00489SMarek Szyprowski spin_lock_init(&domain->lock); 764bfa00489SMarek Szyprowski spin_lock_init(&domain->pgtablelock); 765bfa00489SMarek Szyprowski INIT_LIST_HEAD(&domain->clients); 7662a96536eSKyongHo Cho 767bfa00489SMarek Szyprowski domain->domain.geometry.aperture_start = 0; 768bfa00489SMarek Szyprowski domain->domain.geometry.aperture_end = ~0UL; 769bfa00489SMarek Szyprowski domain->domain.geometry.force_aperture = true; 7703177bb76SJoerg Roedel 771bfa00489SMarek Szyprowski return &domain->domain; 7722a96536eSKyongHo Cho 7732a96536eSKyongHo Cho err_counter: 774bfa00489SMarek Szyprowski free_pages((unsigned long)domain->pgtable, 2); 77558c6f6a3SMarek Szyprowski err_dma_cookie: 77658c6f6a3SMarek Szyprowski if (type == IOMMU_DOMAIN_DMA) 77758c6f6a3SMarek Szyprowski iommu_put_dma_cookie(&domain->domain); 7782a96536eSKyongHo Cho err_pgtable: 779bfa00489SMarek Szyprowski kfree(domain); 780e1fd1eaaSJoerg Roedel return NULL; 7812a96536eSKyongHo Cho } 7822a96536eSKyongHo Cho 783bfa00489SMarek Szyprowski static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain) 7842a96536eSKyongHo Cho { 785bfa00489SMarek Szyprowski struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); 786469acebeSMarek Szyprowski struct sysmmu_drvdata *data, *next; 7872a96536eSKyongHo Cho unsigned long flags; 7882a96536eSKyongHo Cho int i; 7892a96536eSKyongHo Cho 790bfa00489SMarek Szyprowski WARN_ON(!list_empty(&domain->clients)); 7912a96536eSKyongHo Cho 792bfa00489SMarek Szyprowski spin_lock_irqsave(&domain->lock, flags); 7932a96536eSKyongHo Cho 794bfa00489SMarek Szyprowski list_for_each_entry_safe(data, next, &domain->clients, domain_node) { 795469acebeSMarek Szyprowski if (__sysmmu_disable(data)) 796469acebeSMarek 
Szyprowski data->master = NULL; 797469acebeSMarek Szyprowski list_del_init(&data->domain_node); 7982a96536eSKyongHo Cho } 7992a96536eSKyongHo Cho 800bfa00489SMarek Szyprowski spin_unlock_irqrestore(&domain->lock, flags); 8012a96536eSKyongHo Cho 80258c6f6a3SMarek Szyprowski if (iommu_domain->type == IOMMU_DOMAIN_DMA) 80358c6f6a3SMarek Szyprowski iommu_put_dma_cookie(iommu_domain); 80458c6f6a3SMarek Szyprowski 8055e3435ebSMarek Szyprowski dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE, 8065e3435ebSMarek Szyprowski DMA_TO_DEVICE); 8075e3435ebSMarek Szyprowski 8082a96536eSKyongHo Cho for (i = 0; i < NUM_LV1ENTRIES; i++) 8095e3435ebSMarek Szyprowski if (lv1ent_page(domain->pgtable + i)) { 8105e3435ebSMarek Szyprowski phys_addr_t base = lv2table_base(domain->pgtable + i); 8115e3435ebSMarek Szyprowski 8125e3435ebSMarek Szyprowski dma_unmap_single(dma_dev, base, LV2TABLE_SIZE, 8135e3435ebSMarek Szyprowski DMA_TO_DEVICE); 814734c3c73SCho KyongHo kmem_cache_free(lv2table_kmem_cache, 8155e3435ebSMarek Szyprowski phys_to_virt(base)); 8165e3435ebSMarek Szyprowski } 8172a96536eSKyongHo Cho 818bfa00489SMarek Szyprowski free_pages((unsigned long)domain->pgtable, 2); 819bfa00489SMarek Szyprowski free_pages((unsigned long)domain->lv2entcnt, 1); 820bfa00489SMarek Szyprowski kfree(domain); 8212a96536eSKyongHo Cho } 8222a96536eSKyongHo Cho 8235fa61cbfSMarek Szyprowski static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain, 8245fa61cbfSMarek Szyprowski struct device *dev) 8255fa61cbfSMarek Szyprowski { 8265fa61cbfSMarek Szyprowski struct exynos_iommu_owner *owner = dev->archdata.iommu; 8275fa61cbfSMarek Szyprowski struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); 8285fa61cbfSMarek Szyprowski phys_addr_t pagetable = virt_to_phys(domain->pgtable); 8295fa61cbfSMarek Szyprowski struct sysmmu_drvdata *data, *next; 8305fa61cbfSMarek Szyprowski unsigned long flags; 8315fa61cbfSMarek Szyprowski bool found = false; 8325fa61cbfSMarek 
Szyprowski 8335fa61cbfSMarek Szyprowski if (!has_sysmmu(dev) || owner->domain != iommu_domain) 8345fa61cbfSMarek Szyprowski return; 8355fa61cbfSMarek Szyprowski 8365fa61cbfSMarek Szyprowski spin_lock_irqsave(&domain->lock, flags); 8375fa61cbfSMarek Szyprowski list_for_each_entry_safe(data, next, &domain->clients, domain_node) { 8385fa61cbfSMarek Szyprowski if (data->master == dev) { 8395fa61cbfSMarek Szyprowski if (__sysmmu_disable(data)) { 8405fa61cbfSMarek Szyprowski data->master = NULL; 8415fa61cbfSMarek Szyprowski list_del_init(&data->domain_node); 8425fa61cbfSMarek Szyprowski } 8435fa61cbfSMarek Szyprowski pm_runtime_put(data->sysmmu); 8445fa61cbfSMarek Szyprowski found = true; 8455fa61cbfSMarek Szyprowski } 8465fa61cbfSMarek Szyprowski } 8475fa61cbfSMarek Szyprowski spin_unlock_irqrestore(&domain->lock, flags); 8485fa61cbfSMarek Szyprowski 8495fa61cbfSMarek Szyprowski owner->domain = NULL; 8505fa61cbfSMarek Szyprowski 8515fa61cbfSMarek Szyprowski if (found) 8525fa61cbfSMarek Szyprowski dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", 8535fa61cbfSMarek Szyprowski __func__, &pagetable); 8545fa61cbfSMarek Szyprowski else 8555fa61cbfSMarek Szyprowski dev_err(dev, "%s: No IOMMU is attached\n", __func__); 8565fa61cbfSMarek Szyprowski } 8575fa61cbfSMarek Szyprowski 858bfa00489SMarek Szyprowski static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain, 8592a96536eSKyongHo Cho struct device *dev) 8602a96536eSKyongHo Cho { 8616b21a5dbSCho KyongHo struct exynos_iommu_owner *owner = dev->archdata.iommu; 862bfa00489SMarek Szyprowski struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); 863469acebeSMarek Szyprowski struct sysmmu_drvdata *data; 864bfa00489SMarek Szyprowski phys_addr_t pagetable = virt_to_phys(domain->pgtable); 8652a96536eSKyongHo Cho unsigned long flags; 866469acebeSMarek Szyprowski int ret = -ENODEV; 867469acebeSMarek Szyprowski 868469acebeSMarek Szyprowski if (!has_sysmmu(dev)) 869469acebeSMarek Szyprowski return -ENODEV; 
870469acebeSMarek Szyprowski 8715fa61cbfSMarek Szyprowski if (owner->domain) 8725fa61cbfSMarek Szyprowski exynos_iommu_detach_device(owner->domain, dev); 8735fa61cbfSMarek Szyprowski 8741b092054SMarek Szyprowski list_for_each_entry(data, &owner->controllers, owner_node) { 875ce70ca56SMarek Szyprowski pm_runtime_get_sync(data->sysmmu); 876a9133b99SMarek Szyprowski ret = __sysmmu_enable(data, pagetable, domain); 877469acebeSMarek Szyprowski if (ret >= 0) { 878469acebeSMarek Szyprowski data->master = dev; 8792a96536eSKyongHo Cho 880bfa00489SMarek Szyprowski spin_lock_irqsave(&domain->lock, flags); 881bfa00489SMarek Szyprowski list_add_tail(&data->domain_node, &domain->clients); 882bfa00489SMarek Szyprowski spin_unlock_irqrestore(&domain->lock, flags); 883469acebeSMarek Szyprowski } 884469acebeSMarek Szyprowski } 8852a96536eSKyongHo Cho 8862a96536eSKyongHo Cho if (ret < 0) { 8877222e8dbSCho KyongHo dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n", 8887222e8dbSCho KyongHo __func__, &pagetable); 8897222e8dbSCho KyongHo return ret; 8902a96536eSKyongHo Cho } 8912a96536eSKyongHo Cho 8925fa61cbfSMarek Szyprowski owner->domain = iommu_domain; 8937222e8dbSCho KyongHo dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n", 8947222e8dbSCho KyongHo __func__, &pagetable, (ret == 0) ? 
"" : ", again"); 8957222e8dbSCho KyongHo 8962a96536eSKyongHo Cho return ret; 8972a96536eSKyongHo Cho } 8982a96536eSKyongHo Cho 899bfa00489SMarek Szyprowski static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, 90066a7ed84SCho KyongHo sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter) 9012a96536eSKyongHo Cho { 90261128f08SCho KyongHo if (lv1ent_section(sent)) { 903d09d78fcSCho KyongHo WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova); 90461128f08SCho KyongHo return ERR_PTR(-EADDRINUSE); 90561128f08SCho KyongHo } 90661128f08SCho KyongHo 9072a96536eSKyongHo Cho if (lv1ent_fault(sent)) { 908d09d78fcSCho KyongHo sysmmu_pte_t *pent; 90966a7ed84SCho KyongHo bool need_flush_flpd_cache = lv1ent_zero(sent); 9102a96536eSKyongHo Cho 911734c3c73SCho KyongHo pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC); 912dbf6c6efSArnd Bergmann BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1)); 9132a96536eSKyongHo Cho if (!pent) 91461128f08SCho KyongHo return ERR_PTR(-ENOMEM); 9152a96536eSKyongHo Cho 9165e3435ebSMarek Szyprowski update_pte(sent, mk_lv1ent_page(virt_to_phys(pent))); 917dc3814f4SColin Cross kmemleak_ignore(pent); 9182a96536eSKyongHo Cho *pgcounter = NUM_LV2ENTRIES; 9195e3435ebSMarek Szyprowski dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE); 92066a7ed84SCho KyongHo 92166a7ed84SCho KyongHo /* 922f171ababSSachin Kamat * If pre-fetched SLPD is a faulty SLPD in zero_l2_table, 923f171ababSSachin Kamat * FLPD cache may cache the address of zero_l2_table. This 924f171ababSSachin Kamat * function replaces the zero_l2_table with new L2 page table 925f171ababSSachin Kamat * to write valid mappings. 92666a7ed84SCho KyongHo * Accessing the valid area may cause page fault since FLPD 927f171ababSSachin Kamat * cache may still cache zero_l2_table for the valid area 928f171ababSSachin Kamat * instead of new L2 page table that has the mapping 929f171ababSSachin Kamat * information of the valid area. 
93066a7ed84SCho KyongHo * Thus any replacement of zero_l2_table with other valid L2 93166a7ed84SCho KyongHo * page table must involve FLPD cache invalidation for System 93266a7ed84SCho KyongHo * MMU v3.3. 93366a7ed84SCho KyongHo * FLPD cache invalidation is performed with TLB invalidation 93466a7ed84SCho KyongHo * by VPN without blocking. It is safe to invalidate TLB without 93566a7ed84SCho KyongHo * blocking because the target address of TLB invalidation is 93666a7ed84SCho KyongHo * not currently mapped. 93766a7ed84SCho KyongHo */ 93866a7ed84SCho KyongHo if (need_flush_flpd_cache) { 939469acebeSMarek Szyprowski struct sysmmu_drvdata *data; 940365409dbSSachin Kamat 941bfa00489SMarek Szyprowski spin_lock(&domain->lock); 942bfa00489SMarek Szyprowski list_for_each_entry(data, &domain->clients, domain_node) 943469acebeSMarek Szyprowski sysmmu_tlb_invalidate_flpdcache(data, iova); 944bfa00489SMarek Szyprowski spin_unlock(&domain->lock); 94566a7ed84SCho KyongHo } 9462a96536eSKyongHo Cho } 9472a96536eSKyongHo Cho 9482a96536eSKyongHo Cho return page_entry(sent, iova); 9492a96536eSKyongHo Cho } 9502a96536eSKyongHo Cho 951bfa00489SMarek Szyprowski static int lv1set_section(struct exynos_iommu_domain *domain, 95266a7ed84SCho KyongHo sysmmu_pte_t *sent, sysmmu_iova_t iova, 95361128f08SCho KyongHo phys_addr_t paddr, short *pgcnt) 9542a96536eSKyongHo Cho { 95561128f08SCho KyongHo if (lv1ent_section(sent)) { 956d09d78fcSCho KyongHo WARN(1, "Trying mapping on 1MiB@%#08x that is mapped", 95761128f08SCho KyongHo iova); 9582a96536eSKyongHo Cho return -EADDRINUSE; 95961128f08SCho KyongHo } 9602a96536eSKyongHo Cho 9612a96536eSKyongHo Cho if (lv1ent_page(sent)) { 96261128f08SCho KyongHo if (*pgcnt != NUM_LV2ENTRIES) { 963d09d78fcSCho KyongHo WARN(1, "Trying mapping on 1MiB@%#08x that is mapped", 96461128f08SCho KyongHo iova); 9652a96536eSKyongHo Cho return -EADDRINUSE; 96661128f08SCho KyongHo } 9672a96536eSKyongHo Cho 968734c3c73SCho KyongHo kmem_cache_free(lv2table_kmem_cache, 
page_entry(sent, 0)); 9692a96536eSKyongHo Cho *pgcnt = 0; 9702a96536eSKyongHo Cho } 9712a96536eSKyongHo Cho 9725e3435ebSMarek Szyprowski update_pte(sent, mk_lv1ent_sect(paddr)); 9732a96536eSKyongHo Cho 974bfa00489SMarek Szyprowski spin_lock(&domain->lock); 97566a7ed84SCho KyongHo if (lv1ent_page_zero(sent)) { 976469acebeSMarek Szyprowski struct sysmmu_drvdata *data; 97766a7ed84SCho KyongHo /* 97866a7ed84SCho KyongHo * Flushing FLPD cache in System MMU v3.3 that may cache a FLPD 97966a7ed84SCho KyongHo * entry by speculative prefetch of SLPD which has no mapping. 98066a7ed84SCho KyongHo */ 981bfa00489SMarek Szyprowski list_for_each_entry(data, &domain->clients, domain_node) 982469acebeSMarek Szyprowski sysmmu_tlb_invalidate_flpdcache(data, iova); 98366a7ed84SCho KyongHo } 984bfa00489SMarek Szyprowski spin_unlock(&domain->lock); 98566a7ed84SCho KyongHo 9862a96536eSKyongHo Cho return 0; 9872a96536eSKyongHo Cho } 9882a96536eSKyongHo Cho 989d09d78fcSCho KyongHo static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size, 9902a96536eSKyongHo Cho short *pgcnt) 9912a96536eSKyongHo Cho { 9922a96536eSKyongHo Cho if (size == SPAGE_SIZE) { 9930bf4e54dSCho KyongHo if (WARN_ON(!lv2ent_fault(pent))) 9942a96536eSKyongHo Cho return -EADDRINUSE; 9952a96536eSKyongHo Cho 9965e3435ebSMarek Szyprowski update_pte(pent, mk_lv2ent_spage(paddr)); 9972a96536eSKyongHo Cho *pgcnt -= 1; 9982a96536eSKyongHo Cho } else { /* size == LPAGE_SIZE */ 9992a96536eSKyongHo Cho int i; 10005e3435ebSMarek Szyprowski dma_addr_t pent_base = virt_to_phys(pent); 1001365409dbSSachin Kamat 10025e3435ebSMarek Szyprowski dma_sync_single_for_cpu(dma_dev, pent_base, 10035e3435ebSMarek Szyprowski sizeof(*pent) * SPAGES_PER_LPAGE, 10045e3435ebSMarek Szyprowski DMA_TO_DEVICE); 10052a96536eSKyongHo Cho for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) { 10060bf4e54dSCho KyongHo if (WARN_ON(!lv2ent_fault(pent))) { 100761128f08SCho KyongHo if (i > 0) 100861128f08SCho KyongHo memset(pent - i, 0, sizeof(*pent) * 
i); 10092a96536eSKyongHo Cho return -EADDRINUSE; 10102a96536eSKyongHo Cho } 10112a96536eSKyongHo Cho 10122a96536eSKyongHo Cho *pent = mk_lv2ent_lpage(paddr); 10132a96536eSKyongHo Cho } 10145e3435ebSMarek Szyprowski dma_sync_single_for_device(dma_dev, pent_base, 10155e3435ebSMarek Szyprowski sizeof(*pent) * SPAGES_PER_LPAGE, 10165e3435ebSMarek Szyprowski DMA_TO_DEVICE); 10172a96536eSKyongHo Cho *pgcnt -= SPAGES_PER_LPAGE; 10182a96536eSKyongHo Cho } 10192a96536eSKyongHo Cho 10202a96536eSKyongHo Cho return 0; 10212a96536eSKyongHo Cho } 10222a96536eSKyongHo Cho 102366a7ed84SCho KyongHo /* 102466a7ed84SCho KyongHo * *CAUTION* to the I/O virtual memory managers that support exynos-iommu: 102566a7ed84SCho KyongHo * 1026f171ababSSachin Kamat * System MMU v3.x has advanced logic to improve address translation 102766a7ed84SCho KyongHo * performance with caching more page table entries by a page table walk. 1028f171ababSSachin Kamat * However, the logic has a bug that while caching faulty page table entries, 1029f171ababSSachin Kamat * System MMU reports page fault if the cached fault entry is hit even though 1030f171ababSSachin Kamat * the fault entry is updated to a valid entry after the entry is cached. 1031f171ababSSachin Kamat * To prevent caching faulty page table entries which may be updated to valid 1032f171ababSSachin Kamat * entries later, the virtual memory manager should care about the workaround 1033f171ababSSachin Kamat * for the problem. The following describes the workaround. 103466a7ed84SCho KyongHo * 103566a7ed84SCho KyongHo * Any two consecutive I/O virtual address regions must have a hole of 128KiB 1036f171ababSSachin Kamat * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug). 103766a7ed84SCho KyongHo * 1038f171ababSSachin Kamat * Precisely, any start address of I/O virtual region must be aligned with 103966a7ed84SCho KyongHo * the following sizes for System MMU v3.1 and v3.2. 
104066a7ed84SCho KyongHo * System MMU v3.1: 128KiB 104166a7ed84SCho KyongHo * System MMU v3.2: 256KiB 104266a7ed84SCho KyongHo * 104366a7ed84SCho KyongHo * Because System MMU v3.3 caches page table entries more aggressively, it needs 1044f171ababSSachin Kamat * more workarounds. 1045f171ababSSachin Kamat * - Any two consecutive I/O virtual regions must have a hole of size larger 1046f171ababSSachin Kamat * than or equal to 128KiB. 104766a7ed84SCho KyongHo * - Start address of an I/O virtual region must be aligned by 128KiB. 104866a7ed84SCho KyongHo */ 1049bfa00489SMarek Szyprowski static int exynos_iommu_map(struct iommu_domain *iommu_domain, 1050bfa00489SMarek Szyprowski unsigned long l_iova, phys_addr_t paddr, size_t size, 1051bfa00489SMarek Szyprowski int prot) 10522a96536eSKyongHo Cho { 1053bfa00489SMarek Szyprowski struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); 1054d09d78fcSCho KyongHo sysmmu_pte_t *entry; 1055d09d78fcSCho KyongHo sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; 10562a96536eSKyongHo Cho unsigned long flags; 10572a96536eSKyongHo Cho int ret = -ENOMEM; 10582a96536eSKyongHo Cho 1059bfa00489SMarek Szyprowski BUG_ON(domain->pgtable == NULL); 10602a96536eSKyongHo Cho 1061bfa00489SMarek Szyprowski spin_lock_irqsave(&domain->pgtablelock, flags); 10622a96536eSKyongHo Cho 1063bfa00489SMarek Szyprowski entry = section_entry(domain->pgtable, iova); 10642a96536eSKyongHo Cho 10652a96536eSKyongHo Cho if (size == SECT_SIZE) { 1066bfa00489SMarek Szyprowski ret = lv1set_section(domain, entry, iova, paddr, 1067bfa00489SMarek Szyprowski &domain->lv2entcnt[lv1ent_offset(iova)]); 10682a96536eSKyongHo Cho } else { 1069d09d78fcSCho KyongHo sysmmu_pte_t *pent; 10702a96536eSKyongHo Cho 1071bfa00489SMarek Szyprowski pent = alloc_lv2entry(domain, entry, iova, 1072bfa00489SMarek Szyprowski &domain->lv2entcnt[lv1ent_offset(iova)]); 10732a96536eSKyongHo Cho 107461128f08SCho KyongHo if (IS_ERR(pent)) 107561128f08SCho KyongHo ret = PTR_ERR(pent); 
10762a96536eSKyongHo Cho else 10772a96536eSKyongHo Cho ret = lv2set_page(pent, paddr, size, 1078bfa00489SMarek Szyprowski &domain->lv2entcnt[lv1ent_offset(iova)]); 10792a96536eSKyongHo Cho } 10802a96536eSKyongHo Cho 108161128f08SCho KyongHo if (ret) 10820bf4e54dSCho KyongHo pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n", 10830bf4e54dSCho KyongHo __func__, ret, size, iova); 10842a96536eSKyongHo Cho 1085bfa00489SMarek Szyprowski spin_unlock_irqrestore(&domain->pgtablelock, flags); 10862a96536eSKyongHo Cho 10872a96536eSKyongHo Cho return ret; 10882a96536eSKyongHo Cho } 10892a96536eSKyongHo Cho 1090bfa00489SMarek Szyprowski static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain, 109166a7ed84SCho KyongHo sysmmu_iova_t iova, size_t size) 109266a7ed84SCho KyongHo { 1093469acebeSMarek Szyprowski struct sysmmu_drvdata *data; 109466a7ed84SCho KyongHo unsigned long flags; 109566a7ed84SCho KyongHo 1096bfa00489SMarek Szyprowski spin_lock_irqsave(&domain->lock, flags); 109766a7ed84SCho KyongHo 1098bfa00489SMarek Szyprowski list_for_each_entry(data, &domain->clients, domain_node) 1099469acebeSMarek Szyprowski sysmmu_tlb_invalidate_entry(data, iova, size); 110066a7ed84SCho KyongHo 1101bfa00489SMarek Szyprowski spin_unlock_irqrestore(&domain->lock, flags); 110266a7ed84SCho KyongHo } 110366a7ed84SCho KyongHo 1104bfa00489SMarek Szyprowski static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain, 1105d09d78fcSCho KyongHo unsigned long l_iova, size_t size) 11062a96536eSKyongHo Cho { 1107bfa00489SMarek Szyprowski struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); 1108d09d78fcSCho KyongHo sysmmu_iova_t iova = (sysmmu_iova_t)l_iova; 1109d09d78fcSCho KyongHo sysmmu_pte_t *ent; 111061128f08SCho KyongHo size_t err_pgsize; 1111d09d78fcSCho KyongHo unsigned long flags; 11122a96536eSKyongHo Cho 1113bfa00489SMarek Szyprowski BUG_ON(domain->pgtable == NULL); 11142a96536eSKyongHo Cho 1115bfa00489SMarek Szyprowski 
spin_lock_irqsave(&domain->pgtablelock, flags); 11162a96536eSKyongHo Cho 1117bfa00489SMarek Szyprowski ent = section_entry(domain->pgtable, iova); 11182a96536eSKyongHo Cho 11192a96536eSKyongHo Cho if (lv1ent_section(ent)) { 11200bf4e54dSCho KyongHo if (WARN_ON(size < SECT_SIZE)) { 112161128f08SCho KyongHo err_pgsize = SECT_SIZE; 112261128f08SCho KyongHo goto err; 112361128f08SCho KyongHo } 11242a96536eSKyongHo Cho 1125f171ababSSachin Kamat /* workaround for h/w bug in System MMU v3.3 */ 11265e3435ebSMarek Szyprowski update_pte(ent, ZERO_LV2LINK); 11272a96536eSKyongHo Cho size = SECT_SIZE; 11282a96536eSKyongHo Cho goto done; 11292a96536eSKyongHo Cho } 11302a96536eSKyongHo Cho 11312a96536eSKyongHo Cho if (unlikely(lv1ent_fault(ent))) { 11322a96536eSKyongHo Cho if (size > SECT_SIZE) 11332a96536eSKyongHo Cho size = SECT_SIZE; 11342a96536eSKyongHo Cho goto done; 11352a96536eSKyongHo Cho } 11362a96536eSKyongHo Cho 11372a96536eSKyongHo Cho /* lv1ent_page(sent) == true here */ 11382a96536eSKyongHo Cho 11392a96536eSKyongHo Cho ent = page_entry(ent, iova); 11402a96536eSKyongHo Cho 11412a96536eSKyongHo Cho if (unlikely(lv2ent_fault(ent))) { 11422a96536eSKyongHo Cho size = SPAGE_SIZE; 11432a96536eSKyongHo Cho goto done; 11442a96536eSKyongHo Cho } 11452a96536eSKyongHo Cho 11462a96536eSKyongHo Cho if (lv2ent_small(ent)) { 11475e3435ebSMarek Szyprowski update_pte(ent, 0); 11482a96536eSKyongHo Cho size = SPAGE_SIZE; 1149bfa00489SMarek Szyprowski domain->lv2entcnt[lv1ent_offset(iova)] += 1; 11502a96536eSKyongHo Cho goto done; 11512a96536eSKyongHo Cho } 11522a96536eSKyongHo Cho 11532a96536eSKyongHo Cho /* lv1ent_large(ent) == true here */ 11540bf4e54dSCho KyongHo if (WARN_ON(size < LPAGE_SIZE)) { 115561128f08SCho KyongHo err_pgsize = LPAGE_SIZE; 115661128f08SCho KyongHo goto err; 115761128f08SCho KyongHo } 11582a96536eSKyongHo Cho 11595e3435ebSMarek Szyprowski dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), 11605e3435ebSMarek Szyprowski sizeof(*ent) * SPAGES_PER_LPAGE, 
11615e3435ebSMarek Szyprowski DMA_TO_DEVICE); 11622a96536eSKyongHo Cho memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE); 11635e3435ebSMarek Szyprowski dma_sync_single_for_device(dma_dev, virt_to_phys(ent), 11645e3435ebSMarek Szyprowski sizeof(*ent) * SPAGES_PER_LPAGE, 11655e3435ebSMarek Szyprowski DMA_TO_DEVICE); 11662a96536eSKyongHo Cho size = LPAGE_SIZE; 1167bfa00489SMarek Szyprowski domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE; 11682a96536eSKyongHo Cho done: 1169bfa00489SMarek Szyprowski spin_unlock_irqrestore(&domain->pgtablelock, flags); 11702a96536eSKyongHo Cho 1171bfa00489SMarek Szyprowski exynos_iommu_tlb_invalidate_entry(domain, iova, size); 11722a96536eSKyongHo Cho 11732a96536eSKyongHo Cho return size; 117461128f08SCho KyongHo err: 1175bfa00489SMarek Szyprowski spin_unlock_irqrestore(&domain->pgtablelock, flags); 117661128f08SCho KyongHo 11770bf4e54dSCho KyongHo pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n", 117861128f08SCho KyongHo __func__, size, iova, err_pgsize); 117961128f08SCho KyongHo 118061128f08SCho KyongHo return 0; 11812a96536eSKyongHo Cho } 11822a96536eSKyongHo Cho 1183bfa00489SMarek Szyprowski static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain, 1184bb5547acSVarun Sethi dma_addr_t iova) 11852a96536eSKyongHo Cho { 1186bfa00489SMarek Szyprowski struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); 1187d09d78fcSCho KyongHo sysmmu_pte_t *entry; 11882a96536eSKyongHo Cho unsigned long flags; 11892a96536eSKyongHo Cho phys_addr_t phys = 0; 11902a96536eSKyongHo Cho 1191bfa00489SMarek Szyprowski spin_lock_irqsave(&domain->pgtablelock, flags); 11922a96536eSKyongHo Cho 1193bfa00489SMarek Szyprowski entry = section_entry(domain->pgtable, iova); 11942a96536eSKyongHo Cho 11952a96536eSKyongHo Cho if (lv1ent_section(entry)) { 11962a96536eSKyongHo Cho phys = section_phys(entry) + section_offs(iova); 11972a96536eSKyongHo Cho } else if (lv1ent_page(entry)) { 11982a96536eSKyongHo Cho 
entry = page_entry(entry, iova); 11992a96536eSKyongHo Cho 12002a96536eSKyongHo Cho if (lv2ent_large(entry)) 12012a96536eSKyongHo Cho phys = lpage_phys(entry) + lpage_offs(iova); 12022a96536eSKyongHo Cho else if (lv2ent_small(entry)) 12032a96536eSKyongHo Cho phys = spage_phys(entry) + spage_offs(iova); 12042a96536eSKyongHo Cho } 12052a96536eSKyongHo Cho 1206bfa00489SMarek Szyprowski spin_unlock_irqrestore(&domain->pgtablelock, flags); 12072a96536eSKyongHo Cho 12082a96536eSKyongHo Cho return phys; 12092a96536eSKyongHo Cho } 12102a96536eSKyongHo Cho 12116c2ae7e2SMarek Szyprowski static struct iommu_group *get_device_iommu_group(struct device *dev) 12126c2ae7e2SMarek Szyprowski { 12136c2ae7e2SMarek Szyprowski struct iommu_group *group; 12146c2ae7e2SMarek Szyprowski 12156c2ae7e2SMarek Szyprowski group = iommu_group_get(dev); 12166c2ae7e2SMarek Szyprowski if (!group) 12176c2ae7e2SMarek Szyprowski group = iommu_group_alloc(); 12186c2ae7e2SMarek Szyprowski 12196c2ae7e2SMarek Szyprowski return group; 12206c2ae7e2SMarek Szyprowski } 12216c2ae7e2SMarek Szyprowski 1222bf4a1c92SAntonios Motakis static int exynos_iommu_add_device(struct device *dev) 1223bf4a1c92SAntonios Motakis { 1224bf4a1c92SAntonios Motakis struct iommu_group *group; 1225bf4a1c92SAntonios Motakis 122606801db0SMarek Szyprowski if (!has_sysmmu(dev)) 122706801db0SMarek Szyprowski return -ENODEV; 122806801db0SMarek Szyprowski 12296c2ae7e2SMarek Szyprowski group = iommu_group_get_for_dev(dev); 1230bf4a1c92SAntonios Motakis 12316c2ae7e2SMarek Szyprowski if (IS_ERR(group)) 1232bf4a1c92SAntonios Motakis return PTR_ERR(group); 1233bf4a1c92SAntonios Motakis 1234bf4a1c92SAntonios Motakis iommu_group_put(group); 1235bf4a1c92SAntonios Motakis 12366c2ae7e2SMarek Szyprowski return 0; 1237bf4a1c92SAntonios Motakis } 1238bf4a1c92SAntonios Motakis 1239bf4a1c92SAntonios Motakis static void exynos_iommu_remove_device(struct device *dev) 1240bf4a1c92SAntonios Motakis { 124106801db0SMarek Szyprowski if (!has_sysmmu(dev)) 
124206801db0SMarek Szyprowski return; 124306801db0SMarek Szyprowski 1244bf4a1c92SAntonios Motakis iommu_group_remove_device(dev); 1245bf4a1c92SAntonios Motakis } 1246bf4a1c92SAntonios Motakis 1247aa759fd3SMarek Szyprowski static int exynos_iommu_of_xlate(struct device *dev, 1248aa759fd3SMarek Szyprowski struct of_phandle_args *spec) 1249aa759fd3SMarek Szyprowski { 1250aa759fd3SMarek Szyprowski struct exynos_iommu_owner *owner = dev->archdata.iommu; 1251aa759fd3SMarek Szyprowski struct platform_device *sysmmu = of_find_device_by_node(spec->np); 1252aa759fd3SMarek Szyprowski struct sysmmu_drvdata *data; 1253aa759fd3SMarek Szyprowski 1254aa759fd3SMarek Szyprowski if (!sysmmu) 1255aa759fd3SMarek Szyprowski return -ENODEV; 1256aa759fd3SMarek Szyprowski 1257aa759fd3SMarek Szyprowski data = platform_get_drvdata(sysmmu); 1258aa759fd3SMarek Szyprowski if (!data) 1259aa759fd3SMarek Szyprowski return -ENODEV; 1260aa759fd3SMarek Szyprowski 1261aa759fd3SMarek Szyprowski if (!owner) { 1262aa759fd3SMarek Szyprowski owner = kzalloc(sizeof(*owner), GFP_KERNEL); 1263aa759fd3SMarek Szyprowski if (!owner) 1264aa759fd3SMarek Szyprowski return -ENOMEM; 1265aa759fd3SMarek Szyprowski 1266aa759fd3SMarek Szyprowski INIT_LIST_HEAD(&owner->controllers); 1267aa759fd3SMarek Szyprowski dev->archdata.iommu = owner; 1268aa759fd3SMarek Szyprowski } 1269aa759fd3SMarek Szyprowski 1270aa759fd3SMarek Szyprowski list_add_tail(&data->owner_node, &owner->controllers); 1271aa759fd3SMarek Szyprowski return 0; 1272aa759fd3SMarek Szyprowski } 1273aa759fd3SMarek Szyprowski 12748ed55c81SMarek Szyprowski static struct iommu_ops exynos_iommu_ops = { 1275e1fd1eaaSJoerg Roedel .domain_alloc = exynos_iommu_domain_alloc, 1276e1fd1eaaSJoerg Roedel .domain_free = exynos_iommu_domain_free, 1277ba5fa6f6SBjorn Helgaas .attach_dev = exynos_iommu_attach_device, 1278ba5fa6f6SBjorn Helgaas .detach_dev = exynos_iommu_detach_device, 1279ba5fa6f6SBjorn Helgaas .map = exynos_iommu_map, 1280ba5fa6f6SBjorn Helgaas .unmap = 
exynos_iommu_unmap, 1281315786ebSOlav Haugan .map_sg = default_iommu_map_sg, 1282ba5fa6f6SBjorn Helgaas .iova_to_phys = exynos_iommu_iova_to_phys, 12836c2ae7e2SMarek Szyprowski .device_group = get_device_iommu_group, 1284ba5fa6f6SBjorn Helgaas .add_device = exynos_iommu_add_device, 1285ba5fa6f6SBjorn Helgaas .remove_device = exynos_iommu_remove_device, 12862a96536eSKyongHo Cho .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE, 1287aa759fd3SMarek Szyprowski .of_xlate = exynos_iommu_of_xlate, 12882a96536eSKyongHo Cho }; 12892a96536eSKyongHo Cho 12908ed55c81SMarek Szyprowski static bool init_done; 12918ed55c81SMarek Szyprowski 12922a96536eSKyongHo Cho static int __init exynos_iommu_init(void) 12932a96536eSKyongHo Cho { 12942a96536eSKyongHo Cho int ret; 12952a96536eSKyongHo Cho 1296734c3c73SCho KyongHo lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", 1297734c3c73SCho KyongHo LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); 1298734c3c73SCho KyongHo if (!lv2table_kmem_cache) { 1299734c3c73SCho KyongHo pr_err("%s: Failed to create kmem cache\n", __func__); 1300734c3c73SCho KyongHo return -ENOMEM; 1301734c3c73SCho KyongHo } 1302734c3c73SCho KyongHo 13032a96536eSKyongHo Cho ret = platform_driver_register(&exynos_sysmmu_driver); 1304734c3c73SCho KyongHo if (ret) { 1305734c3c73SCho KyongHo pr_err("%s: Failed to register driver\n", __func__); 1306734c3c73SCho KyongHo goto err_reg_driver; 1307734c3c73SCho KyongHo } 13082a96536eSKyongHo Cho 130966a7ed84SCho KyongHo zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL); 131066a7ed84SCho KyongHo if (zero_lv2_table == NULL) { 131166a7ed84SCho KyongHo pr_err("%s: Failed to allocate zero level2 page table\n", 131266a7ed84SCho KyongHo __func__); 131366a7ed84SCho KyongHo ret = -ENOMEM; 131466a7ed84SCho KyongHo goto err_zero_lv2; 131566a7ed84SCho KyongHo } 131666a7ed84SCho KyongHo 1317734c3c73SCho KyongHo ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops); 1318734c3c73SCho KyongHo if (ret) { 
1319734c3c73SCho KyongHo pr_err("%s: Failed to register exynos-iommu driver.\n", 1320734c3c73SCho KyongHo __func__); 1321734c3c73SCho KyongHo goto err_set_iommu; 1322734c3c73SCho KyongHo } 13232a96536eSKyongHo Cho 13248ed55c81SMarek Szyprowski init_done = true; 13258ed55c81SMarek Szyprowski 1326734c3c73SCho KyongHo return 0; 1327734c3c73SCho KyongHo err_set_iommu: 132866a7ed84SCho KyongHo kmem_cache_free(lv2table_kmem_cache, zero_lv2_table); 132966a7ed84SCho KyongHo err_zero_lv2: 1330734c3c73SCho KyongHo platform_driver_unregister(&exynos_sysmmu_driver); 1331734c3c73SCho KyongHo err_reg_driver: 1332734c3c73SCho KyongHo kmem_cache_destroy(lv2table_kmem_cache); 13332a96536eSKyongHo Cho return ret; 13342a96536eSKyongHo Cho } 13358ed55c81SMarek Szyprowski 13368ed55c81SMarek Szyprowski static int __init exynos_iommu_of_setup(struct device_node *np) 13378ed55c81SMarek Szyprowski { 13388ed55c81SMarek Szyprowski struct platform_device *pdev; 13398ed55c81SMarek Szyprowski 13408ed55c81SMarek Szyprowski if (!init_done) 13418ed55c81SMarek Szyprowski exynos_iommu_init(); 13428ed55c81SMarek Szyprowski 13438ed55c81SMarek Szyprowski pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root); 13448ed55c81SMarek Szyprowski if (IS_ERR(pdev)) 13458ed55c81SMarek Szyprowski return PTR_ERR(pdev); 13468ed55c81SMarek Szyprowski 13475e3435ebSMarek Szyprowski /* 13485e3435ebSMarek Szyprowski * use the first registered sysmmu device for performing 13495e3435ebSMarek Szyprowski * dma mapping operations on iommu page tables (cpu cache flush) 13505e3435ebSMarek Szyprowski */ 13515e3435ebSMarek Szyprowski if (!dma_dev) 13525e3435ebSMarek Szyprowski dma_dev = &pdev->dev; 13535e3435ebSMarek Szyprowski 13548ed55c81SMarek Szyprowski return 0; 13558ed55c81SMarek Szyprowski } 13568ed55c81SMarek Szyprowski 13578ed55c81SMarek Szyprowski IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu", 13588ed55c81SMarek Szyprowski exynos_iommu_of_setup); 1359