/*
 * Copyright (C) 2014 Christian Gmeiner <christian.gmeiner@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>

#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_iommu.h"
#include "state_hi.xml.h"

/* one flat page table: one 32-bit entry per 4KiB page of GPU address space */
#define PT_SIZE		SZ_2M
#define PT_ENTRIES	(PT_SIZE / sizeof(u32))

#define GPU_MEM_START	0x80000000

struct etnaviv_iommu_domain_pgtable {
	u32 *pgtable;
	dma_addr_t paddr;
};

struct etnaviv_iommu_domain {
	struct iommu_domain domain;
	struct device *dev;
	void *bad_page_cpu;
	dma_addr_t bad_page_dma;
	struct etnaviv_iommu_domain_pgtable pgtable;
	spinlock_t map_lock;
};

static struct etnaviv_iommu_domain *to_etnaviv_domain(struct iommu_domain *domain)
{
	return container_of(domain, struct etnaviv_iommu_domain, domain);
}

static int pgtable_alloc(struct etnaviv_iommu_domain_pgtable *pgtable,
			 struct device *dev, size_t size)
{
	pgtable->pgtable = dma_alloc_coherent(dev, size, &pgtable->paddr,
					      GFP_KERNEL);
	if (!pgtable->pgtable)
		return -ENOMEM;

	return 0;
}

static void pgtable_free(struct etnaviv_iommu_domain_pgtable *pgtable,
			 struct device *dev, size_t size)
{
	dma_free_coherent(dev, size, pgtable->pgtable, pgtable->paddr);
}

static u32 pgtable_read(struct etnaviv_iommu_domain_pgtable *pgtable,
			unsigned long iova)
{
	/* calculate index into page table */
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
	phys_addr_t paddr;

	paddr = pgtable->pgtable[index];

	return paddr;
}

static void pgtable_write(struct etnaviv_iommu_domain_pgtable *pgtable,
			  unsigned long iova, phys_addr_t paddr)
{
	/* calculate index into page table */
	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;

	pgtable->pgtable[index] = paddr;
}
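
/*
 * Worked example (illustrative only, not driver code): the page table is a
 * flat array of 32-bit physical addresses, one entry per 4KiB page of GPU
 * address space starting at GPU_MEM_START. Mapping iova 0x80002000 therefore
 * touches entry (0x80002000 - 0x80000000) / SZ_4K == 2; page_paddr stands in
 * for a hypothetical page address:
 *
 *	pgtable_write(&etnaviv_domain->pgtable, 0x80002000, page_paddr);
 *
 * after which pgtable.pgtable[2] holds page_paddr.
 */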
static int __etnaviv_iommu_init(struct etnaviv_iommu_domain *etnaviv_domain)
{
	u32 *p;
	int ret, i;

	etnaviv_domain->bad_page_cpu = dma_alloc_coherent(etnaviv_domain->dev,
						SZ_4K,
						&etnaviv_domain->bad_page_dma,
						GFP_KERNEL);
	if (!etnaviv_domain->bad_page_cpu)
		return -ENOMEM;

	/* fill the bad page with a recognizable pattern */
	p = etnaviv_domain->bad_page_cpu;
	for (i = 0; i < SZ_4K / 4; i++)
		*p++ = 0xdead55aa;

	ret = pgtable_alloc(&etnaviv_domain->pgtable, etnaviv_domain->dev,
			    PT_SIZE);
	if (ret < 0) {
		dma_free_coherent(etnaviv_domain->dev, SZ_4K,
				  etnaviv_domain->bad_page_cpu,
				  etnaviv_domain->bad_page_dma);
		return ret;
	}

	/* point all entries at the bad page so stray accesses are caught */
	for (i = 0; i < PT_ENTRIES; i++)
		etnaviv_domain->pgtable.pgtable[i] =
			etnaviv_domain->bad_page_dma;

	spin_lock_init(&etnaviv_domain->map_lock);

	return 0;
}

static void etnaviv_domain_free(struct iommu_domain *domain)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);

	pgtable_free(&etnaviv_domain->pgtable, etnaviv_domain->dev, PT_SIZE);

	dma_free_coherent(etnaviv_domain->dev, SZ_4K,
			  etnaviv_domain->bad_page_cpu,
			  etnaviv_domain->bad_page_dma);

	kfree(etnaviv_domain);
}

static int etnaviv_iommuv1_map(struct iommu_domain *domain, unsigned long iova,
	   phys_addr_t paddr, size_t size, int prot)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);

	if (size != SZ_4K)
		return -EINVAL;

	spin_lock(&etnaviv_domain->map_lock);
	pgtable_write(&etnaviv_domain->pgtable, iova, paddr);
	spin_unlock(&etnaviv_domain->map_lock);

	return 0;
}

static size_t etnaviv_iommuv1_unmap(struct iommu_domain *domain,
	unsigned long iova, size_t size)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);

	/* the return type is size_t, so 0, not -EINVAL, signals failure */
	if (size != SZ_4K)
		return 0;

	/* unmapped entries are redirected to the bad page */
	spin_lock(&etnaviv_domain->map_lock);
	pgtable_write(&etnaviv_domain->pgtable, iova,
		      etnaviv_domain->bad_page_dma);
	spin_unlock(&etnaviv_domain->map_lock);

	return SZ_4K;
}

static phys_addr_t etnaviv_iommu_iova_to_phys(struct iommu_domain *domain,
	dma_addr_t iova)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);

	return pgtable_read(&etnaviv_domain->pgtable, iova);
}

static size_t etnaviv_iommuv1_dump_size(struct iommu_domain *domain)
{
	return PT_SIZE;
}

static void etnaviv_iommuv1_dump(struct iommu_domain *domain, void *buf)
{
	struct etnaviv_iommu_domain *etnaviv_domain = to_etnaviv_domain(domain);

	memcpy(buf, etnaviv_domain->pgtable.pgtable, PT_SIZE);
}

static const struct etnaviv_iommu_ops etnaviv_iommu_ops = {
	.ops = {
		.domain_free = etnaviv_domain_free,
		.map = etnaviv_iommuv1_map,
		.unmap = etnaviv_iommuv1_unmap,
		.iova_to_phys = etnaviv_iommu_iova_to_phys,
		.pgsize_bitmap = SZ_4K,
	},
	.dump_size = etnaviv_iommuv1_dump_size,
	.dump = etnaviv_iommuv1_dump,
};
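
/*
 * Usage sketch (illustrative only; the real callers live in etnaviv_mmu.c):
 * mapping a single 4KiB page through the generic IOMMU API dispatches to
 * etnaviv_iommuv1_map() via the ops table above. page_paddr stands in for a
 * hypothetical physical address backing the page:
 *
 *	struct iommu_domain *domain = etnaviv_iommuv1_domain_alloc(gpu);
 *
 *	if (domain)
 *		iommu_map(domain, 0x80001000, page_paddr, SZ_4K,
 *			  IOMMU_READ | IOMMU_WRITE);
 */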
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommu_domain *etnaviv_domain =
			to_etnaviv_domain(gpu->mmu->domain);
	u32 pgtable;

	/* set base addresses */
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_RA, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_FE, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_TX, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PEZ, gpu->memory_base);
	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);

	/* set page table address in MC */
	pgtable = (u32)etnaviv_domain->pgtable.paddr;

	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PE_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_PEZ_PAGE_TABLE, pgtable);
	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}

struct iommu_domain *etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
{
	struct etnaviv_iommu_domain *etnaviv_domain;
	int ret;

	etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
	if (!etnaviv_domain)
		return NULL;

	etnaviv_domain->dev = gpu->dev;

	etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING;
	etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops;
	etnaviv_domain->domain.pgsize_bitmap = SZ_4K;
	etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START;
	etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START +
						       PT_ENTRIES * SZ_4K - 1;

	ret = __etnaviv_iommu_init(etnaviv_domain);
	if (ret)
		goto out_free;

	return &etnaviv_domain->domain;

out_free:
	kfree(etnaviv_domain);
	return NULL;
}
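
/*
 * Initialization sketch (illustrative only; the real sequence lives in
 * etnaviv_gpu.c / etnaviv_mmu.c): a GPU with a v1 MMU allocates the domain,
 * wraps it in the driver's MMU object, and reprograms the MC registers with
 * etnaviv_iommuv1_restore() after every GPU reset. etnaviv_iommu_new() is
 * hypothetical shorthand for the etnaviv_mmu.c constructor:
 *
 *	struct iommu_domain *domain = etnaviv_iommuv1_domain_alloc(gpu);
 *
 *	if (!domain)
 *		return -ENOMEM;
 *
 *	gpu->mmu = etnaviv_iommu_new(gpu, domain);  (hypothetical wrapper)
 *	etnaviv_iommuv1_restore(gpu);               (program MC registers)
 */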