/*
 * Copyright (c) 2015-2016, Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/fdtable.h>
#include <linux/idr.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "tee_private.h"

static void tee_shm_release(struct tee_shm *shm)
{
	struct tee_device *teedev = shm->teedev;
	struct tee_shm_pool_mgr *poolm;

	mutex_lock(&teedev->mutex);
	idr_remove(&teedev->idr, shm->id);
	if (shm->ctx)
		list_del(&shm->link);
	mutex_unlock(&teedev->mutex);

	if (shm->flags & TEE_SHM_DMA_BUF)
		poolm = &teedev->pool->dma_buf_mgr;
	else
		poolm = &teedev->pool->private_mgr;

	poolm->ops->free(poolm, shm);
	kfree(shm);

	tee_device_put(teedev);
}

static struct sg_table *tee_shm_op_map_dma_buf(struct dma_buf_attachment
			*attach, enum dma_data_direction dir)
{
	return NULL;
}

static void tee_shm_op_unmap_dma_buf(struct dma_buf_attachment *attach,
				     struct sg_table *table,
				     enum dma_data_direction dir)
{
}

static void tee_shm_op_release(struct dma_buf *dmabuf)
{
	struct tee_shm *shm = dmabuf->priv;

	tee_shm_release(shm);
}

static void *tee_shm_op_map_atomic(struct dma_buf *dmabuf, unsigned long pgnum)
{
	return NULL;
}

static void *tee_shm_op_map(struct dma_buf *dmabuf, unsigned long pgnum)
{
	return NULL;
}

static int tee_shm_op_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct tee_shm *shm = dmabuf->priv;
	size_t size = vma->vm_end - vma->vm_start;

	return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}

static const struct dma_buf_ops tee_shm_dma_buf_ops = {
	.map_dma_buf = tee_shm_op_map_dma_buf,
	.unmap_dma_buf = tee_shm_op_unmap_dma_buf,
	.release = tee_shm_op_release,
	.map_atomic = tee_shm_op_map_atomic,
	.map = tee_shm_op_map,
	.mmap = tee_shm_op_mmap,
};
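/*
 * Illustrative note, not part of the driver: map_dma_buf()/unmap_dma_buf()
 * above are stubs since kernel-side attachments aren't supported here, but
 * the mmap() op lets user space map the physically contiguous buffer
 * behind a dma-buf fd directly. A hypothetical user-space sketch, assuming
 * "fd" was obtained through the TEE ioctl interface and "size" matches the
 * allocation:
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *	if (p == MAP_FAILED)
 *		perror("mmap");
 */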
/**
 * tee_shm_alloc() - Allocate shared memory
 * @ctx:	Context that allocates the shared memory
 * @size:	Requested size of shared memory
 * @flags:	Flags setting properties for the requested shared memory.
 *
 * Memory allocated as global shared memory is automatically freed when the
 * TEE file pointer is closed. The @flags field uses the bits defined by
 * TEE_SHM_* in <linux/tee_drv.h>. TEE_SHM_MAPPED must currently always be
 * set. If TEE_SHM_DMA_BUF is set, global shared memory will be allocated
 * and associated with a dma-buf handle, else driver private memory.
 */
struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags)
{
	struct tee_device *teedev = ctx->teedev;
	struct tee_shm_pool_mgr *poolm = NULL;
	struct tee_shm *shm;
	void *ret;
	int rc;

	if (!(flags & TEE_SHM_MAPPED)) {
		dev_err(teedev->dev.parent,
			"only mapped allocations supported\n");
		return ERR_PTR(-EINVAL);
	}

	if ((flags & ~(TEE_SHM_MAPPED | TEE_SHM_DMA_BUF))) {
		dev_err(teedev->dev.parent, "invalid shm flags 0x%x", flags);
		return ERR_PTR(-EINVAL);
	}

	if (!tee_device_get(teedev))
		return ERR_PTR(-EINVAL);

	if (!teedev->pool) {
		/* teedev has been detached from driver */
		ret = ERR_PTR(-EINVAL);
		goto err_dev_put;
	}

	shm = kzalloc(sizeof(*shm), GFP_KERNEL);
	if (!shm) {
		ret = ERR_PTR(-ENOMEM);
		goto err_dev_put;
	}

	shm->flags = flags;
	shm->teedev = teedev;
	shm->ctx = ctx;
	if (flags & TEE_SHM_DMA_BUF)
		poolm = &teedev->pool->dma_buf_mgr;
	else
		poolm = &teedev->pool->private_mgr;

	rc = poolm->ops->alloc(poolm, shm, size);
	if (rc) {
		ret = ERR_PTR(rc);
		goto err_kfree;
	}

	mutex_lock(&teedev->mutex);
	shm->id = idr_alloc(&teedev->idr, shm, 1, 0, GFP_KERNEL);
	mutex_unlock(&teedev->mutex);
	if (shm->id < 0) {
		ret = ERR_PTR(shm->id);
		goto err_pool_free;
	}

	if (flags & TEE_SHM_DMA_BUF) {
		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

		exp_info.ops = &tee_shm_dma_buf_ops;
		exp_info.size = shm->size;
		exp_info.flags = O_RDWR;
		exp_info.priv = shm;

		shm->dmabuf = dma_buf_export(&exp_info);
		if (IS_ERR(shm->dmabuf)) {
			ret = ERR_CAST(shm->dmabuf);
			goto err_rem;
		}
	}
	mutex_lock(&teedev->mutex);
	list_add_tail(&shm->link, &ctx->list_shm);
	mutex_unlock(&teedev->mutex);

	return shm;
err_rem:
	mutex_lock(&teedev->mutex);
	idr_remove(&teedev->idr, shm->id);
	mutex_unlock(&teedev->mutex);
err_pool_free:
	poolm->ops->free(poolm, shm);
err_kfree:
	kfree(shm);
err_dev_put:
	tee_device_put(teedev);
	return ret;
}
EXPORT_SYMBOL_GPL(tee_shm_alloc);

/**
 * tee_shm_get_fd() - Increase reference count and return file descriptor
 * @shm:	Shared memory handle
 * @returns user space file descriptor to shared memory
 */
int tee_shm_get_fd(struct tee_shm *shm)
{
	u32 req_flags = TEE_SHM_MAPPED | TEE_SHM_DMA_BUF;
	int fd;

	if ((shm->flags & req_flags) != req_flags)
		return -EINVAL;

	/*
	 * Take the reference before installing the fd: once the fd is
	 * visible, user space could close it and drop the final dma-buf
	 * reference before we get a chance to take ours.
	 */
	get_dma_buf(shm->dmabuf);
	fd = dma_buf_fd(shm->dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(shm->dmabuf);
	return fd;
}
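/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * allocating dma-buf backed shared memory and handing it to user space as
 * a file descriptor. "ctx" is assumed to be a valid struct tee_context
 * obtained when the device was opened:
 *
 *	struct tee_shm *shm;
 *	int fd;
 *
 *	shm = tee_shm_alloc(ctx, 4096, TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	fd = tee_shm_get_fd(shm);
 *	tee_shm_free(shm);
 *	return fd;
 *
 * tee_shm_get_fd() takes its own reference on the dma-buf, so a valid fd
 * keeps the buffer alive even after tee_shm_free() has dropped the
 * allocation's reference.
 */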
/**
 * tee_shm_free() - Free shared memory
 * @shm:	Handle to shared memory to free
 */
void tee_shm_free(struct tee_shm *shm)
{
	/*
	 * dma_buf_put() decreases the dmabuf reference counter and will
	 * call tee_shm_release() when the last reference is gone.
	 *
	 * In the case of driver private memory we call tee_shm_release
	 * directly instead, as it doesn't have a reference counter.
	 */
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
	else
		tee_shm_release(shm);
}
EXPORT_SYMBOL_GPL(tee_shm_free);

/**
 * tee_shm_va2pa() - Get physical address of a virtual address
 * @shm:	Shared memory handle
 * @va:		Virtual address to translate
 * @pa:		Returned physical address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa)
{
	/* Check that we're in the range of the shm */
	if ((char *)va < (char *)shm->kaddr)
		return -EINVAL;
	if ((char *)va >= ((char *)shm->kaddr + shm->size))
		return -EINVAL;

	return tee_shm_get_pa(
			shm, (unsigned long)va - (unsigned long)shm->kaddr, pa);
}
EXPORT_SYMBOL_GPL(tee_shm_va2pa);

/**
 * tee_shm_pa2va() - Get virtual address of a physical address
 * @shm:	Shared memory handle
 * @pa:		Physical address to translate
 * @va:		Returned virtual address
 * @returns 0 on success and < 0 on failure
 */
int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va)
{
	/* Check that we're in the range of the shm */
	if (pa < shm->paddr)
		return -EINVAL;
	if (pa >= (shm->paddr + shm->size))
		return -EINVAL;

	if (va) {
		void *v = tee_shm_get_va(shm, pa - shm->paddr);

		if (IS_ERR(v))
			return PTR_ERR(v);
		*va = v;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_pa2va);

/**
 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @returns virtual address of the shared memory + offs if offs is within
 *	the bounds of this shared memory, else an ERR_PTR
 */
void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
{
	if (offs >= shm->size)
		return ERR_PTR(-EINVAL);
	return (char *)shm->kaddr + offs;
}
EXPORT_SYMBOL_GPL(tee_shm_get_va);
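/*
 * Illustrative sketch (hypothetical, assumes "shm" is a TEE_SHM_MAPPED
 * allocation): the translation helpers above and tee_shm_get_pa() below
 * are plain offset arithmetic against the kernel mapping (shm->kaddr) and
 * physical base (shm->paddr) of the buffer:
 *
 *	phys_addr_t pa;
 *	void *va = tee_shm_get_va(shm, 0x80);
 *
 *	if (IS_ERR(va))
 *		return PTR_ERR(va);
 *	if (tee_shm_va2pa(shm, va, &pa))
 *		return -EINVAL;
 *
 * On success pa == shm->paddr + 0x80, i.e. the physical address the TEE
 * can use to reach the same bytes.
 */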
/**
 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
 * @shm:	Shared memory handle
 * @offs:	Offset from start of this shared memory
 * @pa:		Physical address to return
 * @returns 0 if offs is within the bounds of this shared memory, else an
 *	error code.
 */
int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
{
	if (offs >= shm->size)
		return -EINVAL;
	if (pa)
		*pa = shm->paddr + offs;
	return 0;
}
EXPORT_SYMBOL_GPL(tee_shm_get_pa);

/**
 * tee_shm_get_from_id() - Find shared memory object and increase reference
 * count
 * @ctx:	Context owning the shared memory
 * @id:		Id of shared memory object
 * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
 */
struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id)
{
	struct tee_device *teedev;
	struct tee_shm *shm;

	if (!ctx)
		return ERR_PTR(-EINVAL);

	teedev = ctx->teedev;
	mutex_lock(&teedev->mutex);
	shm = idr_find(&teedev->idr, id);
	if (!shm || shm->ctx != ctx)
		shm = ERR_PTR(-EINVAL);
	else if (shm->flags & TEE_SHM_DMA_BUF)
		get_dma_buf(shm->dmabuf);
	mutex_unlock(&teedev->mutex);
	return shm;
}
EXPORT_SYMBOL_GPL(tee_shm_get_from_id);

/**
 * tee_shm_get_id() - Get id of a shared memory object
 * @shm:	Shared memory handle
 * @returns id
 */
int tee_shm_get_id(struct tee_shm *shm)
{
	return shm->id;
}
EXPORT_SYMBOL_GPL(tee_shm_get_id);

/**
 * tee_shm_put() - Decrease reference count on a shared memory handle
 * @shm:	Shared memory handle
 */
void tee_shm_put(struct tee_shm *shm)
{
	if (shm->flags & TEE_SHM_DMA_BUF)
		dma_buf_put(shm->dmabuf);
}
EXPORT_SYMBOL_GPL(tee_shm_put);
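/*
 * Illustrative sketch (hypothetical caller): resolving an id passed in
 * from user space back to a shared memory object and dropping the
 * reference again when done. Only dma-buf backed objects are reference
 * counted, which is why tee_shm_put() only acts on TEE_SHM_DMA_BUF:
 *
 *	struct tee_shm *shm = tee_shm_get_from_id(ctx, id);
 *
 *	if (IS_ERR(shm))
 *		return PTR_ERR(shm);
 *	...access the buffer via tee_shm_get_va()/tee_shm_get_pa()...
 *	tee_shm_put(shm);
 */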