/*
 * Copyright (c) 2015, Linaro Limited
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "tee_private.h"

/*
 * The ops below implement a pool manager on top of a genalloc pool
 * covering a reserved, already mapped memory range. Allocations are
 * rounded up to the pool's minimum allocation unit and zeroed before
 * they are handed out.
 */
static int pool_op_gen_alloc(struct tee_shm_pool_mgr *poolm,
			     struct tee_shm *shm, size_t size)
{
	unsigned long va;
	struct gen_pool *genpool = poolm->private_data;
	size_t s = roundup(size, 1 << genpool->min_alloc_order);

	va = gen_pool_alloc(genpool, s);
	if (!va)
		return -ENOMEM;

	memset((void *)va, 0, s);
	shm->kaddr = (void *)va;
	shm->paddr = gen_pool_virt_to_phys(genpool, va);
	shm->size = s;
	return 0;
}

static void pool_op_gen_free(struct tee_shm_pool_mgr *poolm,
			     struct tee_shm *shm)
{
	gen_pool_free(poolm->private_data, (unsigned long)shm->kaddr,
		      shm->size);
	shm->kaddr = NULL;
}

static void pool_op_gen_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
{
	gen_pool_destroy(poolm->private_data);
	kfree(poolm);
}

static const struct tee_shm_pool_mgr_ops pool_ops_generic = {
	.alloc = pool_op_gen_alloc,
	.free = pool_op_gen_free,
	.destroy_poolmgr = pool_op_gen_destroy_poolmgr,
};
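
/*
 * Illustrative sketch (not part of the driver): a backend may provide its
 * own pool manager instead of the genalloc-backed one above, as long as it
 * implements the same three hooks. The example_* names below are
 * hypothetical and assume <linux/io.h> for virt_to_phys(); ->alloc must
 * fill in shm->kaddr, shm->paddr and shm->size on success, ->free must
 * release that allocation, and ->destroy_poolmgr must free the manager.
 *
 *	static int example_alloc(struct tee_shm_pool_mgr *poolm,
 *				 struct tee_shm *shm, size_t size)
 *	{
 *		void *va = kzalloc(size, GFP_KERNEL);
 *
 *		if (!va)
 *			return -ENOMEM;
 *		shm->kaddr = va;
 *		shm->paddr = virt_to_phys(va);
 *		shm->size = size;
 *		return 0;
 *	}
 *
 *	static void example_free(struct tee_shm_pool_mgr *poolm,
 *				 struct tee_shm *shm)
 *	{
 *		kfree(shm->kaddr);
 *		shm->kaddr = NULL;
 *	}
 *
 *	static void example_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
 *	{
 *		kfree(poolm);
 *	}
 *
 *	static const struct tee_shm_pool_mgr_ops example_pool_ops = {
 *		.alloc = example_alloc,
 *		.free = example_free,
 *		.destroy_poolmgr = example_destroy_poolmgr,
 *	};
 *
 * Such a manager would be allocated with kzalloc(), given ->ops =
 * &example_pool_ops and passed to tee_shm_pool_alloc() below.
 */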

/**
 * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
 * memory range
 * @priv_info:	 Information for driver private shared memory pool
 * @dmabuf_info: Information for dma-buf shared memory pool
 *
 * Start and end of the pools must be page aligned.
 *
 * Allocations with the flag TEE_SHM_DMA_BUF set will use the range supplied
 * in @dmabuf_info; others will use the range provided by @priv_info.
 * A usage sketch is included in a comment at the end of this file.
 *
 * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
 */
struct tee_shm_pool *
tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
			   struct tee_shm_pool_mem_info *dmabuf_info)
{
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	void *rc;

	/*
	 * Create the pool for driver private shared memory
	 */
	rc = tee_shm_pool_mgr_alloc_res_mem(priv_info->vaddr, priv_info->paddr,
					    priv_info->size,
					    3 /* 8 byte aligned */);
	if (IS_ERR(rc))
		return rc;
	priv_mgr = rc;

	/*
	 * Create the pool for dma_buf shared memory
	 */
	rc = tee_shm_pool_mgr_alloc_res_mem(dmabuf_info->vaddr,
					    dmabuf_info->paddr,
					    dmabuf_info->size, PAGE_SHIFT);
	if (IS_ERR(rc))
		goto err_free_priv_mgr;
	dmabuf_mgr = rc;

	rc = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(rc))
		goto err_free_dmabuf_mgr;

	return rc;

err_free_dmabuf_mgr:
	tee_shm_pool_mgr_destroy(dmabuf_mgr);
err_free_priv_mgr:
	tee_shm_pool_mgr_destroy(priv_mgr);

	return rc;
}
EXPORT_SYMBOL_GPL(tee_shm_pool_alloc_res_mem);

/**
 * tee_shm_pool_mgr_alloc_res_mem() - Create a pool manager for a reserved
 * memory range
 * @vaddr:	Kernel virtual address of the start of the range
 * @paddr:	Physical address of the start of the range
 * @size:	Size in bytes of the range
 * @min_alloc_order: Log2 of the minimum allocation unit
 *
 * The range must be page aligned. The returned manager hands out zeroed
 * chunks, carved from the range in multiples of 1 << @min_alloc_order,
 * using a genalloc best-fit allocator.
 *
 * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure.
 */
struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr,
							phys_addr_t paddr,
							size_t size,
							int min_alloc_order)
{
	const size_t page_mask = PAGE_SIZE - 1;
	struct tee_shm_pool_mgr *mgr;
	int rc;

	/* Start and end must be page aligned */
	if (vaddr & page_mask || paddr & page_mask || size & page_mask)
		return ERR_PTR(-EINVAL);

	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (!mgr)
		return ERR_PTR(-ENOMEM);

	mgr->private_data = gen_pool_create(min_alloc_order, -1);
	if (!mgr->private_data) {
		rc = -ENOMEM;
		goto err;
	}

	gen_pool_set_algo(mgr->private_data, gen_pool_best_fit, NULL);
	rc = gen_pool_add_virt(mgr->private_data, vaddr, paddr, size, -1);
	if (rc) {
		gen_pool_destroy(mgr->private_data);
		goto err;
	}

	mgr->ops = &pool_ops_generic;

	return mgr;
err:
	kfree(mgr);

	return ERR_PTR(rc);
}
EXPORT_SYMBOL_GPL(tee_shm_pool_mgr_alloc_res_mem);

static bool check_mgr_ops(struct tee_shm_pool_mgr *mgr)
{
	return mgr && mgr->ops && mgr->ops->alloc && mgr->ops->free &&
	       mgr->ops->destroy_poolmgr;
}

/**
 * tee_shm_pool_alloc() - Create a shared memory pool from already created
 * pool managers
 * @priv_mgr:	Manager for driver private shared memory allocations
 * @dmabuf_mgr:	Manager for dma-buf shared memory allocations
 *
 * Both managers must provide the alloc, free and destroy_poolmgr ops.
 *
 * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
 */
struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr,
					struct tee_shm_pool_mgr *dmabuf_mgr)
{
	struct tee_shm_pool *pool;

	if (!check_mgr_ops(priv_mgr) || !check_mgr_ops(dmabuf_mgr))
		return ERR_PTR(-EINVAL);

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->private_mgr = priv_mgr;
	pool->dma_buf_mgr = dmabuf_mgr;

	return pool;
}
EXPORT_SYMBOL_GPL(tee_shm_pool_alloc);

/**
 * tee_shm_pool_free() - Free a shared memory pool
 * @pool:	The shared memory pool to free
 *
 * There must be no remaining shared memory allocated from this pool when
 * this function is called.
 */
void tee_shm_pool_free(struct tee_shm_pool *pool)
{
	if (pool->private_mgr)
		tee_shm_pool_mgr_destroy(pool->private_mgr);
	if (pool->dma_buf_mgr)
		tee_shm_pool_mgr_destroy(pool->dma_buf_mgr);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(tee_shm_pool_free);
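
/*
 * Illustrative sketch (not part of the driver): a TEE backend driver
 * typically maps a page aligned reserved-memory carveout and then splits
 * it between the private and dma-buf pools before registering with
 * tee_device_alloc(). example_create_pool() and the half/half split are
 * hypothetical; va/pa/size are assumed to describe an already mapped
 * carveout whose half is still a whole number of pages.
 *
 *	static struct tee_shm_pool *example_create_pool(void *va,
 *							phys_addr_t pa,
 *							size_t size)
 *	{
 *		struct tee_shm_pool_mem_info priv_info = {
 *			.vaddr = (unsigned long)va,
 *			.paddr = pa,
 *			.size = size / 2,
 *		};
 *		struct tee_shm_pool_mem_info dmabuf_info = {
 *			.vaddr = (unsigned long)va + size / 2,
 *			.paddr = pa + size / 2,
 *			.size = size / 2,
 *		};
 *
 *		return tee_shm_pool_alloc_res_mem(&priv_info, &dmabuf_info);
 *	}
 *
 * The returned pool is released with tee_shm_pool_free() once all shared
 * memory allocated from it has been freed.
 */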