// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "qm.h"

#define HISI_ACC_SGL_SGE_NR_MIN		1
#define HISI_ACC_SGL_NR_MAX		256
#define HISI_ACC_SGL_ALIGN_SIZE		64
#define HISI_ACC_MEM_BLOCK_NR		5

struct acc_hw_sge {
	dma_addr_t buf;
	void *page_ctrl;
	__le32 len;
	__le32 pad;
	__le32 pad0;
	__le32 pad1;
};

/* use default sgl head size 64B */
struct hisi_acc_hw_sgl {
	dma_addr_t next_dma;
	__le16 entry_sum_in_chain;
	__le16 entry_sum_in_sgl;
	__le16 entry_length_in_sgl;
	__le16 pad0;
	__le64 pad1[5];
	struct hisi_acc_hw_sgl *next;
	struct acc_hw_sge sge_entries[];
} __aligned(1);

struct hisi_acc_sgl_pool {
	struct mem_block {
		struct hisi_acc_hw_sgl *sgl;
		dma_addr_t sgl_dma;
		size_t size;
	} mem_block[HISI_ACC_MEM_BLOCK_NR];
	u32 sgl_num_per_block;
	u32 block_num;
	u32 count;
	u32 sge_nr;
	size_t sgl_size;
};

/**
 * hisi_acc_create_sgl_pool() - Create a hw sgl pool.
 * @dev: The device to which the hw sgl pool belongs.
 * @count: Count of hisi_acc_hw_sgl in the pool.
 * @sge_nr: The count of sge in each hw sgl.
 *
 * This function creates a hw sgl pool; afterwards the user can get hw sgl
 * memory from it.
 */
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
						   u32 count, u32 sge_nr)
{
	u32 sgl_size, block_size, sgl_num_per_block, block_num, remain_sgl = 0;
	struct hisi_acc_sgl_pool *pool;
	struct mem_block *block;
	u32 i, j;

	if (!dev || !count || !sge_nr || sge_nr > HISI_ACC_SGL_SGE_NR_MAX)
		return ERR_PTR(-EINVAL);

	sgl_size = sizeof(struct acc_hw_sge) * sge_nr +
		   sizeof(struct hisi_acc_hw_sgl);
	block_size = PAGE_SIZE * (1 << (MAX_ORDER - 1));
	sgl_num_per_block = block_size / sgl_size;
	block_num = count / sgl_num_per_block;
	remain_sgl = count % sgl_num_per_block;

	if ((!remain_sgl && block_num > HISI_ACC_MEM_BLOCK_NR) ||
	    (remain_sgl > 0 && block_num > HISI_ACC_MEM_BLOCK_NR - 1))
		return ERR_PTR(-EINVAL);

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);
	block = pool->mem_block;

	/* Allocate the fully used blocks first, then the remainder block. */
	for (i = 0; i < block_num; i++) {
		block[i].sgl = dma_alloc_coherent(dev, block_size,
						  &block[i].sgl_dma,
						  GFP_KERNEL);
		if (!block[i].sgl)
			goto err_free_mem;

		block[i].size = block_size;
	}

	if (remain_sgl > 0) {
		block[i].sgl = dma_alloc_coherent(dev, remain_sgl * sgl_size,
						  &block[i].sgl_dma,
						  GFP_KERNEL);
		if (!block[i].sgl)
			goto err_free_mem;

		block[i].size = remain_sgl * sgl_size;
	}

	pool->sgl_num_per_block = sgl_num_per_block;
	pool->block_num = remain_sgl ? block_num + 1 : block_num;
	pool->count = count;
	pool->sgl_size = sgl_size;
	pool->sge_nr = sge_nr;

	return pool;

err_free_mem:
	for (j = 0; j < i; j++) {
		dma_free_coherent(dev, block_size, block[j].sgl,
				  block[j].sgl_dma);
		memset(block + j, 0, sizeof(*block));
	}
	kfree(pool);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL_GPL(hisi_acc_create_sgl_pool);
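
/*
 * Illustrative usage sketch (not part of this driver): creating a pool on a
 * probe path. The QUEUE_DEPTH and SGE_NR_PER_SGL values and the ctx layout
 * are assumptions made for this example only.
 *
 *	struct hisi_acc_sgl_pool *sgl_pool;
 *
 *	sgl_pool = hisi_acc_create_sgl_pool(dev, QUEUE_DEPTH, SGE_NR_PER_SGL);
 *	if (IS_ERR(sgl_pool))
 *		return PTR_ERR(sgl_pool);
 *	ctx->sgl_pool = sgl_pool;
 */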
/**
 * hisi_acc_free_sgl_pool() - Free a hw sgl pool.
 * @dev: The device to which the hw sgl pool belongs.
 * @pool: Pointer to the pool.
 *
 * This function frees the memory of a hw sgl pool.
 */
void hisi_acc_free_sgl_pool(struct device *dev, struct hisi_acc_sgl_pool *pool)
{
	struct mem_block *block;
	int i;

	if (!dev || !pool)
		return;

	block = pool->mem_block;

	for (i = 0; i < pool->block_num; i++)
		dma_free_coherent(dev, block[i].size, block[i].sgl,
				  block[i].sgl_dma);

	kfree(pool);
}
EXPORT_SYMBOL_GPL(hisi_acc_free_sgl_pool);

static struct hisi_acc_hw_sgl *acc_get_sgl(struct hisi_acc_sgl_pool *pool,
					   u32 index, dma_addr_t *hw_sgl_dma)
{
	struct mem_block *block;
	u32 block_index, offset;

	if (!pool || !hw_sgl_dma || index >= pool->count)
		return ERR_PTR(-EINVAL);

	block = pool->mem_block;
	block_index = index / pool->sgl_num_per_block;
	offset = index % pool->sgl_num_per_block;

	*hw_sgl_dma = block[block_index].sgl_dma + pool->sgl_size * offset;
	return (void *)block[block_index].sgl + pool->sgl_size * offset;
}

static void sg_map_to_hw_sg(struct scatterlist *sgl,
			    struct acc_hw_sge *hw_sge)
{
	hw_sge->buf = sg_dma_address(sgl);
	hw_sge->len = cpu_to_le32(sg_dma_len(sgl));
}

static void inc_hw_sgl_sge(struct hisi_acc_hw_sgl *hw_sgl)
{
	u16 var = le16_to_cpu(hw_sgl->entry_sum_in_sgl);

	var++;
	hw_sgl->entry_sum_in_sgl = cpu_to_le16(var);
}

static void update_hw_sgl_sum_sge(struct hisi_acc_hw_sgl *hw_sgl, u16 sum)
{
	hw_sgl->entry_sum_in_chain = cpu_to_le16(sum);
}

/**
 * hisi_acc_sg_buf_map_to_hw_sgl - Map a scatterlist to a hw sgl.
 * @dev: The device to which the hw sgl belongs.
 * @sgl: Scatterlist which will be mapped to the hw sgl.
 * @pool: Pool in which the hw sgl memory is allocated.
 * @index: Index of the hisi_acc_hw_sgl in the pool.
 * @hw_sgl_dma: The dma address of the allocated hw sgl.
 *
 * This function builds a hw sgl according to the input scatterlist; the user
 * can use hw_sgl_dma as src/dst in its BD. Only a single hw sgl is currently
 * supported.
 */
struct hisi_acc_hw_sgl *
hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
			      struct scatterlist *sgl,
			      struct hisi_acc_sgl_pool *pool,
			      u32 index, dma_addr_t *hw_sgl_dma)
{
	struct hisi_acc_hw_sgl *curr_hw_sgl;
	dma_addr_t curr_sgl_dma = 0;
	struct acc_hw_sge *curr_hw_sge;
	struct scatterlist *sg;
	int i, sg_n, sg_n_mapped;

	if (!dev || !sgl || !pool || !hw_sgl_dma)
		return ERR_PTR(-EINVAL);

	sg_n = sg_nents(sgl);

	sg_n_mapped = dma_map_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
	if (!sg_n_mapped)
		return ERR_PTR(-EINVAL);

	/* A single hw sgl can only hold pool->sge_nr entries. */
	if (sg_n_mapped > pool->sge_nr) {
		dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
		return ERR_PTR(-EINVAL);
	}

	curr_hw_sgl = acc_get_sgl(pool, index, &curr_sgl_dma);
	if (IS_ERR(curr_hw_sgl)) {
		dma_unmap_sg(dev, sgl, sg_n, DMA_BIDIRECTIONAL);
		return ERR_PTR(-ENOMEM);
	}

	curr_hw_sgl->entry_length_in_sgl = cpu_to_le16(pool->sge_nr);
	curr_hw_sge = curr_hw_sgl->sge_entries;

	for_each_sg(sgl, sg, sg_n_mapped, i) {
		sg_map_to_hw_sg(sg, curr_hw_sge);
		inc_hw_sgl_sge(curr_hw_sgl);
		curr_hw_sge++;
	}

	update_hw_sgl_sum_sge(curr_hw_sgl, pool->sge_nr);
	*hw_sgl_dma = curr_sgl_dma;

	return curr_hw_sgl;
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_map_to_hw_sgl);
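
/*
 * Illustrative usage sketch (not part of this driver): mapping a request's
 * scatterlist and handing the resulting dma address to the hardware. The
 * request layout (req->src, req->req_id) and the BD field name are
 * assumptions made for this example only.
 *
 *	struct hisi_acc_hw_sgl *hw_sgl;
 *	dma_addr_t hw_sgl_dma;
 *
 *	hw_sgl = hisi_acc_sg_buf_map_to_hw_sgl(dev, req->src, ctx->sgl_pool,
 *					       req->req_id, &hw_sgl_dma);
 *	if (IS_ERR(hw_sgl))
 *		return PTR_ERR(hw_sgl);
 *
 *	bd->source_addr = cpu_to_le64(hw_sgl_dma);
 *	... submit the BD; the hw sgl is unmapped on the completion path
 *	with hisi_acc_sg_buf_unmap() below ...
 */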
/**
 * hisi_acc_sg_buf_unmap() - Unmap an allocated hw sgl.
 * @dev: The device to which the hw sgl belongs.
 * @sgl: Related scatterlist.
 * @hw_sgl: Virtual address of the hw sgl.
 *
 * This function unmaps an allocated hw sgl.
 */
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
			   struct hisi_acc_hw_sgl *hw_sgl)
{
	if (!dev || !sgl || !hw_sgl)
		return;

	dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);

	hw_sgl->entry_sum_in_chain = 0;
	hw_sgl->entry_sum_in_sgl = 0;
	hw_sgl->entry_length_in_sgl = 0;
}
EXPORT_SYMBOL_GPL(hisi_acc_sg_buf_unmap);
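
/*
 * Illustrative teardown sketch (not part of this driver): the completion path
 * unmaps the hw sgl it obtained from the pool, and the pool itself is freed
 * on the driver's release path once no hw sgl is still in use. The req and
 * ctx layouts are assumptions made for this example only.
 *
 *	hisi_acc_sg_buf_unmap(dev, req->src, req->hw_sgl);
 *
 *	...
 *
 *	hisi_acc_free_sgl_pool(dev, ctx->sgl_pool);
 *	ctx->sgl_pool = NULL;
 */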