// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2014 - 2022 Intel Corporation */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "qat_bl.h"
#include "qat_crypto.h"

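/*
 * qat_bl_free_bufl() - unmap and free the firmware buffer lists built by
 * qat_bl_sgl_to_bufl().
 *
 * The source list is unmapped with the same direction used at map time:
 * DMA_TO_DEVICE for out-of-place requests, DMA_BIDIRECTIONAL for in-place
 * ones, where source and destination share a single mapping. Buffer lists
 * that live in the space embedded in struct qat_request_buffs
 * (sgl_src_valid/sgl_dst_valid set) are not kfree'd.
 */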
void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
		      struct qat_request_buffs *buf)
{
	struct device *dev = &GET_DEV(accel_dev);
	struct qat_alg_buf_list *bl = buf->bl;
	struct qat_alg_buf_list *blout = buf->blout;
	dma_addr_t blp = buf->blp;
	dma_addr_t blpout = buf->bloutp;
	size_t sz = buf->sz;
	size_t sz_out = buf->sz_out;
	int bl_dma_dir;
	int i;

	bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->buffers[i].addr,
				 bl->buffers[i].len, bl_dma_dir);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	if (!buf->sgl_src_valid)
		kfree(bl);

	/* Out-of-place requests carry a separate destination list */
	if (blp != blpout) {
		for (i = 0; i < blout->num_mapped_bufs; i++) {
			dma_unmap_single(dev, blout->buffers[i].addr,
					 blout->buffers[i].len,
					 DMA_FROM_DEVICE);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);

		if (!buf->sgl_dst_valid)
			kfree(blout);
	}
}

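/*
 * __qat_bl_sgl_to_bufl() - convert source and destination scatterlists into
 * the flat buffer-list format consumed by the firmware.
 *
 * Each non-empty scatterlist entry is DMA-mapped individually and its
 * address and length recorded in a qat_alg_buf_list, which is itself mapped
 * DMA_TO_DEVICE so the device can fetch the descriptor. Lists with at most
 * QAT_MAX_BUFF_DESC entries reuse the space pre-allocated inside
 * struct qat_request_buffs to avoid an allocation in the data path.
 * @sskip and @dskip bytes are skipped at the start of the source and
 * destination lists respectively, and a caller-provided, already mapped
 * @extra_dst_buff may be appended to the destination list.
 */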
static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
				struct scatterlist *sgl,
				struct scatterlist *sglout,
				struct qat_request_buffs *buf,
				dma_addr_t extra_dst_buff,
				size_t sz_extra_dst_buff,
				unsigned int sskip,
				unsigned int dskip,
				gfp_t flags)
{
	struct device *dev = &GET_DEV(accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp = DMA_MAPPING_ERROR;
	dma_addr_t bloutp = DMA_MAPPING_ERROR;
	struct scatterlist *sg;
	size_t sz_out, sz = struct_size(bufl, buffers, n);
	int node = dev_to_node(&GET_DEV(accel_dev));
	unsigned int left;
	int bufl_dma_dir;

	if (unlikely(!n))
		return -EINVAL;

	buf->sgl_src_valid = false;
	buf->sgl_dst_valid = false;

	if (n > QAT_MAX_BUFF_DESC) {
		bufl = kzalloc_node(sz, flags, node);
		if (unlikely(!bufl))
			return -ENOMEM;
	} else {
		bufl = &buf->sgl_src.sgl_hdr;
		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
		buf->sgl_src_valid = true;
	}

	bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	/* Poison entries so the error path can tell mapped from unmapped */
	for (i = 0; i < n; i++)
		bufl->buffers[i].addr = DMA_MAPPING_ERROR;

	left = sskip;

	/* Map each entry, skipping the first sskip bytes of the source */
	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		if (left >= sg->length) {
			left -= sg->length;
			continue;
		}
		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
						       sg->length - left,
						       bufl_dma_dir);
		bufl->buffers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
			goto err_in;
		sg_nctr++;
		if (left) {
			bufl->buffers[y].len -= left;
			left = 0;
		}
	}
	bufl->num_bufs = sg_nctr;
	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;
	buf->bl = bufl;
	buf->blp = blp;
	buf->sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *buffers;
		int extra_buff = extra_dst_buff ? 1 : 0;
		int n_sglout = sg_nents(sglout);

		n = n_sglout + extra_buff;
		sz_out = struct_size(buflout, buffers, n);
		left = dskip;

		sg_nctr = 0;

		if (n > QAT_MAX_BUFF_DESC) {
			buflout = kzalloc_node(sz_out, flags, node);
			if (unlikely(!buflout))
				goto err_in;
		} else {
			buflout = &buf->sgl_dst.sgl_hdr;
			memset(buflout, 0, sizeof(struct qat_alg_buf_list));
			buf->sgl_dst_valid = true;
		}

		buffers = buflout->buffers;
		for (i = 0; i < n; i++)
			buffers[i].addr = DMA_MAPPING_ERROR;

		for_each_sg(sglout, sg, n_sglout, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			if (left >= sg->length) {
				left -= sg->length;
				continue;
			}
			buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
							 sg->length - left,
							 DMA_FROM_DEVICE);
			if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
				goto err_out;
			buffers[y].len = sg->length;
			sg_nctr++;
			if (left) {
				buffers[y].len -= left;
				left = 0;
			}
		}
		/* Append the caller's extra buffer, if any, after the data */
		if (extra_buff) {
			buffers[sg_nctr].addr = extra_dst_buff;
			buffers[sg_nctr].len = sz_extra_dst_buff;
		}

		buflout->num_bufs = sg_nctr + extra_buff;
		buflout->num_mapped_bufs = sg_nctr;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err_out;
		buf->blout = buflout;
		buf->bloutp = bloutp;
		buf->sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		buf->bloutp = buf->blp;
		buf->sz_out = 0;
	}
	return 0;

err_out:
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);

	/* Stop at the caller-owned extra_dst_buff; it is not ours to unmap */
	n = sg_nents(sglout);
	for (i = 0; i < n; i++) {
		if (buflout->buffers[i].addr == extra_dst_buff)
			break;
		if (!dma_mapping_error(dev, buflout->buffers[i].addr))
			dma_unmap_single(dev, buflout->buffers[i].addr,
					 buflout->buffers[i].len,
					 DMA_FROM_DEVICE);
	}

	if (!buf->sgl_dst_valid)
		kfree(buflout);

err_in:
	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	n = sg_nents(sgl);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
			dma_unmap_single(dev, bufl->buffers[i].addr,
					 bufl->buffers[i].len,
					 bufl_dma_dir);

	if (!buf->sgl_src_valid)
		kfree(bufl);

	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}

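/*
 * qat_bl_sgl_to_bufl() - public wrapper around __qat_bl_sgl_to_bufl().
 *
 * @params is optional; when NULL, no extra destination buffer is appended
 * and no bytes are skipped at the start of either scatterlist.
 */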
int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
		       struct scatterlist *sgl,
		       struct scatterlist *sglout,
		       struct qat_request_buffs *buf,
		       struct qat_sgl_to_bufl_params *params,
		       gfp_t flags)
{
	dma_addr_t extra_dst_buff = 0;
	size_t sz_extra_dst_buff = 0;
	unsigned int sskip = 0;
	unsigned int dskip = 0;

	if (params) {
		extra_dst_buff = params->extra_dst_buff;
		sz_extra_dst_buff = params->sz_extra_dst_buff;
		sskip = params->sskip;
		dskip = params->dskip;
	}

	return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
				    extra_dst_buff, sz_extra_dst_buff,
				    sskip, dskip, flags);
}

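/* Unmap every successfully mapped entry of a list built by qat_bl_sgl_map() */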
static void qat_bl_sgl_unmap(struct adf_accel_dev *accel_dev,
			     struct qat_alg_buf_list *bl)
{
	struct device *dev = &GET_DEV(accel_dev);
	int n = bl->num_bufs;
	int i;

	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bl->buffers[i].addr))
			dma_unmap_single(dev, bl->buffers[i].addr,
					 bl->buffers[i].len, DMA_FROM_DEVICE);
}

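/*
 * qat_bl_sgl_map() - allocate a buffer list for @sgl and DMA-map each of
 * its non-empty entries DMA_FROM_DEVICE. On failure everything mapped so
 * far is unwound and *bl is set to NULL.
 */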
static int qat_bl_sgl_map(struct adf_accel_dev *accel_dev,
			  struct scatterlist *sgl,
			  struct qat_alg_buf_list **bl)
{
	struct device *dev = &GET_DEV(accel_dev);
	struct qat_alg_buf_list *bufl;
	int node = dev_to_node(dev);
	struct scatterlist *sg;
	int n, i, sg_nctr;
	size_t sz;

	n = sg_nents(sgl);
	sz = struct_size(bufl, buffers, n);
	bufl = kzalloc_node(sz, GFP_KERNEL, node);
	if (unlikely(!bufl))
		return -ENOMEM;

	for (i = 0; i < n; i++)
		bufl->buffers[i].addr = DMA_MAPPING_ERROR;

	sg_nctr = 0;
	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg),
						       sg->length,
						       DMA_FROM_DEVICE);
		bufl->buffers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
			goto err_map;
		sg_nctr++;
	}
	bufl->num_bufs = sg_nctr;
	bufl->num_mapped_bufs = sg_nctr;

	*bl = bufl;

	return 0;

err_map:
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
			dma_unmap_single(dev, bufl->buffers[i].addr,
					 bufl->buffers[i].len,
					 DMA_FROM_DEVICE);
	kfree(bufl);
	*bl = NULL;

	return -ENOMEM;
}

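/*
 * Undo qat_bl_sgl_alloc_map(): unmap the buffer list, optionally free it,
 * and free the scatterlist.
 */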
static void qat_bl_sgl_free_unmap(struct adf_accel_dev *accel_dev,
				  struct scatterlist *sgl,
				  struct qat_alg_buf_list *bl,
				  bool free_bl)
{
	if (bl) {
		qat_bl_sgl_unmap(accel_dev, bl);

		if (free_bl)
			kfree(bl);
	}
	if (sgl)
		sgl_free(sgl);
}

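/*
 * qat_bl_sgl_alloc_map() - allocate a @dlen byte scatterlist with
 * sgl_alloc() and build a DMA-mapped buffer list for it via
 * qat_bl_sgl_map().
 */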
static int qat_bl_sgl_alloc_map(struct adf_accel_dev *accel_dev,
				struct scatterlist **sgl,
				struct qat_alg_buf_list **bl,
				unsigned int dlen,
				gfp_t gfp)
{
	struct scatterlist *dst;
	int ret;

	dst = sgl_alloc(dlen, gfp, NULL);
	if (!dst) {
		dev_err(&GET_DEV(accel_dev), "sgl_alloc failed\n");
		return -ENOMEM;
	}

	ret = qat_bl_sgl_map(accel_dev, dst, bl);
	if (ret)
		goto err;

	*sgl = dst;

	return 0;

err:
	sgl_free(dst);
	*sgl = NULL;
	return ret;
}

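/*
 * qat_bl_realloc_map_new_dst() - replace the destination of a request with
 * a freshly allocated @dlen byte scatterlist.
 *
 * The new scatterlist and its firmware descriptor are allocated and mapped
 * first; only then is the old destination unmapped and freed, so the
 * request state stays consistent if anything fails.
 */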
int qat_bl_realloc_map_new_dst(struct adf_accel_dev *accel_dev,
			       struct scatterlist **sg,
			       unsigned int dlen,
			       struct qat_request_buffs *qat_bufs,
			       gfp_t gfp)
{
	struct device *dev = &GET_DEV(accel_dev);
	dma_addr_t new_blp = DMA_MAPPING_ERROR;
	struct qat_alg_buf_list *new_bl;
	struct scatterlist *new_sg;
	size_t new_bl_size;
	int ret;

	ret = qat_bl_sgl_alloc_map(accel_dev, &new_sg, &new_bl, dlen, gfp);
	if (ret)
		return ret;

	new_bl_size = struct_size(new_bl, buffers, new_bl->num_bufs);

	/* Map new firmware SGL descriptor */
	new_blp = dma_map_single(dev, new_bl, new_bl_size, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, new_blp)))
		goto err;

	/* Unmap old firmware SGL descriptor */
	dma_unmap_single(dev, qat_bufs->bloutp, qat_bufs->sz_out, DMA_TO_DEVICE);

	/* Free and unmap old scatterlist */
	qat_bl_sgl_free_unmap(accel_dev, *sg, qat_bufs->blout,
			      !qat_bufs->sgl_dst_valid);

	qat_bufs->sgl_dst_valid = false;
	qat_bufs->blout = new_bl;
	qat_bufs->bloutp = new_blp;
	qat_bufs->sz_out = new_bl_size;

	*sg = new_sg;

	return 0;
err:
	qat_bl_sgl_free_unmap(accel_dev, new_sg, new_bl, true);

	if (!dma_mapping_error(dev, new_blp))
		dma_unmap_single(dev, new_blp, new_bl_size, DMA_TO_DEVICE);

	return -ENOMEM;
}