// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <crypto/internal/acompress.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include "zip.h"

/* hisi_zip_sqe dw3 */
#define HZIP_BD_STATUS_M		GENMASK(7, 0)
/* hisi_zip_sqe dw7 */
#define HZIP_IN_SGE_DATA_OFFSET_M	GENMASK(23, 0)
#define HZIP_SQE_TYPE_M			GENMASK(31, 28)
/* hisi_zip_sqe dw8 */
#define HZIP_OUT_SGE_DATA_OFFSET_M	GENMASK(23, 0)
/* hisi_zip_sqe dw9 */
#define HZIP_REQ_TYPE_M			GENMASK(7, 0)
#define HZIP_ALG_TYPE_ZLIB		0x02
#define HZIP_ALG_TYPE_GZIP		0x03
#define HZIP_BUF_TYPE_M			GENMASK(11, 8)
#define HZIP_PBUFFER			0x0
#define HZIP_SGL			0x1

#define HZIP_ZLIB_HEAD_SIZE		2
#define HZIP_GZIP_HEAD_SIZE		10

#define GZIP_HEAD_FHCRC_BIT		BIT(1)
#define GZIP_HEAD_FEXTRA_BIT		BIT(2)
#define GZIP_HEAD_FNAME_BIT		BIT(3)
#define GZIP_HEAD_FCOMMENT_BIT		BIT(4)

#define GZIP_HEAD_FLG_SHIFT		3
#define GZIP_HEAD_FEXTRA_SHIFT		10
#define GZIP_HEAD_FEXTRA_XLEN		2UL
#define GZIP_HEAD_FHCRC_SIZE		2

#define HZIP_GZIP_HEAD_BUF		256
#define HZIP_ALG_PRIORITY		300
#define HZIP_SGL_SGE_NR			10

#define HZIP_ALG_ZLIB			GENMASK(1, 0)
#define HZIP_ALG_GZIP			GENMASK(3, 2)

static const u8 zlib_head[HZIP_ZLIB_HEAD_SIZE] = {0x78, 0x9c};
static const u8 gzip_head[HZIP_GZIP_HEAD_SIZE] = {
	0x1f, 0x8b, 0x08, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x03
};

enum hisi_zip_alg_type {
	HZIP_ALG_TYPE_COMP = 0,
	HZIP_ALG_TYPE_DECOMP = 1,
};

enum {
	HZIP_QPC_COMP,
	HZIP_QPC_DECOMP,
	HZIP_CTX_Q_NUM
};

#define COMP_NAME_TO_TYPE(alg_name)					\
	(!strcmp((alg_name), "zlib-deflate") ? HZIP_ALG_TYPE_ZLIB :	\
	 !strcmp((alg_name), "gzip") ? HZIP_ALG_TYPE_GZIP : 0)

#define TO_HEAD_SIZE(req_type)						\
	(((req_type) == HZIP_ALG_TYPE_ZLIB) ? sizeof(zlib_head) :	\
	 ((req_type) == HZIP_ALG_TYPE_GZIP) ? sizeof(gzip_head) : 0)

#define TO_HEAD(req_type)						\
	(((req_type) == HZIP_ALG_TYPE_ZLIB) ? zlib_head :		\
	 ((req_type) == HZIP_ALG_TYPE_GZIP) ? gzip_head : NULL)

struct hisi_zip_req {
	struct acomp_req *req;
	u32 sskip;
	u32 dskip;
	struct hisi_acc_hw_sgl *hw_src;
	struct hisi_acc_hw_sgl *hw_dst;
	dma_addr_t dma_src;
	dma_addr_t dma_dst;
	u16 req_id;
};

struct hisi_zip_req_q {
	struct hisi_zip_req *q;
	unsigned long *req_bitmap;
	rwlock_t req_lock;
	u16 size;
};

struct hisi_zip_qp_ctx {
	struct hisi_qp *qp;
	struct hisi_zip_req_q req_q;
	struct hisi_acc_sgl_pool *sgl_pool;
	struct hisi_zip *zip_dev;
	struct hisi_zip_ctx *ctx;
};

struct hisi_zip_sqe_ops {
	u8 sqe_type;
	void (*fill_addr)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_buf_size)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_buf_type)(struct hisi_zip_sqe *sqe, u8 buf_type);
	void (*fill_req_type)(struct hisi_zip_sqe *sqe, u8 req_type);
	void (*fill_tag)(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req);
	void (*fill_sqe_type)(struct hisi_zip_sqe *sqe, u8 sqe_type);
	u32 (*get_tag)(struct hisi_zip_sqe *sqe);
	u32 (*get_status)(struct hisi_zip_sqe *sqe);
	u32 (*get_dstlen)(struct hisi_zip_sqe *sqe);
};

struct hisi_zip_ctx {
	struct hisi_zip_qp_ctx qp_ctx[HZIP_CTX_Q_NUM];
	const struct hisi_zip_sqe_ops *ops;
};

static int sgl_sge_nr_set(const char *val, const struct kernel_param *kp)
{
	int ret;
	u16 n;

	if (!val)
		return -EINVAL;

	ret = kstrtou16(val, 10, &n);
	if (ret || n == 0 || n > HISI_ACC_SGL_SGE_NR_MAX)
		return -EINVAL;

	return param_set_ushort(val, kp);
}

static const struct kernel_param_ops sgl_sge_nr_ops = {
	.set = sgl_sge_nr_set,
	.get = param_get_ushort,
};

static u16 sgl_sge_nr = HZIP_SGL_SGE_NR;
module_param_cb(sgl_sge_nr, &sgl_sge_nr_ops, &sgl_sge_nr, 0444);
MODULE_PARM_DESC(sgl_sge_nr, "Number of sge in sgl (1-255)");

static u32 get_extra_field_size(const u8 *start)
{
	return *((u16 *)start) + GZIP_HEAD_FEXTRA_XLEN;
}

static u32 get_name_field_size(const u8 *start)
{
	return strlen(start) + 1;
}

static u32 get_comment_field_size(const u8 *start)
{
	return strlen(start) + 1;
}

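/*
 * Parse the gzip header in @src: start from the fixed 10-byte part and add
 * the optional FEXTRA, FNAME, FCOMMENT and FHCRC fields indicated by the
 * FLG byte, returning the total number of header bytes to skip.
 */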
static u32 __get_gzip_head_size(const u8 *src)
{
	u8 head_flg = *(src + GZIP_HEAD_FLG_SHIFT);
	u32 size = GZIP_HEAD_FEXTRA_SHIFT;

	if (head_flg & GZIP_HEAD_FEXTRA_BIT)
		size += get_extra_field_size(src + size);
	if (head_flg & GZIP_HEAD_FNAME_BIT)
		size += get_name_field_size(src + size);
	if (head_flg & GZIP_HEAD_FCOMMENT_BIT)
		size += get_comment_field_size(src + size);
	if (head_flg & GZIP_HEAD_FHCRC_BIT)
		size += GZIP_HEAD_FHCRC_SIZE;

	return size;
}

static u32 __maybe_unused get_gzip_head_size(struct scatterlist *sgl)
{
	char buf[HZIP_GZIP_HEAD_BUF];

	sg_copy_to_buffer(sgl, sg_nents(sgl), buf, sizeof(buf));

	return __get_gzip_head_size(buf);
}

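/*
 * Copy the fixed zlib/gzip header for @req_type to the start of the
 * destination scatterlist; the hardware then only has to produce the raw
 * deflate stream after it.
 */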
static int add_comp_head(struct scatterlist *dst, u8 req_type)
{
	int head_size = TO_HEAD_SIZE(req_type);
	const u8 *head = TO_HEAD(req_type);
	int ret;

	ret = sg_copy_from_buffer(dst, sg_nents(dst), head, head_size);
	if (unlikely(ret != head_size)) {
		pr_err("the head size of the dst buffer is wrong (%d)!\n", ret);
		return -ENOMEM;
	}

	return head_size;
}

static int get_comp_head_size(struct acomp_req *acomp_req, u8 req_type)
{
	if (unlikely(!acomp_req->src || !acomp_req->slen))
		return -EINVAL;

	if (unlikely(req_type == HZIP_ALG_TYPE_GZIP &&
		     acomp_req->slen < GZIP_HEAD_FEXTRA_SHIFT))
		return -EINVAL;

	switch (req_type) {
	case HZIP_ALG_TYPE_ZLIB:
		return TO_HEAD_SIZE(HZIP_ALG_TYPE_ZLIB);
	case HZIP_ALG_TYPE_GZIP:
		return TO_HEAD_SIZE(HZIP_ALG_TYPE_GZIP);
	default:
		pr_err("request type is not supported!\n");
		return -EINVAL;
	}
}

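/*
 * Reserve a free slot in the per-queue request cache and record how many
 * header bytes to skip in the source (decompression) or destination
 * (compression) buffer.
 */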
static struct hisi_zip_req *hisi_zip_create_req(struct acomp_req *req,
						struct hisi_zip_qp_ctx *qp_ctx,
						size_t head_size, bool is_comp)
{
	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
	struct hisi_zip_req *q = req_q->q;
	struct hisi_zip_req *req_cache;
	int req_id;

	write_lock(&req_q->req_lock);

	req_id = find_first_zero_bit(req_q->req_bitmap, req_q->size);
	if (req_id >= req_q->size) {
		write_unlock(&req_q->req_lock);
		dev_dbg(&qp_ctx->qp->qm->pdev->dev, "req cache is full!\n");
		return ERR_PTR(-EAGAIN);
	}
	set_bit(req_id, req_q->req_bitmap);

	write_unlock(&req_q->req_lock);

	req_cache = q + req_id;
	req_cache->req_id = req_id;
	req_cache->req = req;

	if (is_comp) {
		req_cache->sskip = 0;
		req_cache->dskip = head_size;
	} else {
		req_cache->sskip = head_size;
		req_cache->dskip = 0;
	}

	return req_cache;
}

static void hisi_zip_remove_req(struct hisi_zip_qp_ctx *qp_ctx,
				struct hisi_zip_req *req)
{
	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;

	write_lock(&req_q->req_lock);
	clear_bit(req->req_id, req_q->req_bitmap);
	write_unlock(&req_q->req_lock);
}

static void hisi_zip_fill_addr(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
{
	sqe->source_addr_l = lower_32_bits(req->dma_src);
	sqe->source_addr_h = upper_32_bits(req->dma_src);
	sqe->dest_addr_l = lower_32_bits(req->dma_dst);
	sqe->dest_addr_h = upper_32_bits(req->dma_dst);
}

static void hisi_zip_fill_buf_size(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
{
	struct acomp_req *a_req = req->req;

	sqe->input_data_length = a_req->slen - req->sskip;
	sqe->dest_avail_out = a_req->dlen - req->dskip;
	sqe->dw7 = FIELD_PREP(HZIP_IN_SGE_DATA_OFFSET_M, req->sskip);
	sqe->dw8 = FIELD_PREP(HZIP_OUT_SGE_DATA_OFFSET_M, req->dskip);
}

static void hisi_zip_fill_buf_type(struct hisi_zip_sqe *sqe, u8 buf_type)
{
	u32 val;

	val = sqe->dw9 & ~HZIP_BUF_TYPE_M;
	val |= FIELD_PREP(HZIP_BUF_TYPE_M, buf_type);
	sqe->dw9 = val;
}

static void hisi_zip_fill_req_type(struct hisi_zip_sqe *sqe, u8 req_type)
{
	u32 val;

	val = sqe->dw9 & ~HZIP_REQ_TYPE_M;
	val |= FIELD_PREP(HZIP_REQ_TYPE_M, req_type);
	sqe->dw9 = val;
}

static void hisi_zip_fill_tag_v1(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
{
	sqe->dw13 = req->req_id;
}

static void hisi_zip_fill_tag_v2(struct hisi_zip_sqe *sqe, struct hisi_zip_req *req)
{
	sqe->dw26 = req->req_id;
}

static void hisi_zip_fill_sqe_type(struct hisi_zip_sqe *sqe, u8 sqe_type)
{
	u32 val;

	val = sqe->dw7 & ~HZIP_SQE_TYPE_M;
	val |= FIELD_PREP(HZIP_SQE_TYPE_M, sqe_type);
	sqe->dw7 = val;
}

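/* Build the hardware SQE for @req using the version-specific ops. */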
static void hisi_zip_fill_sqe(struct hisi_zip_ctx *ctx, struct hisi_zip_sqe *sqe,
			      u8 req_type, struct hisi_zip_req *req)
{
	const struct hisi_zip_sqe_ops *ops = ctx->ops;

	memset(sqe, 0, sizeof(struct hisi_zip_sqe));

	ops->fill_addr(sqe, req);
	ops->fill_buf_size(sqe, req);
	ops->fill_buf_type(sqe, HZIP_SGL);
	ops->fill_req_type(sqe, req_type);
	ops->fill_tag(sqe, req);
	ops->fill_sqe_type(sqe, ops->sqe_type);
}

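/*
 * Map the source and destination scatterlists to hardware SGLs from the
 * per-queue pool (two slots per request id) and submit the SQE. Returns
 * -EINPROGRESS on success; completion is reported through the qp callback.
 */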
static int hisi_zip_do_work(struct hisi_zip_req *req,
			    struct hisi_zip_qp_ctx *qp_ctx)
{
	struct hisi_acc_sgl_pool *pool = qp_ctx->sgl_pool;
	struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
	struct acomp_req *a_req = req->req;
	struct hisi_qp *qp = qp_ctx->qp;
	struct device *dev = &qp->qm->pdev->dev;
	struct hisi_zip_sqe zip_sqe;
	int ret;

	if (unlikely(!a_req->src || !a_req->slen || !a_req->dst || !a_req->dlen))
		return -EINVAL;

	req->hw_src = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->src, pool,
						    req->req_id << 1, &req->dma_src);
	if (IS_ERR(req->hw_src)) {
		dev_err(dev, "failed to map the src buffer to hw sgl (%ld)!\n",
			PTR_ERR(req->hw_src));
		return PTR_ERR(req->hw_src);
	}

	req->hw_dst = hisi_acc_sg_buf_map_to_hw_sgl(dev, a_req->dst, pool,
						    (req->req_id << 1) + 1,
						    &req->dma_dst);
	if (IS_ERR(req->hw_dst)) {
		ret = PTR_ERR(req->hw_dst);
		dev_err(dev, "failed to map the dst buffer to hw sgl (%d)!\n",
			ret);
		goto err_unmap_input;
	}

	hisi_zip_fill_sqe(qp_ctx->ctx, &zip_sqe, qp->req_type, req);

	/* send command to start a task */
	atomic64_inc(&dfx->send_cnt);
	ret = hisi_qp_send(qp, &zip_sqe);
	if (unlikely(ret < 0)) {
		atomic64_inc(&dfx->send_busy_cnt);
		ret = -EAGAIN;
		dev_dbg_ratelimited(dev, "failed to send request!\n");
		goto err_unmap_output;
	}

	return -EINPROGRESS;

err_unmap_output:
	hisi_acc_sg_buf_unmap(dev, a_req->dst, req->hw_dst);
err_unmap_input:
	hisi_acc_sg_buf_unmap(dev, a_req->src, req->hw_src);
	return ret;
}

static u32 hisi_zip_get_tag_v1(struct hisi_zip_sqe *sqe)
{
	return sqe->dw13;
}

static u32 hisi_zip_get_tag_v2(struct hisi_zip_sqe *sqe)
{
	return sqe->dw26;
}

static u32 hisi_zip_get_status(struct hisi_zip_sqe *sqe)
{
	return sqe->dw3 & HZIP_BD_STATUS_M;
}

static u32 hisi_zip_get_dstlen(struct hisi_zip_sqe *sqe)
{
	return sqe->produced;
}

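/*
 * Completion callback: find the request from the tag carried in the SQE,
 * check the hardware status, unmap both buffers, add the header size back
 * to the output length for compression, and complete the acomp request.
 */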
static void hisi_zip_acomp_cb(struct hisi_qp *qp, void *data)
{
	struct hisi_zip_qp_ctx *qp_ctx = qp->qp_ctx;
	const struct hisi_zip_sqe_ops *ops = qp_ctx->ctx->ops;
	struct hisi_zip_dfx *dfx = &qp_ctx->zip_dev->dfx;
	struct hisi_zip_req_q *req_q = &qp_ctx->req_q;
	struct device *dev = &qp->qm->pdev->dev;
	struct hisi_zip_sqe *sqe = data;
	u32 tag = ops->get_tag(sqe);
	struct hisi_zip_req *req = req_q->q + tag;
	struct acomp_req *acomp_req = req->req;
	u32 status, dlen, head_size;
	int err = 0;

	atomic64_inc(&dfx->recv_cnt);
	status = ops->get_status(sqe);
	if (unlikely(status != 0 && status != HZIP_NC_ERR)) {
		dev_err(dev, "%scompress fail in qp%u: %u, output: %u\n",
			(qp->alg_type == 0) ? "" : "de", qp->qp_id, status,
			sqe->produced);
		atomic64_inc(&dfx->err_bd_cnt);
		err = -EIO;
	}

	dlen = ops->get_dstlen(sqe);

	hisi_acc_sg_buf_unmap(dev, acomp_req->src, req->hw_src);
	hisi_acc_sg_buf_unmap(dev, acomp_req->dst, req->hw_dst);

	head_size = (qp->alg_type == 0) ? TO_HEAD_SIZE(qp->req_type) : 0;
	acomp_req->dlen = dlen + head_size;

	if (acomp_req->base.complete)
		acomp_request_complete(acomp_req, err);

	hisi_zip_remove_req(qp_ctx, req);
}

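/*
 * acomp compress entry: write the stream header into the destination,
 * reserve a request slot and hand the job to the hardware.
 */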
static int hisi_zip_acompress(struct acomp_req *acomp_req)
{
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
	struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_COMP];
	struct device *dev = &qp_ctx->qp->qm->pdev->dev;
	struct hisi_zip_req *req;
	int head_size;
	int ret;

	/* let's output compression head now */
	head_size = add_comp_head(acomp_req->dst, qp_ctx->qp->req_type);
	if (unlikely(head_size < 0)) {
		dev_err_ratelimited(dev, "failed to add comp head (%d)!\n",
				    head_size);
		return head_size;
	}

	req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, true);
	if (IS_ERR(req))
		return PTR_ERR(req);

	ret = hisi_zip_do_work(req, qp_ctx);
	if (unlikely(ret != -EINPROGRESS)) {
		dev_info_ratelimited(dev, "failed to do compress (%d)!\n", ret);
		hisi_zip_remove_req(qp_ctx, req);
	}

	return ret;
}

static int hisi_zip_adecompress(struct acomp_req *acomp_req)
{
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(acomp_req->base.tfm);
	struct hisi_zip_qp_ctx *qp_ctx = &ctx->qp_ctx[HZIP_QPC_DECOMP];
	struct device *dev = &qp_ctx->qp->qm->pdev->dev;
	struct hisi_zip_req *req;
	int head_size, ret;

	head_size = get_comp_head_size(acomp_req, qp_ctx->qp->req_type);
	if (unlikely(head_size < 0)) {
		dev_err_ratelimited(dev, "failed to get comp head size (%d)!\n",
				    head_size);
		return head_size;
	}

	req = hisi_zip_create_req(acomp_req, qp_ctx, head_size, false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	ret = hisi_zip_do_work(req, qp_ctx);
	if (unlikely(ret != -EINPROGRESS)) {
		dev_info_ratelimited(dev, "failed to do decompress (%d)!\n",
				     ret);
		hisi_zip_remove_req(qp_ctx, req);
	}

	return ret;
}

static int hisi_zip_start_qp(struct hisi_qp *qp, struct hisi_zip_qp_ctx *qp_ctx,
			     int alg_type, int req_type)
{
	struct device *dev = &qp->qm->pdev->dev;
	int ret;

	qp->req_type = req_type;
	qp->alg_type = alg_type;
	qp->qp_ctx = qp_ctx;

	ret = hisi_qm_start_qp(qp, 0);
	if (ret < 0) {
		dev_err(dev, "failed to start qp (%d)!\n", ret);
		return ret;
	}

	qp_ctx->qp = qp;

	return 0;
}

static void hisi_zip_release_qp(struct hisi_zip_qp_ctx *qp_ctx)
{
	hisi_qm_stop_qp(qp_ctx->qp);
	hisi_qm_free_qps(&qp_ctx->qp, 1);
}

static const struct hisi_zip_sqe_ops hisi_zip_ops_v1 = {
	.sqe_type = 0,
	.fill_addr = hisi_zip_fill_addr,
	.fill_buf_size = hisi_zip_fill_buf_size,
	.fill_buf_type = hisi_zip_fill_buf_type,
	.fill_req_type = hisi_zip_fill_req_type,
	.fill_tag = hisi_zip_fill_tag_v1,
	.fill_sqe_type = hisi_zip_fill_sqe_type,
	.get_tag = hisi_zip_get_tag_v1,
	.get_status = hisi_zip_get_status,
	.get_dstlen = hisi_zip_get_dstlen,
};

static const struct hisi_zip_sqe_ops hisi_zip_ops_v2 = {
	.sqe_type = 0x3,
	.fill_addr = hisi_zip_fill_addr,
	.fill_buf_size = hisi_zip_fill_buf_size,
	.fill_buf_type = hisi_zip_fill_buf_type,
	.fill_req_type = hisi_zip_fill_req_type,
	.fill_tag = hisi_zip_fill_tag_v2,
	.fill_sqe_type = hisi_zip_fill_sqe_type,
	.get_tag = hisi_zip_get_tag_v2,
	.get_status = hisi_zip_get_status,
	.get_dstlen = hisi_zip_get_dstlen,
};

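/*
 * Create one compress and one decompress queue pair on the given NUMA node
 * and select the SQE ops that match the hardware version.
 */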
static int hisi_zip_ctx_init(struct hisi_zip_ctx *hisi_zip_ctx, u8 req_type, int node)
{
	struct hisi_qp *qps[HZIP_CTX_Q_NUM] = { NULL };
	struct hisi_zip_qp_ctx *qp_ctx;
	struct hisi_zip *hisi_zip;
	int ret, i, j;

	ret = zip_create_qps(qps, HZIP_CTX_Q_NUM, node);
	if (ret) {
		pr_err("failed to create zip qps (%d)!\n", ret);
		return -ENODEV;
	}

	hisi_zip = container_of(qps[0]->qm, struct hisi_zip, qm);

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		/* alg_type = 0 for compress, 1 for decompress in hw sqe */
		qp_ctx = &hisi_zip_ctx->qp_ctx[i];
		qp_ctx->ctx = hisi_zip_ctx;
		ret = hisi_zip_start_qp(qps[i], qp_ctx, i, req_type);
		if (ret) {
			for (j = i - 1; j >= 0; j--)
				hisi_qm_stop_qp(hisi_zip_ctx->qp_ctx[j].qp);

			hisi_qm_free_qps(qps, HZIP_CTX_Q_NUM);
			return ret;
		}

		qp_ctx->zip_dev = hisi_zip;
	}

	if (hisi_zip->qm.ver < QM_HW_V3)
		hisi_zip_ctx->ops = &hisi_zip_ops_v1;
	else
		hisi_zip_ctx->ops = &hisi_zip_ops_v2;

	return 0;
}

static void hisi_zip_ctx_exit(struct hisi_zip_ctx *hisi_zip_ctx)
{
	int i;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
		hisi_zip_release_qp(&hisi_zip_ctx->qp_ctx[i]);
}

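/*
 * For each queue pair, allocate a request cache sized to the submission
 * queue depth plus the bitmap that tracks free slots.
 */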
static int hisi_zip_create_req_q(struct hisi_zip_ctx *ctx)
{
	u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
	struct hisi_zip_req_q *req_q;
	int i, ret;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		req_q = &ctx->qp_ctx[i].req_q;
		req_q->size = q_depth;

		req_q->req_bitmap = bitmap_zalloc(req_q->size, GFP_KERNEL);
		if (!req_q->req_bitmap) {
			ret = -ENOMEM;
			if (i == 0)
				return ret;

			goto err_free_comp_q;
		}
		rwlock_init(&req_q->req_lock);

		req_q->q = kcalloc(req_q->size, sizeof(struct hisi_zip_req),
				   GFP_KERNEL);
		if (!req_q->q) {
			ret = -ENOMEM;
			if (i == 0)
				goto err_free_comp_bitmap;
			else
				goto err_free_decomp_bitmap;
		}
	}

	return 0;

err_free_decomp_bitmap:
	bitmap_free(ctx->qp_ctx[HZIP_QPC_DECOMP].req_q.req_bitmap);
err_free_comp_q:
	kfree(ctx->qp_ctx[HZIP_QPC_COMP].req_q.q);
err_free_comp_bitmap:
	bitmap_free(ctx->qp_ctx[HZIP_QPC_COMP].req_q.req_bitmap);
	return ret;
}

static void hisi_zip_release_req_q(struct hisi_zip_ctx *ctx)
{
	int i;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		kfree(ctx->qp_ctx[i].req_q.q);
		bitmap_free(ctx->qp_ctx[i].req_q.req_bitmap);
	}
}

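/*
 * Each request uses two hardware SGLs (source and destination), so the
 * per-queue pool holds twice the submission queue depth.
 */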
static int hisi_zip_create_sgl_pool(struct hisi_zip_ctx *ctx)
{
	u16 q_depth = ctx->qp_ctx[0].qp->sq_depth;
	struct hisi_zip_qp_ctx *tmp;
	struct device *dev;
	int i;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++) {
		tmp = &ctx->qp_ctx[i];
		dev = &tmp->qp->qm->pdev->dev;
		tmp->sgl_pool = hisi_acc_create_sgl_pool(dev, q_depth << 1,
							 sgl_sge_nr);
		if (IS_ERR(tmp->sgl_pool)) {
			if (i == 1)
				goto err_free_sgl_pool0;
			return -ENOMEM;
		}
	}

	return 0;

err_free_sgl_pool0:
	hisi_acc_free_sgl_pool(&ctx->qp_ctx[HZIP_QPC_COMP].qp->qm->pdev->dev,
			       ctx->qp_ctx[HZIP_QPC_COMP].sgl_pool);
	return -ENOMEM;
}

static void hisi_zip_release_sgl_pool(struct hisi_zip_ctx *ctx)
{
	int i;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
		hisi_acc_free_sgl_pool(&ctx->qp_ctx[i].qp->qm->pdev->dev,
				       ctx->qp_ctx[i].sgl_pool);
}

static void hisi_zip_set_acomp_cb(struct hisi_zip_ctx *ctx,
				  void (*fn)(struct hisi_qp *, void *))
{
	int i;

	for (i = 0; i < HZIP_CTX_Q_NUM; i++)
		ctx->qp_ctx[i].qp->req_cb = fn;
}

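/*
 * Transform init: set up the queue pairs, request caches and SGL pools,
 * then install the completion callback.
 */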
static int hisi_zip_acomp_init(struct crypto_acomp *tfm)
{
	const char *alg_name = crypto_tfm_alg_name(&tfm->base);
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);
	struct device *dev;
	int ret;

	ret = hisi_zip_ctx_init(ctx, COMP_NAME_TO_TYPE(alg_name), tfm->base.node);
	if (ret) {
		pr_err("failed to init ctx (%d)!\n", ret);
		return ret;
	}

	dev = &ctx->qp_ctx[0].qp->qm->pdev->dev;

	ret = hisi_zip_create_req_q(ctx);
	if (ret) {
		dev_err(dev, "failed to create request queue (%d)!\n", ret);
		goto err_ctx_exit;
	}

	ret = hisi_zip_create_sgl_pool(ctx);
	if (ret) {
		dev_err(dev, "failed to create sgl pool (%d)!\n", ret);
		goto err_release_req_q;
	}

	hisi_zip_set_acomp_cb(ctx, hisi_zip_acomp_cb);

	return 0;

err_release_req_q:
	hisi_zip_release_req_q(ctx);
err_ctx_exit:
	hisi_zip_ctx_exit(ctx);
	return ret;
}

static void hisi_zip_acomp_exit(struct crypto_acomp *tfm)
{
	struct hisi_zip_ctx *ctx = crypto_tfm_ctx(&tfm->base);

	hisi_zip_set_acomp_cb(ctx, NULL);
	hisi_zip_release_sgl_pool(ctx);
	hisi_zip_release_req_q(ctx);
	hisi_zip_ctx_exit(ctx);
}

static struct acomp_alg hisi_zip_acomp_zlib = {
	.init = hisi_zip_acomp_init,
	.exit = hisi_zip_acomp_exit,
	.compress = hisi_zip_acompress,
	.decompress = hisi_zip_adecompress,
	.base = {
		.cra_name = "zlib-deflate",
		.cra_driver_name = "hisi-zlib-acomp",
		.cra_module = THIS_MODULE,
		.cra_priority = HZIP_ALG_PRIORITY,
		.cra_ctxsize = sizeof(struct hisi_zip_ctx),
	}
};

static int hisi_zip_register_zlib(struct hisi_qm *qm)
{
	int ret;

	if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB))
		return 0;

	ret = crypto_register_acomp(&hisi_zip_acomp_zlib);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register zlib (%d)!\n", ret);

	return ret;
}

static void hisi_zip_unregister_zlib(struct hisi_qm *qm)
{
	if (!hisi_zip_alg_support(qm, HZIP_ALG_ZLIB))
		return;

	crypto_unregister_acomp(&hisi_zip_acomp_zlib);
}

static struct acomp_alg hisi_zip_acomp_gzip = {
	.init = hisi_zip_acomp_init,
	.exit = hisi_zip_acomp_exit,
	.compress = hisi_zip_acompress,
	.decompress = hisi_zip_adecompress,
	.base = {
		.cra_name = "gzip",
		.cra_driver_name = "hisi-gzip-acomp",
		.cra_module = THIS_MODULE,
		.cra_priority = HZIP_ALG_PRIORITY,
		.cra_ctxsize = sizeof(struct hisi_zip_ctx),
	}
};

static int hisi_zip_register_gzip(struct hisi_qm *qm)
{
	int ret;

	if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP))
		return 0;

	ret = crypto_register_acomp(&hisi_zip_acomp_gzip);
	if (ret)
		dev_err(&qm->pdev->dev, "failed to register gzip (%d)!\n", ret);

	return ret;
}

static void hisi_zip_unregister_gzip(struct hisi_qm *qm)
{
	if (!hisi_zip_alg_support(qm, HZIP_ALG_GZIP))
		return;

	crypto_unregister_acomp(&hisi_zip_acomp_gzip);
}

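/* Register the zlib and gzip acomp algorithms supported by this device. */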
int hisi_zip_register_to_crypto(struct hisi_qm *qm)
{
	int ret = 0;

	ret = hisi_zip_register_zlib(qm);
	if (ret)
		return ret;

	ret = hisi_zip_register_gzip(qm);
	if (ret)
		hisi_zip_unregister_zlib(qm);

	return ret;
}

void hisi_zip_unregister_from_crypto(struct hisi_qm *qm)
{
	hisi_zip_unregister_zlib(qm);
	hisi_zip_unregister_gzip(qm);
}