xref: /openbmc/linux/drivers/crypto/cavium/nitrox/nitrox_reqmgr.c (revision 943126417891372d56aa3fe46295cbf53db31370)
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/gfp.h>
3 #include <linux/workqueue.h>
4 #include <crypto/internal/skcipher.h>
5 
6 #include "nitrox_dev.h"
7 #include "nitrox_req.h"
8 #include "nitrox_csr.h"
9 
10 /* SLC_STORE_INFO */
11 #define MIN_UDD_LEN 16
12 /* PKT_IN_HDR + SLC_STORE_INFO */
13 #define FDATA_SIZE 32
14 /* Base destination port for the solicited requests */
15 #define SOLICIT_BASE_DPORT 256
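/*
 * The ORH and completion words of a request are preset to PENDING_SIG;
 * the SE core updates them on completion, and a request is treated as
 * done once the two words differ (see process_response_list()).
 */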
16 #define PENDING_SIG	0xFFFFFFFFFFFFFFFFUL
17 
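/*
 * Software request states: a request starts as REQ_NOT_POSTED, may be
 * parked as REQ_BACKLOG when the ring is full (and the caller allows
 * backlogging), and becomes REQ_POSTED once written to the ring.
 */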
18 #define REQ_NOT_POSTED 1
19 #define REQ_BACKLOG    2
20 #define REQ_POSTED     3
21 
22 /*
23  * Response codes from SE microcode
24  * 0x00 - Success
25  *   Completion with no error
26  * 0x43 - ERR_GC_DATA_LEN_INVALID
27  *   Invalid Data length if Encryption Data length is
28  *   less than 16 bytes for AES-XTS and AES-CTS.
29  * 0x45 - ERR_GC_CTX_LEN_INVALID
30  *   Invalid context length: CTXL != 23 words.
31  * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID
32  *   DOCSIS support is enabled with other than
33  *   AES/DES-CBC mode encryption.
34  * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID
35  *   Authentication offset is other than 0 with
36  *   Encryption IV source = 0.
37  *   Authentication offset is other than 8 (DES)/16 (AES)
38  *   with Encryption IV source = 1
39  * 0x51 - ERR_GC_CRC32_INVALID_SELECTION
40  *   CRC32 is enabled for other than DOCSIS encryption.
41  * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID
42  *   Invalid flag options in AES-CCM IV.
43  */
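
/*
 * Illustrative only (not used by this driver): the completion path in
 * process_response_list() hands the raw low byte of the ORH to the
 * caller's callback. A consumer could map the codes above to errnos
 * roughly as sketched below; the helper name is hypothetical.
 */
static inline int example_orh_to_errno(u64 orh)
{
	switch (orh & 0xff) {
	case 0x00:	/* success */
		return 0;
	case 0x43:	/* ERR_GC_DATA_LEN_INVALID */
	case 0x45:	/* ERR_GC_CTX_LEN_INVALID */
	case 0x4F:	/* ERR_GC_DOCSIS_CIPHER_INVALID */
	case 0x50:	/* ERR_GC_DOCSIS_OFFSET_INVALID */
	case 0x51:	/* ERR_GC_CRC32_INVALID_SELECTION */
	case 0x52:	/* ERR_GC_AES_CCM_FLAG_INVALID */
		return -EINVAL;
	default:	/* any other microcode error */
		return -EIO;
	}
}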
44 
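/*
 * incr_index - advance a command queue ring index by @count slots,
 * wrapping around at @max (the ring length).
 */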
45 static inline int incr_index(int index, int count, int max)
46 {
47 	if ((index + count) >= max)
48 		index = index + count - max;
49 	else
50 		index += count;
51 
52 	return index;
53 }
54 
55 /**
56  * softreq_unmap_sgbufs - unmap and free the request SG lists.
57  * @sr: NITROX software request
58  *
59  */
60 static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
61 {
62 	struct nitrox_device *ndev = sr->ndev;
63 	struct device *dev = DEV(ndev);
64 	struct nitrox_sglist *sglist;
65 
66 	/* unmap in sgbuf */
67 	sglist = sr->in.sglist;
68 	if (!sglist)
69 		goto out_unmap;
70 
71 	/* unmap iv */
72 	dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL);
73 	/* unmap src sglist */
74 	dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir);
75 	/* unmap gather component */
76 	dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE);
77 	kfree(sr->in.sglist);
78 	kfree(sr->in.sgcomp);
79 	sr->in.sglist = NULL;
80 	sr->in.buf = NULL;
81 	sr->in.map_bufs_cnt = 0;
82 
83 out_unmap:
84 	/* unmap out sgbuf */
85 	sglist = sr->out.sglist;
86 	if (!sglist)
87 		return;
88 
89 	/* unmap orh */
90 	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
91 
92 	/* unmap dst sglist */
93 	if (!sr->inplace) {
94 		dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3),
95 			     sr->out.dir);
96 	}
97 	/* unmap completion */
98 	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
99 
100 	/* unmap scatter component */
101 	dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE);
102 	kfree(sr->out.sglist);
103 	kfree(sr->out.sgcomp);
104 	sr->out.sglist = NULL;
105 	sr->out.buf = NULL;
106 	sr->out.map_bufs_cnt = 0;
107 }
108 
109 static void softreq_destroy(struct nitrox_softreq *sr)
110 {
111 	softreq_unmap_sgbufs(sr);
112 	kfree(sr);
113 }
114 
115 /**
116  * create_sg_component - create SG components for N5 device.
117  * @sr: Request structure
118  * @sgtbl: SG table
119  * @map_nents: number of mapped SG entries to place in the components
120  *
121  * Component structure
122  *
123  *   63     48 47     32 31    16 15      0
124  *   --------------------------------------
125  *   |   LEN0  |  LEN1  |  LEN2  |  LEN3  |
126  *   --------------------------------------
127  *   |               PTR0                 |
128  *   --------------------------------------
129  *   |               PTR1                 |
130  *   --------------------------------------
131  *   |               PTR2                 |
132  *   --------------------------------------
133  *   |               PTR3                 |
134  *   --------------------------------------
135  *
136  *   Returns 0 on success or a negative errno code on error.
137  */
138 static int create_sg_component(struct nitrox_softreq *sr,
139 			       struct nitrox_sgtable *sgtbl, int map_nents)
140 {
141 	struct nitrox_device *ndev = sr->ndev;
142 	struct nitrox_sgcomp *sgcomp;
143 	struct nitrox_sglist *sglist;
144 	dma_addr_t dma;
145 	size_t sz_comp;
146 	int i, j, nr_sgcomp;
147 
148 	nr_sgcomp = roundup(map_nents, 4) / 4;
149 
150 	/* each component holds 4 dma pointers */
151 	sz_comp = nr_sgcomp * sizeof(*sgcomp);
152 	sgcomp = kzalloc(sz_comp, sr->gfp);
153 	if (!sgcomp)
154 		return -ENOMEM;
155 
156 	sgtbl->sgcomp = sgcomp;
157 	sgtbl->nr_sgcomp = nr_sgcomp;
158 
159 	sglist = sgtbl->sglist;
160 	/* populate device sg component */
161 	for (i = 0; i < nr_sgcomp; i++) {
162 		for (j = 0; j < 4; j++) {
163 			sgcomp->len[j] = cpu_to_be16(sglist->len);
164 			sgcomp->dma[j] = cpu_to_be64(sglist->dma);
165 			sglist++;
166 		}
167 		sgcomp++;
168 	}
169 	/* map the device sg component */
170 	dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
171 	if (dma_mapping_error(DEV(ndev), dma)) {
172 		kfree(sgtbl->sgcomp);
173 		sgtbl->sgcomp = NULL;
174 		return -ENOMEM;
175 	}
176 
177 	sgtbl->dma = dma;
178 	sgtbl->len = sz_comp;
179 
180 	return 0;
181 }
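
/*
 * Sizing example for the component layout above: a request with an IV
 * plus six source SG entries maps seven buffers, so create_sg_component()
 * needs roundup(7, 4) / 4 = 2 components. The callers allocate their
 * nitrox_sglist arrays rounded up to a multiple of four entries and
 * zeroed, so the unused LEN/PTR slots of the last component stay 0.
 */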
182 
183 /**
184  * dma_map_inbufs - DMA map input sglist and create the gather SG
185  *                  component for the N5 device.
186  * @sr: Request structure
187  * @req: Crypto request structure
188  *
189  * Returns 0 if successful or a negative errno code on error.
190  */
191 static int dma_map_inbufs(struct nitrox_softreq *sr,
192 			  struct se_crypto_request *req)
193 {
194 	struct device *dev = DEV(sr->ndev);
195 	struct scatterlist *sg = req->src;
196 	struct nitrox_sglist *glist;
197 	int i, nents, ret = 0;
198 	dma_addr_t dma;
199 	size_t sz;
200 
201 	nents = sg_nents(req->src);
202 
203 	/* create gather list for the IV and src entries */
204 	sz = roundup((1 + nents), 4) * sizeof(*glist);
205 	glist = kzalloc(sz, sr->gfp);
206 	if (!glist)
207 		return -ENOMEM;
208 
209 	sr->in.sglist = glist;
210 	/* map IV */
211 	dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL);
212 	if (dma_mapping_error(dev, dma)) {
213 		ret = -EINVAL;
214 		goto iv_map_err;
215 	}
216 
217 	sr->in.dir = (req->src == req->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
218 	/* map src entries */
219 	nents = dma_map_sg(dev, req->src, nents, sr->in.dir);
220 	if (!nents) {
221 		ret = -EINVAL;
222 		goto src_map_err;
223 	}
224 	sr->in.buf = req->src;
225 
226 	/* store the mappings */
227 	glist->len = req->ivsize;
228 	glist->dma = dma;
229 	glist++;
230 	sr->in.total_bytes += req->ivsize;
231 
232 	for_each_sg(req->src, sg, nents, i) {
233 		glist->len = sg_dma_len(sg);
234 		glist->dma = sg_dma_address(sg);
235 		sr->in.total_bytes += glist->len;
236 		glist++;
237 	}
238 	/* roundup map count to align with entries in sg component */
239 	sr->in.map_bufs_cnt = (1 + nents);
240 
241 	/* create NITROX gather component */
242 	ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt);
243 	if (ret)
244 		goto incomp_err;
245 
246 	return 0;
247 
248 incomp_err:
249 	dma_unmap_sg(dev, req->src, nents, sr->in.dir);
250 	sr->in.map_bufs_cnt = 0;
251 src_map_err:
252 	dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL);
253 iv_map_err:
254 	kfree(sr->in.sglist);
255 	sr->in.sglist = NULL;
256 	return ret;
257 }
258 
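/**
 * dma_map_outbufs - DMA map the output scatter list (ORH, IV, dst
 *                   entries and completion header) and create the
 *                   scatter SG component for the N5 device.
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * Returns 0 if successful or a negative errno code on error.
 */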
259 static int dma_map_outbufs(struct nitrox_softreq *sr,
260 			   struct se_crypto_request *req)
261 {
262 	struct device *dev = DEV(sr->ndev);
263 	struct nitrox_sglist *glist = sr->in.sglist;
264 	struct nitrox_sglist *slist;
265 	struct scatterlist *sg;
266 	int i, nents, map_bufs_cnt, ret = 0;
267 	size_t sz;
268 
269 	nents = sg_nents(req->dst);
270 
271 	/* create scatter list: ORH, IV, dst entries and completion header */
272 	sz = roundup((3 + nents), 4) * sizeof(*slist);
273 	slist = kzalloc(sz, sr->gfp);
274 	if (!slist)
275 		return -ENOMEM;
276 
277 	sr->out.sglist = slist;
278 	sr->out.dir = DMA_BIDIRECTIONAL;
279 	/* map ORH */
280 	sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN,
281 					  sr->out.dir);
282 	if (dma_mapping_error(dev, sr->resp.orh_dma)) {
283 		ret = -EINVAL;
284 		goto orh_map_err;
285 	}
286 
287 	/* map completion */
288 	sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion,
289 						 COMP_HLEN, sr->out.dir);
290 	if (dma_mapping_error(dev, sr->resp.completion_dma)) {
291 		ret = -EINVAL;
292 		goto compl_map_err;
293 	}
294 
295 	sr->inplace = (req->src == req->dst);
296 	/* out of place */
297 	if (!sr->inplace) {
298 		nents = dma_map_sg(dev, req->dst, nents, sr->out.dir);
299 		if (!nents) {
300 			ret = -EINVAL;
301 			goto dst_map_err;
302 		}
303 	}
304 	sr->out.buf = req->dst;
305 
306 	/* store the mappings */
307 	/* orh */
308 	slist->len = ORH_HLEN;
309 	slist->dma = sr->resp.orh_dma;
310 	slist++;
311 
312 	/* copy the glist mappings */
313 	if (sr->inplace) {
314 		nents = sr->in.map_bufs_cnt - 1;
315 		map_bufs_cnt = sr->in.map_bufs_cnt;
316 		while (map_bufs_cnt--) {
317 			slist->len = glist->len;
318 			slist->dma = glist->dma;
319 			slist++;
320 			glist++;
321 		}
322 	} else {
323 		/* copy iv mapping */
324 		slist->len = glist->len;
325 		slist->dma = glist->dma;
326 		slist++;
327 		/* copy remaining maps */
328 		for_each_sg(req->dst, sg, nents, i) {
329 			slist->len = sg_dma_len(sg);
330 			slist->dma = sg_dma_address(sg);
331 			slist++;
332 		}
333 	}
334 
335 	/* completion */
336 	slist->len = COMP_HLEN;
337 	slist->dma = sr->resp.completion_dma;
338 
339 	sr->out.map_bufs_cnt = (3 + nents);
340 
341 	ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt);
342 	if (ret)
343 		goto outcomp_map_err;
344 
345 	return 0;
346 
347 outcomp_map_err:
348 	if (!sr->inplace)
349 		dma_unmap_sg(dev, req->dst, nents, sr->out.dir);
350 	sr->out.map_bufs_cnt = 0;
351 	sr->out.buf = NULL;
352 dst_map_err:
353 	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
354 	sr->resp.completion_dma = 0;
355 compl_map_err:
356 	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
357 	sr->resp.orh_dma = 0;
358 orh_map_err:
359 	kfree(sr->out.sglist);
360 	sr->out.sglist = NULL;
361 	return ret;
362 }
363 
364 static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
365 				    struct se_crypto_request *creq)
366 {
367 	int ret;
368 
369 	ret = dma_map_inbufs(sr, creq);
370 	if (ret)
371 		return ret;
372 
373 	ret = dma_map_outbufs(sr, creq);
374 	if (ret)
375 		softreq_unmap_sgbufs(sr);
376 
377 	return ret;
378 }
379 
380 static inline void backlog_list_add(struct nitrox_softreq *sr,
381 				    struct nitrox_cmdq *cmdq)
382 {
383 	INIT_LIST_HEAD(&sr->backlog);
384 
385 	spin_lock_bh(&cmdq->backlog_qlock);
386 	list_add_tail(&sr->backlog, &cmdq->backlog_head);
387 	atomic_inc(&cmdq->backlog_count);
388 	atomic_set(&sr->status, REQ_BACKLOG);
389 	spin_unlock_bh(&cmdq->backlog_qlock);
390 }
391 
392 static inline void response_list_add(struct nitrox_softreq *sr,
393 				     struct nitrox_cmdq *cmdq)
394 {
395 	INIT_LIST_HEAD(&sr->response);
396 
397 	spin_lock_bh(&cmdq->resp_qlock);
398 	list_add_tail(&sr->response, &cmdq->response_head);
399 	spin_unlock_bh(&cmdq->resp_qlock);
400 }
401 
402 static inline void response_list_del(struct nitrox_softreq *sr,
403 				     struct nitrox_cmdq *cmdq)
404 {
405 	spin_lock_bh(&cmdq->resp_qlock);
406 	list_del(&sr->response);
407 	spin_unlock_bh(&cmdq->resp_qlock);
408 }
409 
410 static struct nitrox_softreq *
411 get_first_response_entry(struct nitrox_cmdq *cmdq)
412 {
413 	return list_first_entry_or_null(&cmdq->response_head,
414 					struct nitrox_softreq, response);
415 }
416 
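/*
 * cmdq_full - check for space in the command queue. The pending count
 * is incremented up front and rolled back if the queue is full, so a
 * 'false' return also reserves the slot that post_se_instr() will use;
 * process_response_list() drops the count once the request completes.
 */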
417 static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
418 {
419 	if (atomic_inc_return(&cmdq->pending_count) > qlen) {
420 		atomic_dec(&cmdq->pending_count);
421 		/* sync with other cpus */
422 		smp_mb__after_atomic();
423 		return true;
424 	}
425 	return false;
426 }
427 
428 /**
429  * post_se_instr - Post SE instruction to Packet Input ring
430  * @sr: Request structure
431  * @cmdq: Command queue to post the instruction on
432  *
433  * The caller must ensure the ring has free space before posting.
434  */
435 static void post_se_instr(struct nitrox_softreq *sr,
436 			  struct nitrox_cmdq *cmdq)
437 {
438 	struct nitrox_device *ndev = sr->ndev;
439 	int idx;
440 	u8 *ent;
441 
442 	spin_lock_bh(&cmdq->cmd_qlock);
443 
444 	idx = cmdq->write_idx;
445 	/* copy the instruction */
446 	ent = cmdq->base + (idx * cmdq->instr_size);
447 	memcpy(ent, &sr->instr, cmdq->instr_size);
448 
449 	atomic_set(&sr->status, REQ_POSTED);
450 	response_list_add(sr, cmdq);
451 	sr->tstamp = jiffies;
452 	/* flush the command queue updates */
453 	dma_wmb();
454 
455 	/* Ring doorbell with count 1 */
456 	writeq(1, cmdq->dbell_csr_addr);
457 	/* orders the doorbell rings */
458 	mmiowb();
459 
460 	cmdq->write_idx = incr_index(idx, 1, ndev->qlen);
461 
462 	spin_unlock_bh(&cmdq->cmd_qlock);
463 
464 	/* increment the posted command count */
465 	atomic64_inc(&ndev->stats.posted);
466 }
467 
468 static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
469 {
470 	struct nitrox_device *ndev = cmdq->ndev;
471 	struct nitrox_softreq *sr, *tmp;
472 	int ret = 0;
473 
474 	if (!atomic_read(&cmdq->backlog_count))
475 		return 0;
476 
477 	spin_lock_bh(&cmdq->backlog_qlock);
478 
479 	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
480 		struct skcipher_request *skreq;
481 
482 		/* submit until space available */
483 		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
484 			ret = -ENOSPC;
485 			break;
486 		}
487 		/* delete from backlog list */
488 		list_del(&sr->backlog);
489 		atomic_dec(&cmdq->backlog_count);
490 		/* sync with other cpus */
491 		smp_mb__after_atomic();
492 
493 		skreq = sr->skreq;
494 		/* post the command */
495 		post_se_instr(sr, cmdq);
496 
497 		/* backlog requests are posted, wakeup with -EINPROGRESS */
498 		skcipher_request_complete(skreq, -EINPROGRESS);
499 	}
500 	spin_unlock_bh(&cmdq->backlog_qlock);
501 
502 	return ret;
503 }
504 
505 static int nitrox_enqueue_request(struct nitrox_softreq *sr)
506 {
507 	struct nitrox_cmdq *cmdq = sr->cmdq;
508 	struct nitrox_device *ndev = sr->ndev;
509 
510 	/* try to post backlog requests */
511 	post_backlog_cmds(cmdq);
512 
513 	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
514 		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
515 			/* increment drop count */
516 			atomic64_inc(&ndev->stats.dropped);
517 			return -ENOSPC;
518 		}
519 		/* add to backlog list */
520 		backlog_list_add(sr, cmdq);
521 		return -EBUSY;
522 	}
523 	post_se_instr(sr, cmdq);
524 
525 	return -EINPROGRESS;
526 }
527 
528 /**
529  * nitrox_process_se_request - Send request to SE core
530  * @ndev: NITROX device
531  * @req: Crypto request
532  * @callback: Completion callback
533  * @skreq: skcipher request to pass back to @callback
534  */
535 int nitrox_process_se_request(struct nitrox_device *ndev,
536 			      struct se_crypto_request *req,
537 			      completion_t callback,
538 			      struct skcipher_request *skreq)
539 {
540 	struct nitrox_softreq *sr;
541 	dma_addr_t ctx_handle = 0;
542 	int qno, ret = 0;
543 
544 	if (!nitrox_ready(ndev))
545 		return -ENODEV;
546 
547 	sr = kzalloc(sizeof(*sr), req->gfp);
548 	if (!sr)
549 		return -ENOMEM;
550 
551 	sr->ndev = ndev;
552 	sr->flags = req->flags;
553 	sr->gfp = req->gfp;
554 	sr->callback = callback;
555 	sr->skreq = skreq;
556 
557 	atomic_set(&sr->status, REQ_NOT_POSTED);
558 
559 	WRITE_ONCE(sr->resp.orh, PENDING_SIG);
560 	WRITE_ONCE(sr->resp.completion, PENDING_SIG);
561 
562 	ret = softreq_map_iobuf(sr, req);
563 	if (ret) {
564 		kfree(sr);
565 		return ret;
566 	}
567 
568 	/* get the context handle */
569 	if (req->ctx_handle) {
570 		struct ctx_hdr *hdr;
571 		u8 *ctx_ptr;
572 
573 		ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
574 		hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
575 		ctx_handle = hdr->ctx_dma;
576 	}
577 
578 	/* select the queue */
579 	qno = smp_processor_id() % ndev->nr_queues;
580 
581 	sr->cmdq = &ndev->pkt_inq[qno];
582 
583 	/*
584 	 * 64-Byte Instruction Format
585 	 *
586 	 *  ----------------------
587 	 *  |      DPTR0         | 8 bytes
588 	 *  ----------------------
589 	 *  |  PKT_IN_INSTR_HDR  | 8 bytes
590 	 *  ----------------------
591 	 *  |    PKT_IN_HDR      | 16 bytes
592 	 *  ----------------------
593 	 *  |    SLC_INFO        | 16 bytes
594 	 *  ----------------------
595 	 *  |   Front data       | 16 bytes
596 	 *  ----------------------
597 	 */
598 
599 	/* fill the packet instruction */
600 	/* word 0 */
601 	sr->instr.dptr0 = cpu_to_be64(sr->in.dma);
602 
603 	/* word 1 */
604 	sr->instr.ih.value = 0;
605 	sr->instr.ih.s.g = 1;
606 	sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
607 	sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
608 	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
609 	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
610 	sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);
611 
612 	/* word 2 */
613 	sr->instr.irh.value[0] = 0;
614 	sr->instr.irh.s.uddl = MIN_UDD_LEN;
615 	/* context length in 64-bit words */
616 	sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
617 	/* offset from solicit base port 256 */
618 	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
619 	sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
620 	sr->instr.irh.s.arg = req->ctrl.s.arg;
621 	sr->instr.irh.s.opcode = req->opcode;
622 	sr->instr.irh.value[0] = cpu_to_be64(sr->instr.irh.value[0]);
623 
624 	/* word 3 */
625 	sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);
626 
627 	/* word 4 */
628 	sr->instr.slc.value[0] = 0;
629 	sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
630 	sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);
631 
632 	/* word 5 */
633 	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma);
634 
635 	/*
636 	 * No endianness conversion for front data;
637 	 * it goes into the payload as is.
638 	 * Put the GP header in front data.
639 	 */
640 	sr->instr.fdata[0] = *((u64 *)&req->gph);
641 	sr->instr.fdata[1] = 0;
642 
643 	ret = nitrox_enqueue_request(sr);
644 	if (ret == -ENOSPC)
645 		goto send_fail;
646 
647 	return ret;
648 
649 send_fail:
650 	softreq_destroy(sr);
651 	return ret;
652 }
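
/*
 * Hypothetical caller sketch (not part of this driver): shows how an
 * skcipher front end might submit a request and interpret the return
 * codes of nitrox_process_se_request(). Only members referenced in
 * this file are touched; IV, opcode and context setup are omitted.
 */
static inline int example_submit_request(struct nitrox_device *ndev,
					 struct se_crypto_request *creq,
					 struct skcipher_request *skreq,
					 completion_t done_cb)
{
	creq->flags = skreq->base.flags;
	creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
		    GFP_KERNEL : GFP_ATOMIC;
	creq->src = skreq->src;
	creq->dst = skreq->dst;

	/*
	 * -EINPROGRESS: posted to the ring, the callback completes it.
	 * -EBUSY: queued on the backlog (CRYPTO_TFM_REQ_MAY_BACKLOG set).
	 * Any other negative value: the request was not accepted.
	 */
	return nitrox_process_se_request(ndev, creq, done_cb, skreq);
}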
653 
654 static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
655 {
656 	return time_after_eq(jiffies, (tstamp + timeout));
657 }
658 
659 void backlog_qflush_work(struct work_struct *work)
660 {
661 	struct nitrox_cmdq *cmdq;
662 
663 	cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
664 	post_backlog_cmds(cmdq);
665 }
666 
667 /**
668  * process_response_list - process completed requests
669  * @cmdq: Command queue with the pending responses
670  *
671  * Walks the response list and completes requests whose ORH and
672  * completion words have been updated by the SE core or have timed out.
673  */
674 static void process_response_list(struct nitrox_cmdq *cmdq)
675 {
676 	struct nitrox_device *ndev = cmdq->ndev;
677 	struct nitrox_softreq *sr;
678 	struct skcipher_request *skreq;
679 	completion_t callback;
680 	int req_completed = 0, err = 0, budget;
681 
682 	/* check all pending requests */
683 	budget = atomic_read(&cmdq->pending_count);
684 
685 	while (req_completed < budget) {
686 		sr = get_first_response_entry(cmdq);
687 		if (!sr)
688 			break;
689 
690 		if (atomic_read(&sr->status) != REQ_POSTED)
691 			break;
692 
693 		/* check orh and completion bytes updates */
694 		if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) {
695 			/* request not completed, check for timeout */
696 			if (!cmd_timeout(sr->tstamp, ndev->timeout))
697 				break;
698 			dev_err_ratelimited(DEV(ndev),
699 					    "Request timeout, orh 0x%016llx\n",
700 					    READ_ONCE(sr->resp.orh));
701 		}
702 		atomic_dec(&cmdq->pending_count);
703 		atomic64_inc(&ndev->stats.completed);
704 		/* sync with other cpus */
705 		smp_mb__after_atomic();
706 		/* remove from response list */
707 		response_list_del(sr, cmdq);
708 
709 		callback = sr->callback;
710 		skreq = sr->skreq;
711 
712 		/* ORH error code */
713 		err = READ_ONCE(sr->resp.orh) & 0xff;
714 		softreq_destroy(sr);
715 
716 		if (callback)
717 			callback(skreq, err);
718 
719 		req_completed++;
720 	}
721 }
722 
723 /**
724  * pkt_slc_resp_tasklet - post processing of SE responses
725  */
726 void pkt_slc_resp_tasklet(unsigned long data)
727 {
728 	struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
729 	struct nitrox_cmdq *cmdq = qvec->cmdq;
730 	union nps_pkt_slc_cnts slc_cnts;
731 
732 	/* read completion count */
733 	slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
734 	/* resend the interrupt if more work to do */
735 	slc_cnts.s.resend = 1;
736 
737 	process_response_list(cmdq);
738 
739 	/*
740 	 * Clear the interrupt with the resend bit enabled; an MSI-X
741 	 * interrupt is generated again if completion count > threshold.
742 	 */
743 	writeq(slc_cnts.value, cmdq->compl_cnt_csr_addr);
744 	/* order the writes */
745 	mmiowb();
746 
747 	if (atomic_read(&cmdq->backlog_count))
748 		schedule_work(&cmdq->backlog_qflush);
749 }
750