// SPDX-License-Identifier: GPL-2.0
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <crypto/internal/skcipher.h>

#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
/* PKT_IN_HDR + SLC_STORE_INFO */
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256
#define PENDING_SIG	0xFFFFFFFFFFFFFFFFUL

#define REQ_NOT_POSTED 1
#define REQ_BACKLOG    2
#define REQ_POSTED     3

/*
 * Response codes from SE microcode
 * 0x00 - Success
 *   Completion with no error
 * 0x43 - ERR_GC_DATA_LEN_INVALID
 *   Invalid Data length if Encryption Data length is
 *   less than 16 bytes for AES-XTS and AES-CTS.
 * 0x45 - ERR_GC_CTX_LEN_INVALID
 *   Invalid context length: CTXL != 23 words.
 * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID
 *   DOCSIS support is enabled with other than
 *   AES/DES-CBC mode encryption.
 * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID
 *   Authentication offset is other than 0 with
 *   Encryption IV source = 0.
 *   Authentication offset is other than 8 (DES)/16 (AES)
 *   with Encryption IV source = 1.
 * 0x51 - ERR_GC_CRC32_INVALID_SELECTION
 *   CRC32 is enabled for other than DOCSIS encryption.
 * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID
 *   Invalid flag options in AES-CCM IV.
 */
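
/*
 * The SE core reports one of the codes above in the low byte of the ORH
 * (output request header). process_response_list() below extracts it with
 * READ_ONCE(sr->resp.orh) & 0xff and passes it to the completion callback,
 * so, for example, an AES-XTS request shorter than 16 bytes completes with
 * 0x43 (ERR_GC_DATA_LEN_INVALID).
 */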

/**
 * softreq_unmap_sgbufs - unmap and free the request's input/output sg lists.
 * @sr: Request structure
 */
static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
	struct nitrox_device *ndev = sr->ndev;
	struct device *dev = DEV(ndev);
	struct nitrox_sglist *sglist;

	/* unmap in sgbuf */
	sglist = sr->in.sglist;
	if (!sglist)
		goto out_unmap;

	/* unmap iv */
	dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL);
	/* unmap src sglist */
	dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir);
	/* unmap gather component */
	dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE);
	kfree(sr->in.sglist);
	kfree(sr->in.sgcomp);
	sr->in.sglist = NULL;
	sr->in.buf = NULL;
	sr->in.map_bufs_cnt = 0;

out_unmap:
	/* unmap out sgbuf */
	sglist = sr->out.sglist;
	if (!sglist)
		return;

	/* unmap orh */
	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);

	/* unmap dst sglist */
	if (!sr->inplace) {
		dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3),
			     sr->out.dir);
	}
	/* unmap completion */
	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);

	/* unmap scatter component */
	dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE);
	kfree(sr->out.sglist);
	kfree(sr->out.sgcomp);
	sr->out.sglist = NULL;
	sr->out.buf = NULL;
	sr->out.map_bufs_cnt = 0;
}

static void softreq_destroy(struct nitrox_softreq *sr)
{
	softreq_unmap_sgbufs(sr);
	kfree(sr);
}

/**
 * create_sg_component - create SG components for N5 device.
 * @sr: Request structure
 * @sgtbl: SG table
 * @map_nents: number of mapped SG entries
 *
 * Component structure
 *
 *   63     48 47     32 31    16 15      0
 *   --------------------------------------
 *   |   LEN0  |  LEN1  |  LEN2  |  LEN3  |
 *   |-------------------------------------
 *   |               PTR0                 |
 *   --------------------------------------
 *   |               PTR1                 |
 *   --------------------------------------
 *   |               PTR2                 |
 *   --------------------------------------
 *   |               PTR3                 |
 *   --------------------------------------
 *
 *   Returns 0 on success, or a negative errno code on error.
 */
static int create_sg_component(struct nitrox_softreq *sr,
			       struct nitrox_sgtable *sgtbl, int map_nents)
{
	struct nitrox_device *ndev = sr->ndev;
	struct nitrox_sgcomp *sgcomp;
	struct nitrox_sglist *sglist;
	dma_addr_t dma;
	size_t sz_comp;
	int i, j, nr_sgcomp;

	nr_sgcomp = roundup(map_nents, 4) / 4;

	/* each component holds 4 dma pointers */
	sz_comp = nr_sgcomp * sizeof(*sgcomp);
	sgcomp = kzalloc(sz_comp, sr->gfp);
	if (!sgcomp)
		return -ENOMEM;

	sgtbl->sgcomp = sgcomp;
	sgtbl->nr_sgcomp = nr_sgcomp;

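	/*
	 * Trailing slots in the last component stay zero: the sglist array
	 * is kzalloc'ed with its length rounded up to a multiple of four
	 * entries, and the sgcomp array is zeroed as well.
	 */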
	sglist = sgtbl->sglist;
	/* populate device sg component */
	for (i = 0; i < nr_sgcomp; i++) {
		for (j = 0; j < 4; j++) {
			sgcomp->len[j] = cpu_to_be16(sglist->len);
			sgcomp->dma[j] = cpu_to_be64(sglist->dma);
			sglist++;
		}
		sgcomp++;
	}
	/* map the device sg component */
	dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
	if (dma_mapping_error(DEV(ndev), dma)) {
		kfree(sgtbl->sgcomp);
		sgtbl->sgcomp = NULL;
		return -ENOMEM;
	}

	sgtbl->dma = dma;
	sgtbl->len = sz_comp;

	return 0;
}

/**
 * dma_map_inbufs - DMA map input sglist and create the sglist component
 *                  for N5 device.
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * Returns 0 if successful or a negative errno code on error.
 */
static int dma_map_inbufs(struct nitrox_softreq *sr,
			  struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	struct scatterlist *sg = req->src;
	struct nitrox_sglist *glist;
	int i, nents, ret = 0;
	dma_addr_t dma;
	size_t sz;

	nents = sg_nents(req->src);

	/* create gather list for the IV and src entries */
	sz = roundup((1 + nents), 4) * sizeof(*glist);
	glist = kzalloc(sz, sr->gfp);
	if (!glist)
		return -ENOMEM;

	sr->in.sglist = glist;
	/* map IV */
	dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma)) {
		ret = -EINVAL;
		goto iv_map_err;
	}

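	/*
	 * In-place requests share pages between the gather and scatter
	 * sides, so the source must be mapped bidirectionally; otherwise
	 * DMA_TO_DEVICE is enough.
	 */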
	sr->in.dir = (req->src == req->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	/* map src entries */
	nents = dma_map_sg(dev, req->src, nents, sr->in.dir);
	if (!nents) {
		ret = -EINVAL;
		goto src_map_err;
	}
	sr->in.buf = req->src;

	/* store the mappings */
	glist->len = req->ivsize;
	glist->dma = dma;
	glist++;
	sr->in.total_bytes += req->ivsize;

	for_each_sg(req->src, sg, nents, i) {
		glist->len = sg_dma_len(sg);
		glist->dma = sg_dma_address(sg);
		sr->in.total_bytes += glist->len;
		glist++;
	}
	/* roundup map count to align with the entries in the sg component */
	sr->in.map_bufs_cnt = (1 + nents);

	/* create NITROX gather component */
	ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt);
	if (ret)
		goto incomp_err;

	return 0;

incomp_err:
	dma_unmap_sg(dev, req->src, nents, sr->in.dir);
	sr->in.map_bufs_cnt = 0;
src_map_err:
	dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL);
iv_map_err:
	kfree(sr->in.sglist);
	sr->in.sglist = NULL;
	return ret;
}

static int dma_map_outbufs(struct nitrox_softreq *sr,
			   struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	struct nitrox_sglist *glist = sr->in.sglist;
	struct nitrox_sglist *slist;
	struct scatterlist *sg;
	int i, nents, map_bufs_cnt, ret = 0;
	size_t sz;

	nents = sg_nents(req->dst);

	/* create scatter list ORH, IV, dst entries and Completion header */
	sz = roundup((3 + nents), 4) * sizeof(*slist);
	slist = kzalloc(sz, sr->gfp);
	if (!slist)
		return -ENOMEM;

	sr->out.sglist = slist;
	sr->out.dir = DMA_BIDIRECTIONAL;
	/* map ORH */
	sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN,
					  sr->out.dir);
	if (dma_mapping_error(dev, sr->resp.orh_dma)) {
		ret = -EINVAL;
		goto orh_map_err;
	}

	/* map completion */
	sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion,
						 COMP_HLEN, sr->out.dir);
	if (dma_mapping_error(dev, sr->resp.completion_dma)) {
		ret = -EINVAL;
		goto compl_map_err;
	}

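	/*
	 * For in-place requests (src == dst) the destination pages were
	 * already mapped bidirectionally by dma_map_inbufs(), so the gather
	 * mappings are reused below instead of mapping req->dst again.
	 */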
	sr->inplace = (req->src == req->dst) ? true : false;
	/* out of place */
	if (!sr->inplace) {
		nents = dma_map_sg(dev, req->dst, nents, sr->out.dir);
		if (!nents) {
			ret = -EINVAL;
			goto dst_map_err;
		}
	}
	sr->out.buf = req->dst;

	/* store the mappings */
	/* orh */
	slist->len = ORH_HLEN;
	slist->dma = sr->resp.orh_dma;
	slist++;

	/* copy the glist mappings */
	if (sr->inplace) {
		nents = sr->in.map_bufs_cnt - 1;
		map_bufs_cnt = sr->in.map_bufs_cnt;
		while (map_bufs_cnt--) {
			slist->len = glist->len;
			slist->dma = glist->dma;
			slist++;
			glist++;
		}
	} else {
		/* copy iv mapping */
		slist->len = glist->len;
		slist->dma = glist->dma;
		slist++;
		/* copy remaining maps */
		for_each_sg(req->dst, sg, nents, i) {
			slist->len = sg_dma_len(sg);
			slist->dma = sg_dma_address(sg);
			slist++;
		}
	}

	/* completion */
	slist->len = COMP_HLEN;
	slist->dma = sr->resp.completion_dma;

	sr->out.map_bufs_cnt = (3 + nents);

	ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt);
	if (ret)
		goto outcomp_map_err;

	return 0;

outcomp_map_err:
	if (!sr->inplace)
		dma_unmap_sg(dev, req->dst, nents, sr->out.dir);
	sr->out.map_bufs_cnt = 0;
	sr->out.buf = NULL;
dst_map_err:
	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
	sr->resp.completion_dma = 0;
compl_map_err:
	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
	sr->resp.orh_dma = 0;
orh_map_err:
	kfree(sr->out.sglist);
	sr->out.sglist = NULL;
	return ret;
}

static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
				    struct se_crypto_request *creq)
{
	int ret;

	ret = dma_map_inbufs(sr, creq);
	if (ret)
		return ret;

	ret = dma_map_outbufs(sr, creq);
	if (ret)
		softreq_unmap_sgbufs(sr);

	return ret;
}

static inline void backlog_list_add(struct nitrox_softreq *sr,
				    struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->backlog);

	spin_lock_bh(&cmdq->backlog_lock);
	list_add_tail(&sr->backlog, &cmdq->backlog_head);
	atomic_inc(&cmdq->backlog_count);
	atomic_set(&sr->status, REQ_BACKLOG);
	spin_unlock_bh(&cmdq->backlog_lock);
}

static inline void response_list_add(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->response);

	spin_lock_bh(&cmdq->response_lock);
	list_add_tail(&sr->response, &cmdq->response_head);
	spin_unlock_bh(&cmdq->response_lock);
}

static inline void response_list_del(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	spin_lock_bh(&cmdq->response_lock);
	list_del(&sr->response);
	spin_unlock_bh(&cmdq->response_lock);
}

static struct nitrox_softreq *
get_first_response_entry(struct nitrox_cmdq *cmdq)
{
	return list_first_entry_or_null(&cmdq->response_head,
					struct nitrox_softreq, response);
}

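/*
 * cmdq_full - check for space in the command queue.
 *
 * Optimistically reserves a slot by incrementing pending_count and backs
 * the increment out again if the queue already holds qlen entries.
 *
 * Returns true if the queue is full, false otherwise.
 */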
static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
{
	if (atomic_inc_return(&cmdq->pending_count) > qlen) {
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		return true;
	}
	return false;
}

/**
 * post_se_instr - Post SE instruction to Packet Input ring
 * @sr: Request structure
 * @cmdq: Command queue
 */
static void post_se_instr(struct nitrox_softreq *sr,
			  struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = sr->ndev;
	union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
	u64 offset;
	u8 *ent;

	spin_lock_bh(&cmdq->cmdq_lock);

	/* get the next write offset */
	offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
	pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
	/* copy the instruction */
	ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
	memcpy(ent, &sr->instr, cmdq->instr_size);
	/* flush the command queue updates */
	dma_wmb();

	sr->tstamp = jiffies;
	atomic_set(&sr->status, REQ_POSTED);
	response_list_add(sr, cmdq);

	/* Ring doorbell with count 1 */
	writeq(1, cmdq->dbell_csr_addr);
	/* orders the doorbell rings */
	mmiowb();

	spin_unlock_bh(&cmdq->cmdq_lock);
}

static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr, *tmp;
	int ret = 0;

	spin_lock_bh(&cmdq->backlog_lock);

	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
		struct skcipher_request *skreq;

		/* submit until space available */
		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
			ret = -EBUSY;
			break;
		}
		/* delete from backlog list */
		list_del(&sr->backlog);
		atomic_dec(&cmdq->backlog_count);
		/* sync with other cpus */
		smp_mb__after_atomic();

		skreq = sr->skreq;
		/* post the command */
		post_se_instr(sr, cmdq);

		/* backlog requests are posted, wakeup with -EINPROGRESS */
		skcipher_request_complete(skreq, -EINPROGRESS);
	}
	spin_unlock_bh(&cmdq->backlog_lock);

	return ret;
}

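/*
 * nitrox_enqueue_request - post the request to the command queue, or place
 * it on the per-queue backlog when the ring is full and the request allows
 * backlogging (CRYPTO_TFM_REQ_MAY_BACKLOG).
 *
 * Returns -EINPROGRESS if the request was posted, -EBUSY if it was
 * backlogged, or -EAGAIN if the ring is full and backlogging is not
 * allowed.
 */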
static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
	struct nitrox_cmdq *cmdq = sr->cmdq;
	struct nitrox_device *ndev = sr->ndev;
	int ret = -EBUSY;

	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EAGAIN;

		backlog_list_add(sr, cmdq);
	} else {
		ret = post_backlog_cmds(cmdq);
		if (ret) {
			backlog_list_add(sr, cmdq);
			return ret;
		}
		post_se_instr(sr, cmdq);
		ret = -EINPROGRESS;
	}
	return ret;
}

/**
 * nitrox_process_se_request - Send request to SE core
 * @ndev: NITROX device
 * @req: Crypto request
 * @callback: Completion callback
 * @skreq: Skcipher request
 *
 * Returns -EINPROGRESS if the request is posted, -EBUSY if it is
 * backlogged, or a negative error code on failure.
 */
int nitrox_process_se_request(struct nitrox_device *ndev,
			      struct se_crypto_request *req,
			      completion_t callback,
			      struct skcipher_request *skreq)
{
	struct nitrox_softreq *sr;
	dma_addr_t ctx_handle = 0;
	int qno, ret = 0;

	if (!nitrox_ready(ndev))
		return -ENODEV;

	sr = kzalloc(sizeof(*sr), req->gfp);
	if (!sr)
		return -ENOMEM;

	sr->ndev = ndev;
	sr->flags = req->flags;
	sr->gfp = req->gfp;
	sr->callback = callback;
	sr->skreq = skreq;

	atomic_set(&sr->status, REQ_NOT_POSTED);

	WRITE_ONCE(sr->resp.orh, PENDING_SIG);
	WRITE_ONCE(sr->resp.completion, PENDING_SIG);

	ret = softreq_map_iobuf(sr, req);
	if (ret) {
		kfree(sr);
		return ret;
	}

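	/*
	 * Recover the device DMA address of the crypto context: a struct
	 * ctx_hdr is stored immediately before the buffer that
	 * req->ctx_handle points to.
	 */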
	/* get the context handle */
	if (req->ctx_handle) {
		struct ctx_hdr *hdr;
		u8 *ctx_ptr;

		ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
		hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
		ctx_handle = hdr->ctx_dma;
	}

	/* select the queue */
	qno = smp_processor_id() % ndev->nr_queues;

	sr->cmdq = &ndev->pkt_cmdqs[qno];

	/*
	 * 64-Byte Instruction Format
	 *
	 *  ----------------------
	 *  |      DPTR0         | 8 bytes
	 *  ----------------------
	 *  |  PKT_IN_INSTR_HDR  | 8 bytes
	 *  ----------------------
	 *  |    PKT_IN_HDR      | 16 bytes
	 *  ----------------------
	 *  |    SLC_INFO        | 16 bytes
	 *  ----------------------
	 *  |   Front data       | 16 bytes
	 *  ----------------------
	 */

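	/*
	 * DPTR0 carries the DMA address of the gather component list built
	 * by dma_map_inbufs(), and the SLC rptr below carries the scatter
	 * component list built by dma_map_outbufs().
	 */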
	/* fill the packet instruction */
	/* word 0 */
	sr->instr.dptr0 = cpu_to_be64(sr->in.dma);

	/* word 1 */
	sr->instr.ih.value = 0;
	sr->instr.ih.s.g = 1;
	sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
	sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
	sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);

	/* word 2 */
	sr->instr.irh.value[0] = 0;
	sr->instr.irh.s.uddl = MIN_UDD_LEN;
	/* context length in 64-bit words */
	sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
	/* offset from solicit base port 256 */
	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
	sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
	sr->instr.irh.s.arg = req->ctrl.s.arg;
	sr->instr.irh.s.opcode = req->opcode;
	sr->instr.irh.value[0] = cpu_to_be64(sr->instr.irh.value[0]);

	/* word 3 */
	sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);

	/* word 4 */
	sr->instr.slc.value[0] = 0;
	sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);

	/* word 5 */
	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma);

	/*
	 * No endianness conversion is done on the front data; it goes into
	 * the payload as-is. Put the GP header in the front data.
	 */
	sr->instr.fdata[0] = *((u64 *)&req->gph);
	sr->instr.fdata[1] = 0;
	/* flush the soft_req changes before posting the cmd */
	wmb();

	ret = nitrox_enqueue_request(sr);
	if (ret == -EAGAIN)
		goto send_fail;

	return ret;

send_fail:
	softreq_destroy(sr);
	return ret;
}

static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
{
	return time_after_eq(jiffies, (tstamp + timeout));
}

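/* work handler to retry posting backlogged requests once ring space frees up */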
void backlog_qflush_work(struct work_struct *work)
{
	struct nitrox_cmdq *cmdq;

	cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
	post_backlog_cmds(cmdq);
}

/**
 * process_response_list - process completed requests.
 * @cmdq: Command queue structure
 */
static void process_response_list(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr;
	struct skcipher_request *skreq;
	completion_t callback;
	int req_completed = 0, err = 0, budget;

	/* check all pending requests */
	budget = atomic_read(&cmdq->pending_count);

	while (req_completed < budget) {
		sr = get_first_response_entry(cmdq);
		if (!sr)
			break;

		if (atomic_read(&sr->status) != REQ_POSTED)
			break;

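		/*
		 * Both words were seeded with PENDING_SIG before posting and
		 * stay equal until the SE core writes back the real ORH and
		 * completion values.
		 */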
		/* check orh and completion bytes updates */
		if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) {
			/* request not completed, check for timeout */
			if (!cmd_timeout(sr->tstamp, ndev->timeout))
				break;
			dev_err_ratelimited(DEV(ndev),
					    "Request timeout, orh 0x%016llx\n",
					    READ_ONCE(sr->resp.orh));
		}
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		/* remove from response list */
		response_list_del(sr, cmdq);

		callback = sr->callback;
		skreq = sr->skreq;

		/* ORH error code */
		err = READ_ONCE(sr->resp.orh) & 0xff;
		softreq_destroy(sr);

		if (callback)
			callback(skreq, err);

		req_completed++;
	}
}

/**
 * pkt_slc_resp_handler - post processing of SE responses.
 * @data: bottom half data (struct bh_data pointer passed as unsigned long)
 */
void pkt_slc_resp_handler(unsigned long data)
{
	struct bh_data *bh = (void *)(uintptr_t)(data);
	struct nitrox_cmdq *cmdq = bh->cmdq;
	union nps_pkt_slc_cnts pkt_slc_cnts;

	/* read completion count */
	pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr);
	/* resend the interrupt if more work to do */
	pkt_slc_cnts.s.resend = 1;

	process_response_list(cmdq);

	/*
	 * clear the interrupt with the resend bit enabled; an MSI-X
	 * interrupt is generated if Completion count > Threshold.
	 */
	writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr);
	/* order the writes */
	mmiowb();

	if (atomic_read(&cmdq->backlog_count))
		schedule_work(&cmdq->backlog_qflush);
}