// SPDX-License-Identifier: GPL-2.0
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <crypto/internal/skcipher.h>

#include "nitrox_dev.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

/* SLC_STORE_INFO */
#define MIN_UDD_LEN 16
/* PKT_IN_HDR + SLC_STORE_INFO */
#define FDATA_SIZE 32
/* Base destination port for the solicited requests */
#define SOLICIT_BASE_DPORT 256
#define PENDING_SIG	0xFFFFFFFFFFFFFFFFUL

#define REQ_NOT_POSTED 1
#define REQ_BACKLOG    2
#define REQ_POSTED     3

/*
 * Response codes from SE microcode
 * 0x00 - Success
 *   Completion with no error
 * 0x43 - ERR_GC_DATA_LEN_INVALID
 *   Invalid Data length if Encryption Data length is
 *   less than 16 bytes for AES-XTS and AES-CTS.
 * 0x45 - ERR_GC_CTX_LEN_INVALID
 *   Invalid context length: CTXL != 23 words.
 * 0x4F - ERR_GC_DOCSIS_CIPHER_INVALID
 *   DOCSIS support is enabled with other than
 *   AES/DES-CBC mode encryption.
 * 0x50 - ERR_GC_DOCSIS_OFFSET_INVALID
 *   Authentication offset is other than 0 with
 *   Encryption IV source = 0.
 *   Authentication offset is other than 8 (DES)/16 (AES)
 *   with Encryption IV source = 1.
 * 0x51 - ERR_GC_CRC32_INVALID_SELECTION
 *   CRC32 is enabled for other than DOCSIS encryption.
 * 0x52 - ERR_GC_AES_CCM_FLAG_INVALID
 *   Invalid flag options in AES-CCM IV.
 */
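
/*
 * The SE core reports one of the codes above in the low byte of the
 * ORH; process_response_list() below extracts it as
 * "READ_ONCE(sr->resp.orh) & 0xff" and hands it to the completion
 * callback.
 */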

/**
 * softreq_unmap_sgbufs - unmap and free the sg lists.
 * @sr: soft request
 */
static void softreq_unmap_sgbufs(struct nitrox_softreq *sr)
{
	struct nitrox_device *ndev = sr->ndev;
	struct device *dev = DEV(ndev);
	struct nitrox_sglist *sglist;

	/* unmap in sgbuf */
	sglist = sr->in.sglist;
	if (!sglist)
		goto out_unmap;

	/* unmap iv */
	dma_unmap_single(dev, sglist->dma, sglist->len, DMA_BIDIRECTIONAL);
	/* unmap src sglist */
	dma_unmap_sg(dev, sr->in.buf, (sr->in.map_bufs_cnt - 1), sr->in.dir);
	/* unmap gather component */
	dma_unmap_single(dev, sr->in.dma, sr->in.len, DMA_TO_DEVICE);
	kfree(sr->in.sglist);
	kfree(sr->in.sgcomp);
	sr->in.sglist = NULL;
	sr->in.buf = NULL;
	sr->in.map_bufs_cnt = 0;

out_unmap:
	/* unmap out sgbuf */
	sglist = sr->out.sglist;
	if (!sglist)
		return;

	/* unmap orh */
	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);

	/* unmap dst sglist */
	if (!sr->inplace) {
		dma_unmap_sg(dev, sr->out.buf, (sr->out.map_bufs_cnt - 3),
			     sr->out.dir);
	}
	/* unmap completion */
	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);

	/* unmap scatter component */
	dma_unmap_single(dev, sr->out.dma, sr->out.len, DMA_TO_DEVICE);
	kfree(sr->out.sglist);
	kfree(sr->out.sgcomp);
	sr->out.sglist = NULL;
	sr->out.buf = NULL;
	sr->out.map_bufs_cnt = 0;
}

static void softreq_destroy(struct nitrox_softreq *sr)
{
	softreq_unmap_sgbufs(sr);
	kfree(sr);
}

/**
 * create_sg_component - create SG components for N5 device.
 * @sr: Request structure
 * @sgtbl: SG table
 * @map_nents: number of mapped sg entries
 *
 * Component structure
 *
 *   63     48 47     32 31    16 15      0
 *   --------------------------------------
 *   |   LEN0  |  LEN1  |  LEN2  |  LEN3  |
 *   --------------------------------------
 *   |               PTR0                 |
 *   --------------------------------------
 *   |               PTR1                 |
 *   --------------------------------------
 *   |               PTR2                 |
 *   --------------------------------------
 *   |               PTR3                 |
 *   --------------------------------------
 *
 *   Returns 0 on success or a negative errno code on error.
 */
static int create_sg_component(struct nitrox_softreq *sr,
			       struct nitrox_sgtable *sgtbl, int map_nents)
{
	struct nitrox_device *ndev = sr->ndev;
	struct nitrox_sgcomp *sgcomp;
	struct nitrox_sglist *sglist;
	dma_addr_t dma;
	size_t sz_comp;
	int i, j, nr_sgcomp;

	nr_sgcomp = roundup(map_nents, 4) / 4;

	/* each component holds 4 dma pointers */
	sz_comp = nr_sgcomp * sizeof(*sgcomp);
	sgcomp = kzalloc(sz_comp, sr->gfp);
	if (!sgcomp)
		return -ENOMEM;

	sgtbl->sgcomp = sgcomp;
	sgtbl->nr_sgcomp = nr_sgcomp;

	sglist = sgtbl->sglist;
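	/*
	 * The sglist arrays are allocated rounded up to a multiple of
	 * four entries and zero-filled (see dma_map_inbufs() and
	 * dma_map_outbufs()), so when map_nents is not a multiple of
	 * four the loop below only reads trailing slots that carry
	 * zero lengths and null pointers.
	 */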
	/* populate device sg component */
	for (i = 0; i < nr_sgcomp; i++) {
		for (j = 0; j < 4; j++) {
			sgcomp->len[j] = cpu_to_be16(sglist->len);
			sgcomp->dma[j] = cpu_to_be64(sglist->dma);
			sglist++;
		}
		sgcomp++;
	}
	/* map the device sg component */
	dma = dma_map_single(DEV(ndev), sgtbl->sgcomp, sz_comp, DMA_TO_DEVICE);
	if (dma_mapping_error(DEV(ndev), dma)) {
		kfree(sgtbl->sgcomp);
		sgtbl->sgcomp = NULL;
		return -ENOMEM;
	}

	sgtbl->dma = dma;
	sgtbl->len = sz_comp;

	return 0;
}

/**
 * dma_map_inbufs - DMA map input sglist and create the sglist component
 *                  for N5 device.
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * Returns 0 on success or a negative errno code on error.
 */
static int dma_map_inbufs(struct nitrox_softreq *sr,
			  struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	struct scatterlist *sg = req->src;
	struct nitrox_sglist *glist;
	int i, nents, ret = 0;
	dma_addr_t dma;
	size_t sz;

	nents = sg_nents(req->src);

	/* create gather list for the IV and src entries */
	sz = roundup((1 + nents), 4) * sizeof(*glist);
	glist = kzalloc(sz, sr->gfp);
	if (!glist)
		return -ENOMEM;

	sr->in.sglist = glist;
	/* map IV */
	dma = dma_map_single(dev, &req->iv, req->ivsize, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, dma)) {
		ret = -EINVAL;
		goto iv_map_err;
	}

	sr->in.dir = (req->src == req->dst) ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
	/* map src entries */
	nents = dma_map_sg(dev, req->src, nents, sr->in.dir);
	if (!nents) {
		ret = -EINVAL;
		goto src_map_err;
	}
	sr->in.buf = req->src;

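	/*
	 * Gather list layout: entry 0 carries the IV mapping, entries
	 * 1..nents carry the DMA-mapped source segments, so the final
	 * map_bufs_cnt is 1 + nents.
	 */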
	/* store the mappings */
	glist->len = req->ivsize;
	glist->dma = dma;
	glist++;
	sr->in.total_bytes += req->ivsize;

	for_each_sg(req->src, sg, nents, i) {
		glist->len = sg_dma_len(sg);
		glist->dma = sg_dma_address(sg);
		sr->in.total_bytes += glist->len;
		glist++;
	}
	/* roundup map count to align with entries in sg component */
	sr->in.map_bufs_cnt = (1 + nents);

	/* create NITROX gather component */
	ret = create_sg_component(sr, &sr->in, sr->in.map_bufs_cnt);
	if (ret)
		goto incomp_err;

	return 0;

incomp_err:
	dma_unmap_sg(dev, req->src, nents, sr->in.dir);
	sr->in.map_bufs_cnt = 0;
src_map_err:
	dma_unmap_single(dev, dma, req->ivsize, DMA_BIDIRECTIONAL);
iv_map_err:
	kfree(sr->in.sglist);
	sr->in.sglist = NULL;
	return ret;
}

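/**
 * dma_map_outbufs - DMA map output sglist and create the sglist
 *                   component for N5 device.
 * @sr: Request structure
 * @req: Crypto request structure
 *
 * The scatter list is built as ORH, IV, the destination entries and
 * the completion header, in that order.
 *
 * Returns 0 on success or a negative errno code on error.
 */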
static int dma_map_outbufs(struct nitrox_softreq *sr,
			   struct se_crypto_request *req)
{
	struct device *dev = DEV(sr->ndev);
	struct nitrox_sglist *glist = sr->in.sglist;
	struct nitrox_sglist *slist;
	struct scatterlist *sg;
	int i, nents, map_bufs_cnt, ret = 0;
	size_t sz;

	nents = sg_nents(req->dst);

	/* create scatter list ORH, IV, dst entries and Completion header */
	sz = roundup((3 + nents), 4) * sizeof(*slist);
	slist = kzalloc(sz, sr->gfp);
	if (!slist)
		return -ENOMEM;

	sr->out.sglist = slist;
	sr->out.dir = DMA_BIDIRECTIONAL;
	/* map ORH */
	sr->resp.orh_dma = dma_map_single(dev, &sr->resp.orh, ORH_HLEN,
					  sr->out.dir);
	if (dma_mapping_error(dev, sr->resp.orh_dma)) {
		ret = -EINVAL;
		goto orh_map_err;
	}

	/* map completion */
	sr->resp.completion_dma = dma_map_single(dev, &sr->resp.completion,
						 COMP_HLEN, sr->out.dir);
	if (dma_mapping_error(dev, sr->resp.completion_dma)) {
		ret = -EINVAL;
		goto compl_map_err;
	}

	sr->inplace = (req->src == req->dst) ? true : false;
	/* out of place */
	if (!sr->inplace) {
		nents = dma_map_sg(dev, req->dst, nents, sr->out.dir);
		if (!nents) {
			ret = -EINVAL;
			goto dst_map_err;
		}
	}
	sr->out.buf = req->dst;

	/* store the mappings */
	/* orh */
	slist->len = ORH_HLEN;
	slist->dma = sr->resp.orh_dma;
	slist++;

	/* copy the glist mappings */
	if (sr->inplace) {
		nents = sr->in.map_bufs_cnt - 1;
		map_bufs_cnt = sr->in.map_bufs_cnt;
		while (map_bufs_cnt--) {
			slist->len = glist->len;
			slist->dma = glist->dma;
			slist++;
			glist++;
		}
	} else {
		/* copy iv mapping */
		slist->len = glist->len;
		slist->dma = glist->dma;
		slist++;
		/* copy remaining maps */
		for_each_sg(req->dst, sg, nents, i) {
			slist->len = sg_dma_len(sg);
			slist->dma = sg_dma_address(sg);
			slist++;
		}
	}

	/* completion */
	slist->len = COMP_HLEN;
	slist->dma = sr->resp.completion_dma;

	sr->out.map_bufs_cnt = (3 + nents);

	ret = create_sg_component(sr, &sr->out, sr->out.map_bufs_cnt);
	if (ret)
		goto outcomp_map_err;

	return 0;

outcomp_map_err:
	if (!sr->inplace)
		dma_unmap_sg(dev, req->dst, nents, sr->out.dir);
	sr->out.map_bufs_cnt = 0;
	sr->out.buf = NULL;
dst_map_err:
	dma_unmap_single(dev, sr->resp.completion_dma, COMP_HLEN, sr->out.dir);
	sr->resp.completion_dma = 0;
compl_map_err:
	dma_unmap_single(dev, sr->resp.orh_dma, ORH_HLEN, sr->out.dir);
	sr->resp.orh_dma = 0;
orh_map_err:
	kfree(sr->out.sglist);
	sr->out.sglist = NULL;
	return ret;
}

static inline int softreq_map_iobuf(struct nitrox_softreq *sr,
				    struct se_crypto_request *creq)
{
	int ret;

	ret = dma_map_inbufs(sr, creq);
	if (ret)
		return ret;

	ret = dma_map_outbufs(sr, creq);
	if (ret)
		softreq_unmap_sgbufs(sr);

	return ret;
}

static inline void backlog_list_add(struct nitrox_softreq *sr,
				    struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->backlog);

	spin_lock_bh(&cmdq->backlog_lock);
	list_add_tail(&sr->backlog, &cmdq->backlog_head);
	atomic_inc(&cmdq->backlog_count);
	atomic_set(&sr->status, REQ_BACKLOG);
	spin_unlock_bh(&cmdq->backlog_lock);
}

static inline void response_list_add(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	INIT_LIST_HEAD(&sr->response);

	spin_lock_bh(&cmdq->response_lock);
	list_add_tail(&sr->response, &cmdq->response_head);
	spin_unlock_bh(&cmdq->response_lock);
}

static inline void response_list_del(struct nitrox_softreq *sr,
				     struct nitrox_cmdq *cmdq)
{
	spin_lock_bh(&cmdq->response_lock);
	list_del(&sr->response);
	spin_unlock_bh(&cmdq->response_lock);
}

static struct nitrox_softreq *
get_first_response_entry(struct nitrox_cmdq *cmdq)
{
	return list_first_entry_or_null(&cmdq->response_head,
					struct nitrox_softreq, response);
}

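/*
 * cmdq_full - check whether the command queue can take one more entry.
 *
 * The pending count is incremented optimistically and rolled back if
 * the queue is already at @qlen, so a "false" return also reserves the
 * slot the caller is about to post into.
 */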
static inline bool cmdq_full(struct nitrox_cmdq *cmdq, int qlen)
{
	if (atomic_inc_return(&cmdq->pending_count) > qlen) {
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		return true;
	}
	return false;
}

/**
 * post_se_instr - Post SE instruction to Packet Input ring
 * @sr: Request structure
 * @cmdq: Command queue to post the instruction to
 */
static void post_se_instr(struct nitrox_softreq *sr,
			  struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = sr->ndev;
	union nps_pkt_in_instr_baoff_dbell pkt_in_baoff_dbell;
	u64 offset;
	u8 *ent;

	spin_lock_bh(&cmdq->cmdq_lock);

	/* get the next write offset */
	offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(cmdq->qno);
	pkt_in_baoff_dbell.value = nitrox_read_csr(ndev, offset);
	/* copy the instruction */
	ent = cmdq->head + pkt_in_baoff_dbell.s.aoff;
	memcpy(ent, &sr->instr, cmdq->instr_size);
	/* flush the command queue updates */
	dma_wmb();

	sr->tstamp = jiffies;
	atomic_set(&sr->status, REQ_POSTED);
	response_list_add(sr, cmdq);

	/* Ring doorbell with count 1 */
	writeq(1, cmdq->dbell_csr_addr);
	/* orders the doorbell rings */
	mmiowb();

	spin_unlock_bh(&cmdq->cmdq_lock);
}

static int post_backlog_cmds(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr, *tmp;
	int ret = 0;

	spin_lock_bh(&cmdq->backlog_lock);

	list_for_each_entry_safe(sr, tmp, &cmdq->backlog_head, backlog) {
		struct skcipher_request *skreq;

		/* submit until space available */
		if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
			ret = -EBUSY;
			break;
		}
		/* delete from backlog list */
		list_del(&sr->backlog);
		atomic_dec(&cmdq->backlog_count);
		/* sync with other cpus */
		smp_mb__after_atomic();

		skreq = sr->skreq;
		/* post the command */
		post_se_instr(sr, cmdq);

		/* backlog requests are posted, wakeup with -EINPROGRESS */
		skcipher_request_complete(skreq, -EINPROGRESS);
	}
	spin_unlock_bh(&cmdq->backlog_lock);

	return ret;
}

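/*
 * nitrox_enqueue_request - queue a soft request to the hardware.
 *
 * Returns -EINPROGRESS when the request (and any pending backlog) was
 * posted to the ring, -EBUSY when it had to be placed on the backlog
 * list, or -EAGAIN when the queue is full and the caller did not set
 * CRYPTO_TFM_REQ_MAY_BACKLOG.
 */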
static int nitrox_enqueue_request(struct nitrox_softreq *sr)
{
	struct nitrox_cmdq *cmdq = sr->cmdq;
	struct nitrox_device *ndev = sr->ndev;
	int ret = -EBUSY;

	if (unlikely(cmdq_full(cmdq, ndev->qlen))) {
		if (!(sr->flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
			return -EAGAIN;

		backlog_list_add(sr, cmdq);
	} else {
		ret = post_backlog_cmds(cmdq);
		if (ret) {
			backlog_list_add(sr, cmdq);
			return ret;
		}
		post_se_instr(sr, cmdq);
		ret = -EINPROGRESS;
	}
	return ret;
}

/**
 * nitrox_process_se_request - Send request to SE core
 * @ndev: NITROX device
 * @req: Crypto request
 * @callback: Completion callback, invoked from response processing
 * @skreq: skcipher request passed back to @callback
 *
 * Returns -EINPROGRESS if the request was posted, -EBUSY if it was
 * backlogged, or a negative errno code on error.
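 *
 * Example (a minimal caller sketch; the field values shown are
 * illustrative assumptions, not taken from an in-tree user):
 *
 *	creq->flags = skreq->base.flags;
 *	creq->gfp = (skreq->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ?
 *		    GFP_KERNEL : GFP_ATOMIC;
 *	creq->opcode = opcode;
 *	creq->ctrl.s.arg = arg;
 *	creq->gph = gph;
 *	creq->ctx_handle = ctx_handle;
 *	creq->src = skreq->src;
 *	creq->dst = skreq->dst;
 *
 *	ret = nitrox_process_se_request(ndev, creq, callback, skreq);
 *	if (ret != -EINPROGRESS && ret != -EBUSY)
 *		return ret;	// not queued, nothing in flight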
 */
int nitrox_process_se_request(struct nitrox_device *ndev,
			      struct se_crypto_request *req,
			      completion_t callback,
			      struct skcipher_request *skreq)
{
	struct nitrox_softreq *sr;
	dma_addr_t ctx_handle = 0;
	int qno, ret = 0;

	if (!nitrox_ready(ndev))
		return -ENODEV;

	sr = kzalloc(sizeof(*sr), req->gfp);
	if (!sr)
		return -ENOMEM;

	sr->ndev = ndev;
	sr->flags = req->flags;
	sr->gfp = req->gfp;
	sr->callback = callback;
	sr->skreq = skreq;

	atomic_set(&sr->status, REQ_NOT_POSTED);

	WRITE_ONCE(sr->resp.orh, PENDING_SIG);
	WRITE_ONCE(sr->resp.completion, PENDING_SIG);

	ret = softreq_map_iobuf(sr, req);
	if (ret) {
		kfree(sr);
		return ret;
	}

	/* get the context handle */
	if (req->ctx_handle) {
		struct ctx_hdr *hdr;
		u8 *ctx_ptr;

		ctx_ptr = (u8 *)(uintptr_t)req->ctx_handle;
		hdr = (struct ctx_hdr *)(ctx_ptr - sizeof(struct ctx_hdr));
		ctx_handle = hdr->ctx_dma;
	}

	/* select the queue */
	qno = smp_processor_id() % ndev->nr_queues;

	sr->cmdq = &ndev->pkt_cmdqs[qno];

	/*
	 * 64-Byte Instruction Format
	 *
	 *  ----------------------
	 *  |      DPTR0         | 8 bytes
	 *  ----------------------
	 *  |  PKT_IN_INSTR_HDR  | 8 bytes
	 *  ----------------------
	 *  |    PKT_IN_HDR      | 16 bytes
	 *  ----------------------
	 *  |    SLC_INFO        | 16 bytes
	 *  ----------------------
	 *  |   Front data       | 16 bytes
	 *  ----------------------
	 */

	/* fill the packet instruction */
	/* word 0 */
	sr->instr.dptr0 = cpu_to_be64(sr->in.dma);

	/* word 1 */
	sr->instr.ih.value = 0;
	sr->instr.ih.s.g = 1;
	sr->instr.ih.s.gsz = sr->in.map_bufs_cnt;
	sr->instr.ih.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.ih.s.fsz = FDATA_SIZE + sizeof(struct gphdr);
	sr->instr.ih.s.tlen = sr->instr.ih.s.fsz + sr->in.total_bytes;
	sr->instr.ih.value = cpu_to_be64(sr->instr.ih.value);

	/* word 2 */
	sr->instr.irh.value[0] = 0;
	sr->instr.irh.s.uddl = MIN_UDD_LEN;
	/* context length in 64-bit words */
	sr->instr.irh.s.ctxl = (req->ctrl.s.ctxl / 8);
	/* offset from solicit base port 256 */
	sr->instr.irh.s.destport = SOLICIT_BASE_DPORT + qno;
	sr->instr.irh.s.ctxc = req->ctrl.s.ctxc;
	sr->instr.irh.s.arg = req->ctrl.s.arg;
	sr->instr.irh.s.opcode = req->opcode;
	sr->instr.irh.value[0] = cpu_to_be64(sr->instr.irh.value[0]);

	/* word 3 */
	sr->instr.irh.s.ctxp = cpu_to_be64(ctx_handle);

	/* word 4 */
	sr->instr.slc.value[0] = 0;
	sr->instr.slc.s.ssz = sr->out.map_bufs_cnt;
	sr->instr.slc.value[0] = cpu_to_be64(sr->instr.slc.value[0]);

	/* word 5 */
	sr->instr.slc.s.rptr = cpu_to_be64(sr->out.dma);

	/*
	 * No endianness conversion for front data; it goes out as
	 * payload. Put the GP header in the front data.
	 */
	sr->instr.fdata[0] = *((u64 *)&req->gph);
	sr->instr.fdata[1] = 0;
	/* flush the soft_req changes before posting the cmd */
	wmb();

	ret = nitrox_enqueue_request(sr);
	if (ret == -EAGAIN)
		goto send_fail;

	return ret;

send_fail:
	softreq_destroy(sr);
	return ret;
}

static inline int cmd_timeout(unsigned long tstamp, unsigned long timeout)
{
	return time_after_eq(jiffies, (tstamp + timeout));
}

void backlog_qflush_work(struct work_struct *work)
{
	struct nitrox_cmdq *cmdq;

	cmdq = container_of(work, struct nitrox_cmdq, backlog_qflush);
	post_backlog_cmds(cmdq);
}

/**
 * process_response_list - process completed requests
 * @cmdq: Command queue structure
 *
 * Walks the response list of @cmdq and completes requests whose ORH
 * and completion words have been updated by the SE core, or which
 * have timed out.
 */
static void process_response_list(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	struct nitrox_softreq *sr;
	struct skcipher_request *skreq;
	completion_t callback;
	int req_completed = 0, err = 0, budget;

	/* check all pending requests */
	budget = atomic_read(&cmdq->pending_count);

	while (req_completed < budget) {
		sr = get_first_response_entry(cmdq);
		if (!sr)
			break;

		if (atomic_read(&sr->status) != REQ_POSTED)
			break;

		/*
		 * Check the ORH and completion word updates; both are
		 * initialized to PENDING_SIG, so equality means the SE
		 * core has not written a result yet.
		 */
		if (READ_ONCE(sr->resp.orh) == READ_ONCE(sr->resp.completion)) {
			/* request not completed, check for timeout */
			if (!cmd_timeout(sr->tstamp, ndev->timeout))
				break;
			dev_err_ratelimited(DEV(ndev),
					    "Request timeout, orh 0x%016llx\n",
					    READ_ONCE(sr->resp.orh));
		}
		atomic_dec(&cmdq->pending_count);
		/* sync with other cpus */
		smp_mb__after_atomic();
		/* remove from response list */
		response_list_del(sr, cmdq);

		callback = sr->callback;
		skreq = sr->skreq;

		/* ORH error code */
		err = READ_ONCE(sr->resp.orh) & 0xff;
		softreq_destroy(sr);

		if (callback)
			callback(skreq, err);

		req_completed++;
	}
}

/**
 * pkt_slc_resp_handler - post processing of SE responses
 * @data: opaque pointer to the per-ring &struct bh_data
 */
void pkt_slc_resp_handler(unsigned long data)
{
	struct bh_data *bh = (void *)(uintptr_t)(data);
	struct nitrox_cmdq *cmdq = bh->cmdq;
	union nps_pkt_slc_cnts pkt_slc_cnts;

	/* read completion count */
	pkt_slc_cnts.value = readq(bh->completion_cnt_csr_addr);
	/* resend the interrupt if more work to do */
	pkt_slc_cnts.s.resend = 1;

	process_response_list(cmdq);

	/*
	 * clear the interrupt with the resend bit enabled; an MSI-X
	 * interrupt is generated if the completion count exceeds the
	 * threshold
	 */
	writeq(pkt_slc_cnts.value, bh->completion_cnt_csr_addr);
	/* order the writes */
	mmiowb();

	if (atomic_read(&cmdq->backlog_count))
		schedule_work(&cmdq->backlog_qflush);
}