// SPDX-License-Identifier: GPL-2.0
#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci_regs.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>

#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

#define CRYPTO_CTX_SIZE	256

/* command queue alignments */
#define PKT_IN_ALIGN	16

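/*
 * cmdq_common_init - allocate and initialize a command queue.
 * @cmdq: command queue to set up
 *
 * Allocates a DMA-coherent buffer large enough for qlen instructions
 * plus alignment slack, records the PKT_IN_ALIGN-aligned head and dma
 * addresses, and initializes the locks, lists, work item and counters
 * used for command submission, responses and backlog handling.
 */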
static int cmdq_common_init(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	u32 qsize;

	qsize = (ndev->qlen) * cmdq->instr_size;
	cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev),
						   (qsize + PKT_IN_ALIGN),
						   &cmdq->dma_unaligned,
						   GFP_KERNEL);
	if (!cmdq->head_unaligned)
		return -ENOMEM;

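	/* move head and dma up to the next PKT_IN_ALIGN boundary */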
	cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
	cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
	cmdq->qsize = (qsize + PKT_IN_ALIGN);

	spin_lock_init(&cmdq->response_lock);
	spin_lock_init(&cmdq->cmdq_lock);
	spin_lock_init(&cmdq->backlog_lock);

	INIT_LIST_HEAD(&cmdq->response_head);
	INIT_LIST_HEAD(&cmdq->backlog_head);
	INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work);

	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);
	return 0;
}

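/*
 * cmdq_common_cleanup - release a command queue.
 * @cmdq: command queue to tear down
 *
 * Cancels any pending backlog flush work, frees the DMA-coherent ring
 * and resets the queue bookkeeping fields.
 */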
static void cmdq_common_cleanup(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;

	cancel_work_sync(&cmdq->backlog_qflush);

	dma_free_coherent(DEV(ndev), cmdq->qsize,
			  cmdq->head_unaligned, cmdq->dma_unaligned);

	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);

	cmdq->dbell_csr_addr = NULL;
	cmdq->head = NULL;
	cmdq->dma = 0;
	cmdq->qsize = 0;
	cmdq->instr_size = 0;
}

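/*
 * nitrox_cleanup_pkt_cmdqs - release all packet command queues.
 * @ndev: NITROX device
 */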
static void nitrox_cleanup_pkt_cmdqs(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];

		cmdq_common_cleanup(cmdq);
	}
	kfree(ndev->pkt_cmdqs);
	ndev->pkt_cmdqs = NULL;
}

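/*
 * nitrox_init_pkt_cmdqs - allocate and initialize the packet command queues.
 * @ndev: NITROX device
 *
 * Sets up one command queue per configured queue, including the SE ring
 * doorbell CSR address for each. On failure, only the queues initialized
 * so far are unwound.
 */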
static int nitrox_init_pkt_cmdqs(struct nitrox_device *ndev)
{
	int i, err, size;

	size = ndev->nr_queues * sizeof(struct nitrox_cmdq);
	ndev->pkt_cmdqs = kzalloc(size, GFP_KERNEL);
	if (!ndev->pkt_cmdqs)
		return -ENOMEM;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq;
		u64 offset;

		cmdq = &ndev->pkt_cmdqs[i];
		cmdq->ndev = ndev;
		cmdq->qno = i;
		cmdq->instr_size = sizeof(struct nps_pkt_instr);

		offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
		/* SE ring doorbell address for this queue */
		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);

		err = cmdq_common_init(cmdq);
		if (err)
			goto pkt_cmdq_fail;
	}
	return 0;

pkt_cmdq_fail:
	/* unwind only the queues that were fully initialized */
	while (i--)
		cmdq_common_cleanup(&ndev->pkt_cmdqs[i]);
	kfree(ndev->pkt_cmdqs);
	ndev->pkt_cmdqs = NULL;
	return err;
}

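/*
 * create_crypto_dma_pool - create the per-device crypto context DMA pool.
 * @ndev: NITROX device
 *
 * Each pool entry holds a struct ctx_hdr followed by the crypto context
 * itself, aligned to a 16-byte boundary.
 */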
static int create_crypto_dma_pool(struct nitrox_device *ndev)
{
	size_t size;

	/* crypto context pool, 16-byte aligned */
	size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
	ndev->ctx_pool = dma_pool_create("crypto-context",
					 DEV(ndev), size, 16, 0);
	if (!ndev->ctx_pool)
		return -ENOMEM;

	return 0;
}

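/*
 * destroy_crypto_dma_pool - destroy the crypto context DMA pool, if any.
 * @ndev: NITROX device
 */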
static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
{
	if (!ndev->ctx_pool)
		return;

	dma_pool_destroy(ndev->ctx_pool);
	ndev->ctx_pool = NULL;
}

/**
 * crypto_alloc_context - Allocate crypto context from pool
 * @ndev: NITROX device
 *
 * Return: pointer to the crypto context, or NULL on failure.
 */
void *crypto_alloc_context(struct nitrox_device *ndev)
{
	struct ctx_hdr *ctx;
	void *vaddr;
	dma_addr_t dma;

	vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_ATOMIC, &dma);
	if (!vaddr)
		return NULL;

	/* fill the metadata header kept in front of the context */
	ctx = vaddr;
	ctx->pool = ndev->ctx_pool;
	ctx->dma = dma;
	ctx->ctx_dma = dma + sizeof(struct ctx_hdr);

	return ((u8 *)vaddr + sizeof(struct ctx_hdr));
}

/**
 * crypto_free_context - Free crypto context to pool
 * @ctx: context to free
 */
void crypto_free_context(void *ctx)
{
	struct ctx_hdr *ctxp;

	if (!ctx)
		return;

	/* step back to the metadata header placed before the context */
	ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
	dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
}
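/*
 * Typical pairing of the two helpers above (illustrative sketch only,
 * not code taken from this driver):
 *
 *	void *ctx = crypto_alloc_context(ndev);
 *	if (!ctx)
 *		return -ENOMEM;
 *	...program the SE context through ctx...
 *	crypto_free_context(ctx);
 */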

/**
 * nitrox_common_sw_init - allocate software resources.
 * @ndev: NITROX device
 *
 * Allocates the crypto context pool and the packet command queues.
 *
 * Return: 0 on success, or a negative error code on error.
 */
int nitrox_common_sw_init(struct nitrox_device *ndev)
{
	int err;

	/* per device crypto context pool */
	err = create_crypto_dma_pool(ndev);
	if (err)
		return err;

	err = nitrox_init_pkt_cmdqs(ndev);
	if (err)
		destroy_crypto_dma_pool(ndev);

	return err;
}

/**
 * nitrox_common_sw_cleanup - free software resources.
 * @ndev: NITROX device
 */
void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_cleanup_pkt_cmdqs(ndev);
	destroy_crypto_dma_pool(ndev);
}