#include <linux/cpumask.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci_regs.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>

#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_req.h"
#include "nitrox_csr.h"

#define CRYPTO_CTX_SIZE	256

/* command queue alignments */
#define PKT_IN_ALIGN	16

static int cmdq_common_init(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;
	u32 qsize;

	qsize = ndev->qlen * cmdq->instr_size;
	/* over-allocate so an aligned window of qsize bytes always fits */
	cmdq->head_unaligned = dma_zalloc_coherent(DEV(ndev),
						   qsize + PKT_IN_ALIGN,
						   &cmdq->dma_unaligned,
						   GFP_KERNEL);
	if (!cmdq->head_unaligned)
		return -ENOMEM;

	cmdq->head = PTR_ALIGN(cmdq->head_unaligned, PKT_IN_ALIGN);
	cmdq->dma = PTR_ALIGN(cmdq->dma_unaligned, PKT_IN_ALIGN);
	cmdq->qsize = qsize + PKT_IN_ALIGN;

	spin_lock_init(&cmdq->response_lock);
	spin_lock_init(&cmdq->cmdq_lock);
	spin_lock_init(&cmdq->backlog_lock);

	INIT_LIST_HEAD(&cmdq->response_head);
	INIT_LIST_HEAD(&cmdq->backlog_head);
	INIT_WORK(&cmdq->backlog_qflush, backlog_qflush_work);

	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);
	return 0;
}
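
/*
 * Alignment note (illustrative numbers, not taken from the hardware
 * manual): with, say, qlen = 1024 and instr_size = 64, qsize is 64 KB.
 * Allocating qsize + PKT_IN_ALIGN bytes guarantees that a 16-byte
 * aligned window of qsize bytes fits inside the buffer whatever start
 * address the allocator returns. If dma_unaligned came back as
 * 0x10008, PTR_ALIGN rounds it up to 0x10010, and head is advanced by
 * the same 8 bytes, so the CPU and device views of the ring stay in
 * step.
 */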

static void cmdq_common_cleanup(struct nitrox_cmdq *cmdq)
{
	struct nitrox_device *ndev = cmdq->ndev;

	/* nothing to undo if this queue was never initialized (error path) */
	if (!cmdq->head_unaligned)
		return;

	cancel_work_sync(&cmdq->backlog_qflush);

	dma_free_coherent(DEV(ndev), cmdq->qsize,
			  cmdq->head_unaligned, cmdq->dma_unaligned);

	atomic_set(&cmdq->pending_count, 0);
	atomic_set(&cmdq->backlog_count, 0);

	cmdq->dbell_csr_addr = NULL;
	cmdq->head_unaligned = NULL;
	cmdq->head = NULL;
	cmdq->dma = 0;
	cmdq->qsize = 0;
	cmdq->instr_size = 0;
}

static void nitrox_cleanup_pkt_cmdqs(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq = &ndev->pkt_cmdqs[i];

		cmdq_common_cleanup(cmdq);
	}
	kfree(ndev->pkt_cmdqs);
	ndev->pkt_cmdqs = NULL;
}

static int nitrox_init_pkt_cmdqs(struct nitrox_device *ndev)
{
	int i, err;

	ndev->pkt_cmdqs = kcalloc(ndev->nr_queues,
				  sizeof(struct nitrox_cmdq), GFP_KERNEL);
	if (!ndev->pkt_cmdqs)
		return -ENOMEM;

	for (i = 0; i < ndev->nr_queues; i++) {
		struct nitrox_cmdq *cmdq;
		u64 offset;

		cmdq = &ndev->pkt_cmdqs[i];
		cmdq->ndev = ndev;
		cmdq->qno = i;
		cmdq->instr_size = sizeof(struct nps_pkt_instr);

		/* SE ring doorbell address for this queue */
		offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
		cmdq->dbell_csr_addr = NITROX_CSR_ADDR(ndev, offset);

		err = cmdq_common_init(cmdq);
		if (err)
			goto pkt_cmdq_fail;
	}
	return 0;

pkt_cmdq_fail:
	nitrox_cleanup_pkt_cmdqs(ndev);
	return err;
}
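
/*
 * Hedged sketch of how a command is posted to one of these rings; the
 * real submit path lives in the request-manager code, not in this
 * file. The idea: copy the instruction into the ring at the current
 * write index, then write the number of newly queued commands to the
 * ring's doorbell CSR to kick the engine:
 *
 *	memcpy(cmdq->head + idx * cmdq->instr_size, instr,
 *	       cmdq->instr_size);
 *	writeq(1, cmdq->dbell_csr_addr);
 */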

static int create_crypto_dma_pool(struct nitrox_device *ndev)
{
	size_t size;

	/* Crypto context pool, 16-byte aligned */
	size = CRYPTO_CTX_SIZE + sizeof(struct ctx_hdr);
	ndev->ctx_pool = dma_pool_create("crypto-context",
					 DEV(ndev), size, 16, 0);
	if (!ndev->ctx_pool)
		return -ENOMEM;

	return 0;
}

static void destroy_crypto_dma_pool(struct nitrox_device *ndev)
{
	if (!ndev->ctx_pool)
		return;

	dma_pool_destroy(ndev->ctx_pool);
	ndev->ctx_pool = NULL;
}

/**
 * crypto_alloc_context - Allocate crypto context from pool
 * @ndev: NITROX device
 *
 * Return: pointer to the usable context area (just past the hidden
 * struct ctx_hdr), or NULL on allocation failure.
 */
void *crypto_alloc_context(struct nitrox_device *ndev)
{
	struct ctx_hdr *ctx;
	void *vaddr;
	dma_addr_t dma;

	vaddr = dma_pool_zalloc(ndev->ctx_pool, GFP_ATOMIC, &dma);
	if (!vaddr)
		return NULL;

	/* fill the metadata header that precedes the context area */
	ctx = vaddr;
	ctx->pool = ndev->ctx_pool;
	ctx->dma = dma;
	ctx->ctx_dma = dma + sizeof(struct ctx_hdr);

	return ((u8 *)vaddr + sizeof(struct ctx_hdr));
}

/**
 * crypto_free_context - Free crypto context to pool
 * @ctx: context to free
 */
void crypto_free_context(void *ctx)
{
	struct ctx_hdr *ctxp;

	if (!ctx)
		return;

	/* step back to the hidden header to recover the pool and dma handle */
	ctxp = (struct ctx_hdr *)((u8 *)ctx - sizeof(struct ctx_hdr));
	dma_pool_free(ctxp->pool, ctxp, ctxp->dma);
}
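
/*
 * Usage sketch (illustrative, not lifted from the driver's callers):
 * each pool entry is laid out as [struct ctx_hdr][CRYPTO_CTX_SIZE
 * bytes], and callers only ever see a pointer to the second part:
 *
 *	void *ctx = crypto_alloc_context(ndev);
 *
 *	if (!ctx)
 *		return -ENOMEM;
 *	... program up to CRYPTO_CTX_SIZE bytes of device context ...
 *	crypto_free_context(ctx);
 */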

/**
 * nitrox_common_sw_init - allocate software resources.
 * @ndev: NITROX device
 *
 * Allocates the crypto context pool and the per-ring command queues.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int nitrox_common_sw_init(struct nitrox_device *ndev)
{
	int err;

	/* per device crypto context pool */
	err = create_crypto_dma_pool(ndev);
	if (err)
		return err;

	err = nitrox_init_pkt_cmdqs(ndev);
	if (err)
		destroy_crypto_dma_pool(ndev);

	return err;
}

/**
 * nitrox_common_sw_cleanup - free software resources.
 * @ndev: NITROX device
 */
void nitrox_common_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_cleanup_pkt_cmdqs(ndev);
	destroy_crypto_dma_pool(ndev);
}
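
/*
 * Typical call sequence (a sketch of the probe/remove pairing; the
 * actual callers live in the driver's PCI setup code):
 *
 *	err = nitrox_common_sw_init(ndev);
 *	if (err)
 *		return err;
 *	... bring up hardware, register crypto algorithms ...
 *	nitrox_common_sw_cleanup(ndev);
 */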