/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written and Maintained by:
 * Manoj Malviya (manojmalviya@chelsio.com)
 * Atul Gupta (atul.gupta@chelsio.com)
 * Jitendra Lulla (jlulla@chelsio.com)
 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 * Harsh Jain (harsh@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>

#include <crypto/aes.h>
#include <crypto/hash.h>

#include "t4_msg.h"
#include "chcr_core.h"
#include "cxgb4_uld.h"

static struct chcr_driver_data drv_data;

typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

#if defined(CONFIG_CHELSIO_TLS_DEVICE)
static const struct tlsdev_ops chcr_ktls_ops = {
	.tls_dev_add = chcr_ktls_dev_add,
	.tls_dev_del = chcr_ktls_dev_del,
};
#endif

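/*
 * Dispatch table mapping CPL opcodes to their handlers. Opcodes with a
 * NULL entry are rejected in chcr_uld_rx_handler().
 */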
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_FW6_PLD] = cpl_fw6_pld_handler,
#if defined(CONFIG_CHELSIO_TLS_DEVICE)
	[CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
	[CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
#endif
};

static struct cxgb4_uld_info chcr_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	/* Max ntxq will be derived from the fw config file */
	.rxq_size = 1024,
	.add = chcr_uld_add,
	.state_change = chcr_uld_state_change,
	.rx_handler = chcr_uld_rx_handler,
#if defined(CONFIG_CHELSIO_TLS_DEVICE)
	.tx_handler = chcr_uld_tx_handler,
	.tlsdev_ops = &chcr_ktls_ops,
#endif
};

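/*
 * Delayed work run while a detach is pending: if requests are still in
 * flight, reschedule up to WQ_RETRY times; once the retries are exhausted,
 * warn and signal detach_comp anyway so the detach cannot block forever.
 */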
static void detach_work_fn(struct work_struct *work)
{
	struct chcr_dev *dev;

	dev = container_of(work, struct chcr_dev, detach_work.work);

	if (atomic_read(&dev->inflight)) {
		dev->wqretry--;
		if (dev->wqretry) {
			pr_debug("Request inflight count %d\n",
				 atomic_read(&dev->inflight));

			schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		} else {
			WARN(1, "CHCR: %d requests still pending\n",
			     atomic_read(&dev->inflight));
			complete(&dev->detach_comp);
		}
	} else {
		complete(&dev->detach_comp);
	}
}

struct uld_ctx *assign_chcr_device(void)
{
	struct uld_ctx *u_ctx = NULL;

	/*
	 * When multiple devices are present in the system, select a
	 * device in round-robin fashion for crypto operations. A given
	 * session, however, must keep using the same device to maintain
	 * request-response ordering.
	 */
	mutex_lock(&drv_data.drv_mutex);
	if (!list_empty(&drv_data.act_dev)) {
		u_ctx = drv_data.last_dev;
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
						  struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	mutex_unlock(&drv_data.drv_mutex);
	return u_ctx;
}

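/*
 * Mark a device attached and move it onto the active list, seeding the
 * round-robin cursor if this is the first active device.
 */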
static void chcr_dev_add(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	dev->state = CHCR_ATTACH;
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_move(&u_ctx->entry, &drv_data.act_dev);
	if (!drv_data.last_dev)
		drv_data.last_dev = u_ctx;
	mutex_unlock(&drv_data.drv_mutex);
}

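/* One-time per-adapter setup; the device starts out on the inactive list. */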
static void chcr_dev_init(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	spin_lock_init(&dev->lock_chcr_dev);
	INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
	init_completion(&dev->detach_comp);
	dev->state = CHCR_INIT;
	dev->wqretry = WQ_RETRY;
	atomic_inc(&drv_data.dev_count);
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
	mutex_unlock(&drv_data.drv_mutex);
}

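/*
 * Retire a device to the inactive list, first advancing the round-robin
 * cursor if it currently points at this device.
 */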
static int chcr_dev_move(struct uld_ctx *u_ctx)
{
	mutex_lock(&drv_data.drv_mutex);
	if (drv_data.last_dev == u_ctx) {
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
						  struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	list_move(&u_ctx->entry, &drv_data.inact_dev);
	if (list_empty(&drv_data.act_dev))
		drv_data.last_dev = NULL;
	atomic_dec(&drv_data.dev_count);
	mutex_unlock(&drv_data.drv_mutex);

	return 0;
}

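/*
 * Handle a CPL_FW6_PLD completion: recover the original crypto request
 * from fw6_pld->data[1], map the MAC/pad error bits in data[0] to
 * -EBADMSG, and hand the result to chcr_handle_resp().
 */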
static int cpl_fw6_pld_handler(struct adapter *adap,
			       unsigned char *input)
{
	struct crypto_async_request *req;
	struct cpl_fw6_pld *fw6_pld;
	u32 ack_err_status = 0;
	int error_status = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
						    fw6_pld->data[1]);

	ack_err_status =
		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
	if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
		error_status = -EBADMSG;
	/* call the completion callback with the error status */
	if (req) {
		error_status = chcr_handle_resp(req, input, error_status);
	} else {
		pr_err("Incorrect request address from the firmware\n");
		return -EFAULT;
	}
	if (error_status)
		atomic_inc(&adap->chcr_stats.error);

	return 0;
}

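/* Hand a fully built work request skb to the LLD for transmission. */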
int chcr_send_wr(struct sk_buff *skb)
{
	return cxgb4_crypto_send(skb->dev, skb);
}

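/*
 * ULD add callback: allocate a per-adapter context, but only for adapters
 * whose firmware configuration enables lookaside crypto.
 */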
static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
	struct uld_ctx *u_ctx;

	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
		return ERR_PTR(-EOPNOTSUPP);

	/* Create the device and add it to the device list */
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx) {
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	u_ctx->lldi = *lld;
	chcr_dev_init(u_ctx);
out:
	return u_ctx;
}

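/*
 * ULD receive callback: dispatch the CPL by opcode. The payload sits
 * inline after the RSS header (rsp[1]) unless a gather list is supplied,
 * in which case it lives at pgl->va.
 */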
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *pgl)
{
	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
	struct chcr_dev *dev = &u_ctx->dev;
	struct adapter *adap = padap(dev);
	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;

	if (!work_handlers[rpl->opcode]) {
		pr_err("Unsupported opcode %d received\n", rpl->opcode);
		return 0;
	}

	if (!pgl)
		work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]);
	else
		work_handlers[rpl->opcode](adap, pgl->va);
	return 0;
}

#if defined(CONFIG_CHELSIO_TLS_DEVICE)
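/*
 * Route skbs carrying kTLS plaintext (skb->decrypted set) to the kTLS
 * transmit path; all other traffic stays on the normal path.
 */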
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
{
	if (skb->decrypted)
		return chcr_ktls_xmit(skb, dev);
	return 0;
}
#endif /* CONFIG_CHELSIO_TLS_DEVICE */

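/*
 * Quiesce a device ahead of detach: mark it CHCR_DETACH, wait (via
 * detach_work) for in-flight requests to drain, then retire it to the
 * inactive list.
 */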
static void chcr_detach_device(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev = &u_ctx->dev;

	if (dev->state == CHCR_DETACH) {
		pr_debug("Detach event received for an already detached device\n");
		return;
	}
	dev->state = CHCR_DETACH;
	if (atomic_read(&dev->inflight) != 0) {
		schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		wait_for_completion(&dev->detach_comp);
	}

	/* Move u_ctx to the inact_dev list */
	chcr_dev_move(u_ctx);
}

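/* React to LLD state transitions: attach on UP, quiesce on DETACH. */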
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct uld_ctx *u_ctx = handle;
	int ret = 0;

	switch (state) {
	case CXGB4_STATE_UP:
		if (u_ctx->dev.state != CHCR_INIT) {
			/* Already initialised */
			return 0;
		}
		chcr_dev_add(u_ctx);
		ret = start_crypto();
		break;

	case CXGB4_STATE_DETACH:
		chcr_detach_device(u_ctx);
		if (!atomic_read(&drv_data.dev_count))
			stop_crypto();
		break;

	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	default:
		break;
	}
	return ret;
}

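/* Module init: set up the driver-wide device lists and register the ULD. */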
static int __init chcr_crypto_init(void)
{
	INIT_LIST_HEAD(&drv_data.act_dev);
	INIT_LIST_HEAD(&drv_data.inact_dev);
	atomic_set(&drv_data.dev_count, 0);
	mutex_init(&drv_data.drv_mutex);
	drv_data.last_dev = NULL;
	cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);

	return 0;
}

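/* Module exit: unregister the ULD and free every per-adapter context. */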
static void __exit chcr_crypto_exit(void)
{
	struct uld_ctx *u_ctx, *tmp;
	struct adapter *adap;

	stop_crypto();
	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
	/* Remove all devices from the lists */
	mutex_lock(&drv_data.drv_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	mutex_unlock(&drv_data.drv_mutex);
}

module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto co-processor driver for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);