/**
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written and Maintained by:
 * Manoj Malviya (manojmalviya@chelsio.com)
 * Atul Gupta (atul.gupta@chelsio.com)
 * Jitendra Lulla (jlulla@chelsio.com)
 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 * Harsh Jain (harsh@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>

#include <crypto/aes.h>
#include <crypto/hash.h>

#include "t4_msg.h"
#include "chcr_core.h"
#include "cxgb4_uld.h"

static struct chcr_driver_data drv_data;

typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

#if defined(CONFIG_CHELSIO_TLS_DEVICE)
static const struct tlsdev_ops chcr_ktls_ops = {
	.tls_dev_add = chcr_ktls_dev_add,
	.tls_dev_del = chcr_ktls_dev_del,
};
#endif

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
static void update_netdev_features(void);
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

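/*
 * Per-opcode dispatch table for firmware replies: chcr_uld_rx_handler()
 * indexes this table with the CPL opcode of each incoming message.  Only
 * CPL_FW6_PLD is handled unconditionally; the kTLS opcodes are wired up
 * only when CONFIG_CHELSIO_TLS_DEVICE is enabled.
 */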
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_FW6_PLD] = cpl_fw6_pld_handler,
#ifdef CONFIG_CHELSIO_TLS_DEVICE
	[CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
	[CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
#endif
};

static struct cxgb4_uld_info chcr_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	/* Max ntxq will be derived from fw config file */
	.rxq_size = 1024,
	.add = chcr_uld_add,
	.state_change = chcr_uld_state_change,
	.rx_handler = chcr_uld_rx_handler,
#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
	.tx_handler = chcr_uld_tx_handler,
#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
#if defined(CONFIG_CHELSIO_TLS_DEVICE)
	.tlsdev_ops = &chcr_ktls_ops,
#endif
};

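/*
 * Delayed work used while detaching a device: as long as requests are still
 * in flight it re-arms itself (up to the remaining wqretry budget) and only
 * then signals detach_comp so chcr_detach_device() can finish the detach.
 * If the retries are exhausted with requests still pending, it warns and
 * completes anyway.
 */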
static void detach_work_fn(struct work_struct *work)
{
	struct chcr_dev *dev;

	dev = container_of(work, struct chcr_dev, detach_work.work);

	if (atomic_read(&dev->inflight)) {
		dev->wqretry--;
		if (dev->wqretry) {
			pr_debug("Request Inflight Count %d\n",
				atomic_read(&dev->inflight));

			schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		} else {
			WARN(1, "CHCR:%d request Still Pending\n",
				atomic_read(&dev->inflight));
			complete(&dev->detach_comp);
		}
	} else {
		complete(&dev->detach_comp);
	}
}

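/*
 * assign_chcr_device() hands out the next active device, or NULL when no
 * device is active.  A minimal usage sketch (illustrative only; the real
 * callers live in the chcr algorithm code, and the context layout shown
 * below is an assumption, not taken from this file):
 *
 *	struct uld_ctx *u_ctx = assign_chcr_device();
 *
 *	if (!u_ctx)
 *		return -ENODEV;
 *	ctx->dev = &u_ctx->dev;	// cache it: the whole session keeps
 *				// using this one device
 */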
struct uld_ctx *assign_chcr_device(void)
{
	struct uld_ctx *u_ctx = NULL;

	/*
	 * When multiple devices are present in the system, select a
	 * device in round-robin fashion for crypto operations. Note that
	 * a given session must keep using the same device to maintain
	 * request-response ordering.
	 */
	mutex_lock(&drv_data.drv_mutex);
	if (!list_empty(&drv_data.act_dev)) {
		u_ctx = drv_data.last_dev;
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
						  struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	mutex_unlock(&drv_data.drv_mutex);
	return u_ctx;
}

static void chcr_dev_add(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	dev->state = CHCR_ATTACH;
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_move(&u_ctx->entry, &drv_data.act_dev);
	if (!drv_data.last_dev)
		drv_data.last_dev = u_ctx;
	mutex_unlock(&drv_data.drv_mutex);
}

static void chcr_dev_init(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	spin_lock_init(&dev->lock_chcr_dev);
	INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
	init_completion(&dev->detach_comp);
	dev->state = CHCR_INIT;
	dev->wqretry = WQ_RETRY;
	atomic_inc(&drv_data.dev_count);
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
	mutex_unlock(&drv_data.drv_mutex);
}

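/*
 * Move a device back to the inactive list, advancing the round-robin
 * cursor (last_dev) first if it currently points at this device, and
 * drop the active device count.
 */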
static int chcr_dev_move(struct uld_ctx *u_ctx)
{
	mutex_lock(&drv_data.drv_mutex);
	if (drv_data.last_dev == u_ctx) {
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
						  struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	list_move(&u_ctx->entry, &drv_data.inact_dev);
	if (list_empty(&drv_data.act_dev))
		drv_data.last_dev = NULL;
	atomic_dec(&drv_data.dev_count);
	mutex_unlock(&drv_data.drv_mutex);

	return 0;
}

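/*
 * Completion handler for CPL_FW6_PLD replies.  The firmware echoes back the
 * address of the originating crypto_async_request in fw6_pld->data[1]
 * (presumably stashed there when the work request was built) and reports
 * MAC/pad errors in a 32-bit status word within data[0]; those are mapped
 * to -EBADMSG before the request's completion path is invoked via
 * chcr_handle_resp().
 */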
static int cpl_fw6_pld_handler(struct adapter *adap,
			       unsigned char *input)
{
	struct crypto_async_request *req;
	struct cpl_fw6_pld *fw6_pld;
	u32 ack_err_status = 0;
	int error_status = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
						    fw6_pld->data[1]);

	ack_err_status =
		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
	if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
		error_status = -EBADMSG;
	/* call the completion callback with the (possibly failed) status */
	if (req) {
		error_status = chcr_handle_resp(req, input, error_status);
	} else {
		pr_err("Incorrect request address from the firmware\n");
		return -EFAULT;
	}
	if (error_status)
		atomic_inc(&adap->chcr_stats.error);

	return 0;
}

int chcr_send_wr(struct sk_buff *skb)
{
	return cxgb4_crypto_send(skb->dev, skb);
}

static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
	struct uld_ctx *u_ctx;

	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
		return ERR_PTR(-EOPNOTSUPP);

	/* Create the device and add it to the device list */
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx) {
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	u_ctx->lldi = *lld;
	chcr_dev_init(u_ctx);
out:
	return u_ctx;
}

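/*
 * Receive hook called by the cxgb4 LLD for CXGB4_ULD_CRYPTO traffic.  The
 * reply payload is either inline in the response descriptor (pgl == NULL,
 * payload starts at rsp[1]) or carried in a packet gather list (pgl->va);
 * either way it is passed to the handler registered for the CPL opcode.
 */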
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *pgl)
{
	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
	struct chcr_dev *dev = &u_ctx->dev;
	struct adapter *adap = padap(dev);
	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;

	if (!work_handlers[rpl->opcode]) {
		pr_err("Unsupported opcode %d received\n", rpl->opcode);
		return 0;
	}

	if (!pgl)
		work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]);
	else
		work_handlers[rpl->opcode](adap, pgl->va);
	return 0;
}

#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
{
	/* If the skb's decrypted bit is set, it is a NIC TLS packet;
	 * otherwise it is an IPsec packet.
	 */
#ifdef CONFIG_CHELSIO_TLS_DEVICE
	if (skb->decrypted)
		return chcr_ktls_xmit(skb, dev);
#endif
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	return chcr_ipsec_xmit(skb, dev);
#endif
	return 0;
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */

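/*
 * Quiesce a device ahead of removing it from the active list: mark it
 * CHCR_DETACH (the submission path is expected to check this state before
 * accepting new work), then, if requests are still in flight, kick
 * detach_work and block on detach_comp until they drain or the retry
 * budget runs out, and finally move the device to the inactive list.
 */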
static void chcr_detach_device(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev = &u_ctx->dev;

	if (dev->state == CHCR_DETACH) {
		pr_debug("Detach event received for an already detached device\n");
		return;
	}
	dev->state = CHCR_DETACH;
	if (atomic_read(&dev->inflight) != 0) {
		schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		wait_for_completion(&dev->detach_comp);
	}

	/* Move u_ctx to the inactive device list */
	chcr_dev_move(u_ctx);
}

static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct uld_ctx *u_ctx = handle;
	int ret = 0;

	switch (state) {
	case CXGB4_STATE_UP:
		if (u_ctx->dev.state != CHCR_INIT) {
			/* Already initialised */
			return 0;
		}
		chcr_dev_add(u_ctx);
		ret = start_crypto();
		break;

	case CXGB4_STATE_DETACH:
		chcr_detach_device(u_ctx);
		if (!atomic_read(&drv_data.dev_count))
			stop_crypto();
		break;

	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	default:
		break;
	}
	return ret;
}

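/*
 * With inline IPsec enabled, walk every known device (at module init they
 * typically all still sit on the inactive list) and let chcr_add_xfrmops()
 * wire up the IPsec offload hooks for adapters that advertise
 * ULP_CRYPTO_IPSEC_INLINE.  Called from chcr_crypto_init() under
 * rtnl_lock().
 */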
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
static void update_netdev_features(void)
{
	struct uld_ctx *u_ctx, *tmp;

	mutex_lock(&drv_data.drv_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
		if (u_ctx->lldi.crypto & ULP_CRYPTO_IPSEC_INLINE)
			chcr_add_xfrmops(&u_ctx->lldi);
	}
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
		if (u_ctx->lldi.crypto & ULP_CRYPTO_IPSEC_INLINE)
			chcr_add_xfrmops(&u_ctx->lldi);
	}
	mutex_unlock(&drv_data.drv_mutex);
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

static int __init chcr_crypto_init(void)
{
	INIT_LIST_HEAD(&drv_data.act_dev);
	INIT_LIST_HEAD(&drv_data.inact_dev);
	atomic_set(&drv_data.dev_count, 0);
	mutex_init(&drv_data.drv_mutex);
	drv_data.last_dev = NULL;
	cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	rtnl_lock();
	update_netdev_features();
	rtnl_unlock();
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

	return 0;
}

static void __exit chcr_crypto_exit(void)
{
	struct uld_ctx *u_ctx, *tmp;
	struct adapter *adap;

	stop_crypto();
	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
	/* Remove all devices from list */
	mutex_lock(&drv_data.drv_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	mutex_unlock(&drv_data.drv_mutex);
}

module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);