/**
 * This file is part of the Chelsio T4/T5/T6 crypto driver for Linux.
 *
 * Copyright (C) 2011-2016 Chelsio Communications.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written and Maintained by:
 * Manoj Malviya (manojmalviya@chelsio.com)
 * Atul Gupta (atul.gupta@chelsio.com)
 * Jitendra Lulla (jlulla@chelsio.com)
 * Yeshaswi M R Gowda (yeshaswi@chelsio.com)
 * Harsh Jain (harsh@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/skbuff.h>

#include <crypto/aes.h>
#include <crypto/hash.h>

#include "t4_msg.h"
#include "chcr_core.h"
#include "cxgb4_uld.h"

static struct chcr_driver_data drv_data;

typedef int (*chcr_handler_func)(struct adapter *adap, unsigned char *input);
static int cpl_fw6_pld_handler(struct adapter *adap, unsigned char *input);
static void *chcr_uld_add(const struct cxgb4_lld_info *lld);
static int chcr_uld_state_change(void *handle, enum cxgb4_state state);

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
static void update_netdev_features(void);
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

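/*
 * Dispatch table for CPL messages delivered to the crypto ULD, indexed by
 * CPL opcode. Opcodes without a handler are left NULL and are rejected in
 * chcr_uld_rx_handler().
 */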
static chcr_handler_func work_handlers[NUM_CPL_CMDS] = {
	[CPL_FW6_PLD] = cpl_fw6_pld_handler,
#ifdef CONFIG_CHELSIO_TLS_DEVICE
	[CPL_ACT_OPEN_RPL] = chcr_ktls_cpl_act_open_rpl,
	[CPL_SET_TCB_RPL] = chcr_ktls_cpl_set_tcb_rpl,
#endif
};

static struct cxgb4_uld_info chcr_uld_info = {
	.name = DRV_MODULE_NAME,
	.nrxq = MAX_ULD_QSETS,
	/* Max ntxq will be derived from fw config file */
	.rxq_size = 1024,
	.add = chcr_uld_add,
	.state_change = chcr_uld_state_change,
	.rx_handler = chcr_uld_rx_handler,
#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
	.tx_handler = chcr_uld_tx_handler,
#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */
};

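/*
 * detach_work_fn() - delayed work used while a device is being detached.
 * While requests are still in flight it re-arms itself every WQ_DETACH_TM,
 * up to WQ_RETRY attempts; once the inflight count reaches zero (or the
 * retries are exhausted, with a warning) it completes detach_comp so the
 * detach path can make progress.
 */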
static void detach_work_fn(struct work_struct *work)
{
	struct chcr_dev *dev;

	dev = container_of(work, struct chcr_dev, detach_work.work);

	if (atomic_read(&dev->inflight)) {
		dev->wqretry--;
		if (dev->wqretry) {
			pr_debug("Request Inflight Count %d\n",
				atomic_read(&dev->inflight));

			schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		} else {
			WARN(1, "CHCR: %d requests still pending\n",
				atomic_read(&dev->inflight));
			complete(&dev->detach_comp);
		}
	} else {
		complete(&dev->detach_comp);
	}
}

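/*
 * assign_chcr_device() - pick an active device for a new crypto session.
 * Returns the current round-robin cursor (drv_data.last_dev) and advances
 * it to the next active device, or returns NULL when no device is active.
 */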
struct uld_ctx *assign_chcr_device(void)
{
	struct uld_ctx *u_ctx = NULL;

	/*
	 * When multiple devices are present in the system, select a
	 * device in round-robin fashion for crypto operations. A given
	 * session, however, must keep using the same device so that
	 * request-response ordering is maintained.
	 */
	mutex_lock(&drv_data.drv_mutex);
	if (!list_empty(&drv_data.act_dev)) {
		u_ctx = drv_data.last_dev;
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
						  struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	mutex_unlock(&drv_data.drv_mutex);
	return u_ctx;
}

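/*
 * chcr_dev_add() - mark a device as attached and move it from the inactive
 * to the active list, seeding the round-robin cursor if this is the first
 * active device.
 */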
static void chcr_dev_add(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	dev->state = CHCR_ATTACH;
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_move(&u_ctx->entry, &drv_data.act_dev);
	if (!drv_data.last_dev)
		drv_data.last_dev = u_ctx;
	mutex_unlock(&drv_data.drv_mutex);
}

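/*
 * chcr_dev_init() - one-time initialisation when the LLD reports a new
 * adapter: set up the device lock, the detach work/completion pair and the
 * counters, then park the device on the inactive list until the adapter
 * comes up.
 */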
static void chcr_dev_init(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev;

	dev = &u_ctx->dev;
	spin_lock_init(&dev->lock_chcr_dev);
	INIT_DELAYED_WORK(&dev->detach_work, detach_work_fn);
	init_completion(&dev->detach_comp);
	dev->state = CHCR_INIT;
	dev->wqretry = WQ_RETRY;
	atomic_inc(&drv_data.dev_count);
	atomic_set(&dev->inflight, 0);
	mutex_lock(&drv_data.drv_mutex);
	list_add_tail(&u_ctx->entry, &drv_data.inact_dev);
	mutex_unlock(&drv_data.drv_mutex);
}

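/*
 * chcr_dev_move() - move a device back to the inactive list, first
 * advancing (or clearing) the round-robin cursor so it never points at an
 * inactive device.
 */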
static int chcr_dev_move(struct uld_ctx *u_ctx)
{
	mutex_lock(&drv_data.drv_mutex);
	if (drv_data.last_dev == u_ctx) {
		if (list_is_last(&drv_data.last_dev->entry, &drv_data.act_dev))
			drv_data.last_dev = list_first_entry(&drv_data.act_dev,
						  struct uld_ctx, entry);
		else
			drv_data.last_dev =
				list_next_entry(drv_data.last_dev, entry);
	}
	list_move(&u_ctx->entry, &drv_data.inact_dev);
	if (list_empty(&drv_data.act_dev))
		drv_data.last_dev = NULL;
	atomic_dec(&drv_data.dev_count);
	mutex_unlock(&drv_data.drv_mutex);

	return 0;
}

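/*
 * cpl_fw6_pld_handler() - completion handler for CPL_FW6_PLD messages.
 * data[1] of the CPL carries the originating crypto request pointer and
 * data[0] the MAC/padding error bits; the request is completed with
 * -EBADMSG when either error bit is set.
 */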
static int cpl_fw6_pld_handler(struct adapter *adap,
			       unsigned char *input)
{
	struct crypto_async_request *req;
	struct cpl_fw6_pld *fw6_pld;
	u32 ack_err_status = 0;
	int error_status = 0;

	fw6_pld = (struct cpl_fw6_pld *)input;
	req = (struct crypto_async_request *)(uintptr_t)be64_to_cpu(
						    fw6_pld->data[1]);

	ack_err_status =
		ntohl(*(__be32 *)((unsigned char *)&fw6_pld->data[0] + 4));
	if (CHK_MAC_ERR_BIT(ack_err_status) || CHK_PAD_ERR_BIT(ack_err_status))
		error_status = -EBADMSG;
	/* call the completion callback with the computed status */
	if (req) {
		error_status = chcr_handle_resp(req, input, error_status);
	} else {
		pr_err("Incorrect request address from the firmware\n");
		return -EFAULT;
	}
	if (error_status)
		atomic_inc(&adap->chcr_stats.error);

	return 0;
}

int chcr_send_wr(struct sk_buff *skb)
{
	return cxgb4_crypto_send(skb->dev, skb);
}

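/*
 * chcr_uld_add() - ULD "add" callback, invoked by cxgb4 for each adapter.
 * Allocates the per-adapter uld_ctx, records the lower-level driver info
 * and, when the adapter advertises KTLS inline support, enables KTLS.
 */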
static void *chcr_uld_add(const struct cxgb4_lld_info *lld)
{
	struct uld_ctx *u_ctx;

	pr_info_once("%s - version %s\n", DRV_DESC, DRV_VERSION);
	if (!(lld->ulp_crypto & ULP_CRYPTO_LOOKASIDE))
		return ERR_PTR(-EOPNOTSUPP);

	/* Create the device and add it to the device list */
	u_ctx = kzalloc(sizeof(*u_ctx), GFP_KERNEL);
	if (!u_ctx) {
		u_ctx = ERR_PTR(-ENOMEM);
		goto out;
	}
	u_ctx->lldi = *lld;
	chcr_dev_init(u_ctx);

#ifdef CONFIG_CHELSIO_TLS_DEVICE
	if (lld->ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
		chcr_enable_ktls(padap(&u_ctx->dev));
#endif
out:
	return u_ctx;
}

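/*
 * chcr_uld_rx_handler() - ULD receive hook. Looks up the handler for the
 * incoming CPL opcode in work_handlers[] and invokes it with either the
 * packet-gather buffer (when one is supplied) or the inline response data.
 */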
int chcr_uld_rx_handler(void *handle, const __be64 *rsp,
			const struct pkt_gl *pgl)
{
	struct uld_ctx *u_ctx = (struct uld_ctx *)handle;
	struct chcr_dev *dev = &u_ctx->dev;
	struct adapter *adap = padap(dev);
	const struct cpl_fw6_pld *rpl = (struct cpl_fw6_pld *)rsp;

	if (!work_handlers[rpl->opcode]) {
		pr_err("Unsupported opcode %d received\n", rpl->opcode);
		return 0;
	}

	if (!pgl)
		work_handlers[rpl->opcode](adap, (unsigned char *)&rsp[1]);
	else
		work_handlers[rpl->opcode](adap, pgl->va);
	return 0;
}

#if defined(CONFIG_CHELSIO_IPSEC_INLINE) || defined(CONFIG_CHELSIO_TLS_DEVICE)
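/*
 * chcr_uld_tx_handler() - ULD transmit hook for offloaded traffic, routing
 * KTLS and inline-IPsec packets to their respective xmit paths.
 */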
int chcr_uld_tx_handler(struct sk_buff *skb, struct net_device *dev)
{
	/* If the skb's decrypted bit is set, this is a NIC TLS packet;
	 * otherwise it is an IPsec packet.
	 */
#ifdef CONFIG_CHELSIO_TLS_DEVICE
	if (skb->decrypted)
		return chcr_ktls_xmit(skb, dev);
#endif
#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	return chcr_ipsec_xmit(skb, dev);
#endif
	return 0;
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE || CONFIG_CHELSIO_TLS_DEVICE */

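/*
 * chcr_detach_device() - quiesce a device on CXGB4_STATE_DETACH: flip the
 * state to CHCR_DETACH, wait (via detach_work/detach_comp) for in-flight
 * requests to drain, then move the device to the inactive list.
 */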
static void chcr_detach_device(struct uld_ctx *u_ctx)
{
	struct chcr_dev *dev = &u_ctx->dev;

	if (dev->state == CHCR_DETACH) {
		pr_debug("Detach event received for an already detached device\n");
		return;
	}
	dev->state = CHCR_DETACH;
	if (atomic_read(&dev->inflight) != 0) {
		schedule_delayed_work(&dev->detach_work, WQ_DETACH_TM);
		wait_for_completion(&dev->detach_comp);
	}

	/* Move u_ctx to the inactive device list */
	chcr_dev_move(u_ctx);
}

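/*
 * chcr_uld_state_change() - ULD state-change callback. CXGB4_STATE_UP
 * activates the device and calls start_crypto(); CXGB4_STATE_DETACH
 * quiesces it and calls stop_crypto() once no active devices remain.
 */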
static int chcr_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct uld_ctx *u_ctx = handle;
	int ret = 0;

	switch (state) {
	case CXGB4_STATE_UP:
		if (u_ctx->dev.state != CHCR_INIT) {
			/* Already initialised */
			return 0;
		}
		chcr_dev_add(u_ctx);
		ret = start_crypto();
		break;

	case CXGB4_STATE_DETACH:
		chcr_detach_device(u_ctx);
		if (!atomic_read(&drv_data.dev_count))
			stop_crypto();
		break;

	case CXGB4_STATE_START_RECOVERY:
	case CXGB4_STATE_DOWN:
	default:
		break;
	}
	return ret;
}

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
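/*
 * update_netdev_features() - walk both device lists and install the inline
 * IPsec (xfrm) offload hooks via chcr_add_xfrmops() on every adapter that
 * supports it; called under the RTNL lock from chcr_crypto_init().
 */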
static void update_netdev_features(void)
{
	struct uld_ctx *u_ctx, *tmp;

	mutex_lock(&drv_data.drv_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
		if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_IPSEC_INLINE)
			chcr_add_xfrmops(&u_ctx->lldi);
	}
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
		if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_IPSEC_INLINE)
			chcr_add_xfrmops(&u_ctx->lldi);
	}
	mutex_unlock(&drv_data.drv_mutex);
}
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

static int __init chcr_crypto_init(void)
{
	INIT_LIST_HEAD(&drv_data.act_dev);
	INIT_LIST_HEAD(&drv_data.inact_dev);
	atomic_set(&drv_data.dev_count, 0);
	mutex_init(&drv_data.drv_mutex);
	drv_data.last_dev = NULL;
	cxgb4_register_uld(CXGB4_ULD_CRYPTO, &chcr_uld_info);

#ifdef CONFIG_CHELSIO_IPSEC_INLINE
	rtnl_lock();
	update_netdev_features();
	rtnl_unlock();
#endif /* CONFIG_CHELSIO_IPSEC_INLINE */

	return 0;
}

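/*
 * chcr_crypto_exit() - module unload: stop the crypto service, unregister
 * the ULD and free every uld_ctx left on the active and inactive lists,
 * disabling KTLS on adapters that had it enabled.
 */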
static void __exit chcr_crypto_exit(void)
{
	struct uld_ctx *u_ctx, *tmp;
	struct adapter *adap;

	stop_crypto();
	cxgb4_unregister_uld(CXGB4_ULD_CRYPTO);
	/* Remove all devices from list */
	mutex_lock(&drv_data.drv_mutex);
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.act_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
#ifdef CONFIG_CHELSIO_TLS_DEVICE
		if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
			chcr_disable_ktls(adap);
#endif
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	list_for_each_entry_safe(u_ctx, tmp, &drv_data.inact_dev, entry) {
		adap = padap(&u_ctx->dev);
		memset(&adap->chcr_stats, 0, sizeof(adap->chcr_stats));
#ifdef CONFIG_CHELSIO_TLS_DEVICE
		if (u_ctx->lldi.ulp_crypto & ULP_CRYPTO_KTLS_INLINE)
			chcr_disable_ktls(adap);
#endif
		list_del(&u_ctx->entry);
		kfree(u_ctx);
	}
	mutex_unlock(&drv_data.drv_mutex);
}

module_init(chcr_crypto_init);
module_exit(chcr_crypto_exit);

MODULE_DESCRIPTION("Crypto Co-processor for Chelsio Terminator cards.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);