/*
 * Greybus Component Authentication Protocol (CAP) Driver.
 *
 * Copyright 2016 Google Inc.
 * Copyright 2016 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include "greybus.h"

#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/uaccess.h>

#include "greybus_authentication.h"
#include "firmware.h"

#define CAP_TIMEOUT_MS		1000

/*
 * Number of minor devices this driver supports.
 * Exactly one is required per Interface.
 */
#define NUM_MINORS		U8_MAX

struct gb_cap {
	struct device		*parent;
	struct gb_connection	*connection;
	struct kref		kref;
	struct list_head	node;
	bool			disabled; /* connection getting disabled */

	struct mutex		mutex;
	struct cdev		cdev;
	struct device		*class_device;
	dev_t			dev_num;
};

static struct class *cap_class;
static dev_t cap_dev_num;
static DEFINE_IDA(cap_minors_map);
static LIST_HEAD(cap_list);
static DEFINE_MUTEX(list_mutex);

static void cap_kref_release(struct kref *kref)
{
	struct gb_cap *cap = container_of(kref, struct gb_cap, kref);

	kfree(cap);
}

/*
 * All users of cap take a reference (while holding list_mutex) before they
 * get a pointer to play with. The structure is freed only after the last
 * user has put its reference.
 */
static void put_cap(struct gb_cap *cap)
{
	kref_put(&cap->kref, cap_kref_release);
}

/* Caller must call put_cap() after using struct gb_cap */
static struct gb_cap *get_cap(struct cdev *cdev)
{
	struct gb_cap *cap;

	mutex_lock(&list_mutex);

	list_for_each_entry(cap, &cap_list, node) {
		if (&cap->cdev == cdev) {
			kref_get(&cap->kref);
			goto unlock;
		}
	}

	cap = NULL;

unlock:
	mutex_unlock(&list_mutex);

	return cap;
}

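/* Read the Endpoint Unique ID (EUID) of the Interface over the CAP connection */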
static int cap_get_endpoint_uid(struct gb_cap *cap, u8 *euid)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_get_endpoint_uid_response response;
	int ret;

	ret = gb_operation_sync(connection, GB_CAP_TYPE_GET_ENDPOINT_UID, NULL,
				0, &response, sizeof(response));
	if (ret) {
		dev_err(cap->parent, "failed to get endpoint uid (%d)\n", ret);
		return ret;
	}

	memcpy(euid, response.uid, sizeof(response.uid));

	return 0;
}

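/*
 * Request an IMS certificate of the given class and id from the Interface.
 * The certificate length is variable, so the operation is created with the
 * connection's maximum payload size and the actual certificate size is
 * derived from the response payload size.
 */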
static int cap_get_ims_certificate(struct gb_cap *cap, u32 class, u32 id,
				   u8 *certificate, u32 *size, u8 *result)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_get_ims_certificate_request *request;
	struct gb_cap_get_ims_certificate_response *response;
	size_t max_size = gb_operation_get_payload_size_max(connection);
	struct gb_operation *op;
	int ret;

	op = gb_operation_create_flags(connection,
				       GB_CAP_TYPE_GET_IMS_CERTIFICATE,
				       sizeof(*request), max_size,
				       GB_OPERATION_FLAG_SHORT_RESPONSE,
				       GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	request = op->request->payload;
	request->certificate_class = cpu_to_le32(class);
	request->certificate_id = cpu_to_le32(id);

	ret = gb_operation_request_send_sync(op);
	if (ret) {
		dev_err(cap->parent, "failed to get certificate (%d)\n", ret);
		goto done;
	}

	response = op->response->payload;
	*result = response->result_code;
	*size = op->response->payload_size - sizeof(*response);
	memcpy(certificate, response->certificate, *size);

done:
	gb_operation_put(op);
	return ret;
}

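/*
 * Authenticate the Interface: send the UID and challenge, and return the
 * result code, challenge response and variable-length signature reported by
 * the Interface.
 */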
static int cap_authenticate(struct gb_cap *cap, u32 auth_type, u8 *uid,
			    u8 *challenge, u8 *result, u8 *auth_response,
			    u32 *signature_size, u8 *signature)
{
	struct gb_connection *connection = cap->connection;
	struct gb_cap_authenticate_request *request;
	struct gb_cap_authenticate_response *response;
	size_t max_size = gb_operation_get_payload_size_max(connection);
	struct gb_operation *op;
	int ret;

	op = gb_operation_create_flags(connection, GB_CAP_TYPE_AUTHENTICATE,
				       sizeof(*request), max_size,
				       GB_OPERATION_FLAG_SHORT_RESPONSE,
				       GFP_KERNEL);
	if (!op)
		return -ENOMEM;

	request = op->request->payload;
	request->auth_type = cpu_to_le32(auth_type);
	memcpy(request->uid, uid, sizeof(request->uid));
	memcpy(request->challenge, challenge, sizeof(request->challenge));

	ret = gb_operation_request_send_sync(op);
	if (ret) {
		dev_err(cap->parent, "failed to authenticate (%d)\n", ret);
		goto done;
	}

	response = op->response->payload;
	*result = response->result_code;
	*signature_size = op->response->payload_size - sizeof(*response);
	memcpy(auth_response, response->response, sizeof(response->response));
	memcpy(signature, response->signature, *signature_size);

done:
	gb_operation_put(op);
	return ret;
}

/* Char device fops */

static int cap_open(struct inode *inode, struct file *file)
{
	struct gb_cap *cap = get_cap(inode->i_cdev);

	/* cap structure can't get freed until file descriptor is closed */
	if (cap) {
		file->private_data = cap;
		return 0;
	}

	return -ENODEV;
}

static int cap_release(struct inode *inode, struct file *file)
{
	struct gb_cap *cap = file->private_data;

	put_cap(cap);
	return 0;
}

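/*
 * Handle the CAP ioctls. The fixed-size ioctl structures are copied from and
 * back to userspace here; serialization against other ioctls and connection
 * teardown is done by the caller, cap_ioctl_unlocked().
 */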
static int cap_ioctl(struct gb_cap *cap, unsigned int cmd,
		     void __user *buf)
{
	struct cap_ioc_get_endpoint_uid endpoint_uid;
	struct cap_ioc_get_ims_certificate *ims_cert;
	struct cap_ioc_authenticate *authenticate;
	size_t size;
	int ret;

	switch (cmd) {
	case CAP_IOC_GET_ENDPOINT_UID:
		ret = cap_get_endpoint_uid(cap, endpoint_uid.uid);
		if (ret)
			return ret;

		if (copy_to_user(buf, &endpoint_uid, sizeof(endpoint_uid)))
			return -EFAULT;

		return 0;
	case CAP_IOC_GET_IMS_CERTIFICATE:
		size = sizeof(*ims_cert);
		ims_cert = memdup_user(buf, size);
		if (IS_ERR(ims_cert))
			return PTR_ERR(ims_cert);

		ret = cap_get_ims_certificate(cap, ims_cert->certificate_class,
					      ims_cert->certificate_id,
					      ims_cert->certificate,
					      &ims_cert->cert_size,
					      &ims_cert->result_code);
		if (!ret && copy_to_user(buf, ims_cert, size))
			ret = -EFAULT;
		kfree(ims_cert);

		return ret;
	case CAP_IOC_AUTHENTICATE:
		size = sizeof(*authenticate);
		authenticate = memdup_user(buf, size);
		if (IS_ERR(authenticate))
			return PTR_ERR(authenticate);

		ret = cap_authenticate(cap, authenticate->auth_type,
				       authenticate->uid,
				       authenticate->challenge,
				       &authenticate->result_code,
				       authenticate->response,
				       &authenticate->signature_size,
				       authenticate->signature);
		if (!ret && copy_to_user(buf, authenticate, size))
			ret = -EFAULT;
		kfree(authenticate);

		return ret;
	default:
		return -ENOTTY;
	}
}

static long cap_ioctl_unlocked(struct file *file, unsigned int cmd,
			       unsigned long arg)
{
	struct gb_cap *cap = file->private_data;
	struct gb_bundle *bundle = cap->connection->bundle;
	int ret = -ENODEV;

	/*
	 * Serialize ioctls.
	 *
	 * We don't want the user to do multiple authentication operations in
	 * parallel.
	 *
	 * This is also used to protect ->disabled, which is used to check if
	 * the connection is getting disconnected, so that we don't start any
	 * new operations.
	 */
	mutex_lock(&cap->mutex);
	if (!cap->disabled) {
		ret = gb_pm_runtime_get_sync(bundle);
		if (!ret) {
			ret = cap_ioctl(cap, cmd, (void __user *)arg);
			gb_pm_runtime_put_autosuspend(bundle);
		}
	}
	mutex_unlock(&cap->mutex);

	return ret;
}

static const struct file_operations cap_fops = {
	.owner		= THIS_MODULE,
	.open		= cap_open,
	.release	= cap_release,
	.unlocked_ioctl	= cap_ioctl_unlocked,
};

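/*
 * Create the CAP device for an Interface: enable the connection, allocate a
 * minor number and register the gb-authenticate-<N> character device that
 * userspace uses to talk to it.
 */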
int gb_cap_connection_init(struct gb_connection *connection)
{
	struct gb_cap *cap;
	int ret, minor;

	if (!connection)
		return 0;

	cap = kzalloc(sizeof(*cap), GFP_KERNEL);
	if (!cap)
		return -ENOMEM;

	cap->parent = &connection->bundle->dev;
	cap->connection = connection;
	mutex_init(&cap->mutex);
	gb_connection_set_data(connection, cap);
	kref_init(&cap->kref);

	mutex_lock(&list_mutex);
	list_add(&cap->node, &cap_list);
	mutex_unlock(&list_mutex);

	ret = gb_connection_enable(connection);
	if (ret)
		goto err_list_del;

	minor = ida_simple_get(&cap_minors_map, 0, NUM_MINORS, GFP_KERNEL);
	if (minor < 0) {
		ret = minor;
		goto err_connection_disable;
	}

	/* Add a char device to allow userspace to interact with cap */
	cap->dev_num = MKDEV(MAJOR(cap_dev_num), minor);
	cdev_init(&cap->cdev, &cap_fops);

	ret = cdev_add(&cap->cdev, cap->dev_num, 1);
	if (ret)
		goto err_remove_ida;

	/* Add a soft link to the previously added char-dev within the bundle */
	cap->class_device = device_create(cap_class, cap->parent, cap->dev_num,
					  NULL, "gb-authenticate-%d", minor);
	if (IS_ERR(cap->class_device)) {
		ret = PTR_ERR(cap->class_device);
		goto err_del_cdev;
	}

	return 0;

err_del_cdev:
	cdev_del(&cap->cdev);
err_remove_ida:
	ida_simple_remove(&cap_minors_map, minor);
err_connection_disable:
	gb_connection_disable(connection);
err_list_del:
	mutex_lock(&list_mutex);
	list_del(&cap->node);
	mutex_unlock(&list_mutex);

	put_cap(cap);

	return ret;
}

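/*
 * Tear down the CAP device: remove the character device, block any further
 * ioctls, disable the connection and drop the initial reference to the cap
 * structure.
 */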
void gb_cap_connection_exit(struct gb_connection *connection)
{
	struct gb_cap *cap;

	if (!connection)
		return;

	cap = gb_connection_get_data(connection);

	device_destroy(cap_class, cap->dev_num);
	cdev_del(&cap->cdev);
	ida_simple_remove(&cap_minors_map, MINOR(cap->dev_num));

	/*
	 * Disallow any new ioctl operations on the char device and wait for
	 * existing ones to finish.
	 */
	mutex_lock(&cap->mutex);
	cap->disabled = true;
	mutex_unlock(&cap->mutex);

	/* All pending greybus operations should have finished by now */
	gb_connection_disable(cap->connection);

	/* Prevent new users from getting access to the cap structure */
	mutex_lock(&list_mutex);
	list_del(&cap->node);
	mutex_unlock(&list_mutex);

	/*
	 * All current users of cap have taken a reference to it by now, so we
	 * can drop our reference; the last user to put its reference will free
	 * cap.
	 */
	put_cap(cap);
}

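/* One-time setup: create the device class and reserve the char-dev region */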
int cap_init(void)
{
	int ret;

	cap_class = class_create(THIS_MODULE, "gb_authenticate");
	if (IS_ERR(cap_class))
		return PTR_ERR(cap_class);

	ret = alloc_chrdev_region(&cap_dev_num, 0, NUM_MINORS,
				  "gb_authenticate");
	if (ret)
		goto err_remove_class;

	return 0;

err_remove_class:
	class_destroy(cap_class);
	return ret;
}

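/* One-time cleanup: release the char-dev region, the class and the minors map */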
void cap_exit(void)
{
	unregister_chrdev_region(cap_dev_num, NUM_MINORS);
	class_destroy(cap_class);
	ida_destroy(&cap_minors_map);
}