xref: /openbmc/linux/drivers/misc/mei/main.c (revision 68f436a8)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2003-2022, Intel Corporation. All rights reserved.
4  * Intel Management Engine Interface (Intel MEI) Linux driver
5  */
6 
7 #include <linux/module.h>
8 #include <linux/moduleparam.h>
9 #include <linux/kernel.h>
10 #include <linux/device.h>
11 #include <linux/slab.h>
12 #include <linux/fs.h>
13 #include <linux/errno.h>
14 #include <linux/types.h>
15 #include <linux/fcntl.h>
16 #include <linux/poll.h>
17 #include <linux/init.h>
18 #include <linux/ioctl.h>
19 #include <linux/cdev.h>
20 #include <linux/sched/signal.h>
21 #include <linux/compat.h>
22 #include <linux/jiffies.h>
23 #include <linux/interrupt.h>
24 
25 #include <linux/mei.h>
26 
27 #include "mei_dev.h"
28 #include "client.h"
29 
30 static struct class *mei_class;
31 static dev_t mei_devt;
32 #define MEI_MAX_DEVS  MINORMASK
33 static DEFINE_MUTEX(mei_minor_lock);
34 static DEFINE_IDR(mei_idr);
35 
36 /**
37  * mei_open - the open function
38  *
39  * @inode: pointer to inode structure
40  * @file: pointer to file structure
41  *
42  * Return: 0 on success, <0 on error
43  */
44 static int mei_open(struct inode *inode, struct file *file)
45 {
46 	struct mei_device *dev;
47 	struct mei_cl *cl;
48 
49 	int err;
50 
51 	dev = container_of(inode->i_cdev, struct mei_device, cdev);
52 
53 	mutex_lock(&dev->device_lock);
54 
55 	if (dev->dev_state != MEI_DEV_ENABLED) {
56 		dev_dbg(dev->dev, "dev_state != MEI_DEV_ENABLED dev_state = %s\n",
57 		    mei_dev_state_str(dev->dev_state));
58 		err = -ENODEV;
59 		goto err_unlock;
60 	}
61 
62 	cl = mei_cl_alloc_linked(dev);
63 	if (IS_ERR(cl)) {
64 		err = PTR_ERR(cl);
65 		goto err_unlock;
66 	}
67 
68 	cl->fp = file;
69 	file->private_data = cl;
70 
71 	mutex_unlock(&dev->device_lock);
72 
73 	return nonseekable_open(inode, file);
74 
75 err_unlock:
76 	mutex_unlock(&dev->device_lock);
77 	return err;
78 }
79 
80 /**
81  * mei_cl_vtag_remove_by_fp - remove vtag that corresponds to fp from list
82  *
83  * @cl: host client
84  * @fp: pointer to file structure
85  *
86  */
87 static void mei_cl_vtag_remove_by_fp(const struct mei_cl *cl,
88 				     const struct file *fp)
89 {
90 	struct mei_cl_vtag *vtag_l, *next;
91 
92 	list_for_each_entry_safe(vtag_l, next, &cl->vtag_map, list) {
93 		if (vtag_l->fp == fp) {
94 			list_del(&vtag_l->list);
95 			kfree(vtag_l);
96 			return;
97 		}
98 	}
99 }
100 
101 /**
102  * mei_release - the release function
103  *
104  * @inode: pointer to inode structure
105  * @file: pointer to file structure
106  *
107  * Return: 0 on success, <0 on error
108  */
109 static int mei_release(struct inode *inode, struct file *file)
110 {
111 	struct mei_cl *cl = file->private_data;
112 	struct mei_device *dev;
113 	int rets;
114 
115 	if (WARN_ON(!cl || !cl->dev))
116 		return -ENODEV;
117 
118 	dev = cl->dev;
119 
120 	mutex_lock(&dev->device_lock);
121 
122 	mei_cl_vtag_remove_by_fp(cl, file);
123 
124 	if (!list_empty(&cl->vtag_map)) {
125 		cl_dbg(dev, cl, "not the last vtag\n");
126 		mei_cl_flush_queues(cl, file);
127 		rets = 0;
128 		goto out;
129 	}
130 
131 	rets = mei_cl_disconnect(cl);
132 	/*
133 	 * Check again: This is necessary since disconnect releases the lock
134 	 * and another client can connect in the meantime.
135 	 */
136 	if (!list_empty(&cl->vtag_map)) {
137 		cl_dbg(dev, cl, "not the last vtag after disconnect\n");
138 		mei_cl_flush_queues(cl, file);
139 		goto out;
140 	}
141 
142 	mei_cl_flush_queues(cl, NULL);
143 	cl_dbg(dev, cl, "removing\n");
144 
145 	mei_cl_unlink(cl);
146 	kfree(cl);
147 
148 out:
149 	file->private_data = NULL;
150 
151 	mutex_unlock(&dev->device_lock);
152 	return rets;
153 }
154 
155 
156 /**
157  * mei_read - the read function.
158  *
159  * @file: pointer to file structure
160  * @ubuf: pointer to user buffer
161  * @length: buffer length
162  * @offset: data offset in buffer
163  *
164  * Return: >=0 data length on success, <0 on error
165  */
166 static ssize_t mei_read(struct file *file, char __user *ubuf,
167 			size_t length, loff_t *offset)
168 {
169 	struct mei_cl *cl = file->private_data;
170 	struct mei_device *dev;
171 	struct mei_cl_cb *cb = NULL;
172 	bool nonblock = !!(file->f_flags & O_NONBLOCK);
173 	ssize_t rets;
174 
175 	if (WARN_ON(!cl || !cl->dev))
176 		return -ENODEV;
177 
178 	dev = cl->dev;
179 
180 
181 	mutex_lock(&dev->device_lock);
182 	if (dev->dev_state != MEI_DEV_ENABLED) {
183 		rets = -ENODEV;
184 		goto out;
185 	}
186 
187 	if (length == 0) {
188 		rets = 0;
189 		goto out;
190 	}
191 
192 	if (ubuf == NULL) {
193 		rets = -EMSGSIZE;
194 		goto out;
195 	}
196 
197 	cb = mei_cl_read_cb(cl, file);
198 	if (cb)
199 		goto copy_buffer;
200 
201 	if (*offset > 0)
202 		*offset = 0;
203 
204 	rets = mei_cl_read_start(cl, length, file);
205 	if (rets && rets != -EBUSY) {
206 		cl_dbg(dev, cl, "mei start read failure status = %zd\n", rets);
207 		goto out;
208 	}
209 
210 	if (nonblock) {
211 		rets = -EAGAIN;
212 		goto out;
213 	}
214 
215 	mutex_unlock(&dev->device_lock);
216 	if (wait_event_interruptible(cl->rx_wait,
217 				     mei_cl_read_cb(cl, file) ||
218 				     !mei_cl_is_connected(cl))) {
219 		if (signal_pending(current))
220 			return -EINTR;
221 		return -ERESTARTSYS;
222 	}
223 	mutex_lock(&dev->device_lock);
224 
225 	if (!mei_cl_is_connected(cl)) {
226 		rets = -ENODEV;
227 		goto out;
228 	}
229 
230 	cb = mei_cl_read_cb(cl, file);
231 	if (!cb) {
232 		rets = 0;
233 		goto out;
234 	}
235 
236 copy_buffer:
237 	/* now copy the data to user space */
238 	if (cb->status) {
239 		rets = cb->status;
240 		cl_dbg(dev, cl, "read operation failed %zd\n", rets);
241 		goto free;
242 	}
243 
244 	cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n",
245 	       cb->buf.size, cb->buf_idx, *offset);
246 	if (*offset >= cb->buf_idx) {
247 		rets = 0;
248 		goto free;
249 	}
250 
251 	/* cap the read length to the data remaining in the completed cb,
252 	 * buf_idx may point beyond the requested length */
253 	length = min_t(size_t, length, cb->buf_idx - *offset);
254 
255 	if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
256 		dev_dbg(dev->dev, "failed to copy data to userland\n");
257 		rets = -EFAULT;
258 		goto free;
259 	}
260 
261 	rets = length;
262 	*offset += length;
263 	/* not all data was read, keep the cb */
264 	if (*offset < cb->buf_idx)
265 		goto out;
266 
267 free:
268 	mei_cl_del_rd_completed(cl, cb);
269 	*offset = 0;
270 
271 out:
272 	cl_dbg(dev, cl, "end mei read rets = %zd\n", rets);
273 	mutex_unlock(&dev->device_lock);
274 	return rets;
275 }
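
/*
 * Usage sketch (illustrative only, not part of the driver): reading one
 * message from a connected client.  The descriptor fd, the device node
 * and the buffer size are assumptions; reads are message oriented, so
 * the buffer should be at least the max_msg_length reported by the
 * connect ioctl.
 *
 *	unsigned char buf[2048];
 *	ssize_t len;
 *
 *	do {
 *		len = read(fd, buf, sizeof(buf));	// blocks until a message arrives
 *	} while (len < 0 && errno == EINTR);
 */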
276 
277 /**
278  * mei_cl_vtag_by_fp - obtain the vtag by file pointer
279  *
280  * @cl: host client
281  * @fp: pointer to file structure
282  *
283  * Return: vtag value on success, otherwise 0
284  */
285 static u8 mei_cl_vtag_by_fp(const struct mei_cl *cl, const struct file *fp)
286 {
287 	struct mei_cl_vtag *cl_vtag;
288 
289 	if (!fp)
290 		return 0;
291 
292 	list_for_each_entry(cl_vtag, &cl->vtag_map, list)
293 		if (cl_vtag->fp == fp)
294 			return cl_vtag->vtag;
295 	return 0;
296 }
297 
298 /**
299  * mei_write - the write function.
300  *
301  * @file: pointer to file structure
302  * @ubuf: pointer to user buffer
303  * @length: buffer length
304  * @offset: data offset in buffer
305  *
306  * Return: >=0 data length on success, <0 on error
307  */
308 static ssize_t mei_write(struct file *file, const char __user *ubuf,
309 			 size_t length, loff_t *offset)
310 {
311 	struct mei_cl *cl = file->private_data;
312 	struct mei_cl_cb *cb;
313 	struct mei_device *dev;
314 	ssize_t rets;
315 
316 	if (WARN_ON(!cl || !cl->dev))
317 		return -ENODEV;
318 
319 	dev = cl->dev;
320 
321 	mutex_lock(&dev->device_lock);
322 
323 	if (dev->dev_state != MEI_DEV_ENABLED) {
324 		rets = -ENODEV;
325 		goto out;
326 	}
327 
328 	if (!mei_cl_is_connected(cl)) {
329 		cl_err(dev, cl, "is not connected");
330 		rets = -ENODEV;
331 		goto out;
332 	}
333 
334 	if (!mei_me_cl_is_active(cl->me_cl)) {
335 		rets = -ENOTTY;
336 		goto out;
337 	}
338 
339 	if (length > mei_cl_mtu(cl)) {
340 		rets = -EFBIG;
341 		goto out;
342 	}
343 
344 	if (length == 0) {
345 		rets = 0;
346 		goto out;
347 	}
348 
349 	while (cl->tx_cb_queued >= dev->tx_queue_limit) {
350 		if (file->f_flags & O_NONBLOCK) {
351 			rets = -EAGAIN;
352 			goto out;
353 		}
354 		mutex_unlock(&dev->device_lock);
355 		rets = wait_event_interruptible(cl->tx_wait,
356 				cl->writing_state == MEI_WRITE_COMPLETE ||
357 				(!mei_cl_is_connected(cl)));
358 		mutex_lock(&dev->device_lock);
359 		if (rets) {
360 			if (signal_pending(current))
361 				rets = -EINTR;
362 			goto out;
363 		}
364 		if (!mei_cl_is_connected(cl)) {
365 			rets = -ENODEV;
366 			goto out;
367 		}
368 	}
369 
370 	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
371 	if (!cb) {
372 		rets = -ENOMEM;
373 		goto out;
374 	}
375 	cb->vtag = mei_cl_vtag_by_fp(cl, file);
376 
377 	rets = copy_from_user(cb->buf.data, ubuf, length);
378 	if (rets) {
379 		dev_dbg(dev->dev, "failed to copy data from userland\n");
380 		rets = -EFAULT;
381 		mei_io_cb_free(cb);
382 		goto out;
383 	}
384 
385 	rets = mei_cl_write(cl, cb, MAX_SCHEDULE_TIMEOUT);
386 out:
387 	mutex_unlock(&dev->device_lock);
388 	return rets;
389 }
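
/*
 * Usage sketch (illustrative only, not part of the driver): writing one
 * message to a connected client.  msg/msg_len are assumptions; a message
 * larger than the MTU reported by the connect ioctl fails with EFBIG,
 * and with O_NONBLOCK a full TX queue is reported as EAGAIN.
 *
 *	ssize_t sent = write(fd, msg, msg_len);
 *
 *	if (sent < 0 && errno == EAGAIN)
 *		;	// non-blocking fd and the TX queue is full, retry later
 */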
390 
391 /**
392  * mei_ioctl_connect_client - connect to fw client IOCTL handler
393  *
394  * @file: pointer to file structure
395  * @in_client_uuid: requested UUID for connection
396  * @client: IOCTL connect data, output parameters
397  *
398  * Locking: called under "dev->device_lock" lock
399  *
400  * Return: 0 on success, <0 on failure.
401  */
402 static int mei_ioctl_connect_client(struct file *file,
403 				    const uuid_le *in_client_uuid,
404 				    struct mei_client *client)
405 {
406 	struct mei_device *dev;
407 	struct mei_me_client *me_cl;
408 	struct mei_cl *cl;
409 	int rets;
410 
411 	cl = file->private_data;
412 	dev = cl->dev;
413 
414 	if (cl->state != MEI_FILE_INITIALIZING &&
415 	    cl->state != MEI_FILE_DISCONNECTED)
416 		return  -EBUSY;
417 
418 	/* find ME client we're trying to connect to */
419 	me_cl = mei_me_cl_by_uuid(dev, in_client_uuid);
420 	if (!me_cl) {
421 		dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
422 			in_client_uuid);
423 		rets = -ENOTTY;
424 		goto end;
425 	}
426 
427 	if (me_cl->props.fixed_address) {
428 		bool forbidden = dev->override_fixed_address ?
429 			 !dev->allow_fixed_address : !dev->hbm_f_fa_supported;
430 		if (forbidden) {
431 			dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n",
432 				in_client_uuid);
433 			rets = -ENOTTY;
434 			goto end;
435 		}
436 	}
437 
438 	dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
439 			me_cl->client_id);
440 	dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n",
441 			me_cl->props.protocol_version);
442 	dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n",
443 			me_cl->props.max_msg_length);
444 
445 	/* prepare the output buffer */
446 	client->max_msg_length = me_cl->props.max_msg_length;
447 	client->protocol_version = me_cl->props.protocol_version;
448 	dev_dbg(dev->dev, "Can connect?\n");
449 
450 	rets = mei_cl_connect(cl, me_cl, file);
451 
452 end:
453 	mei_me_cl_put(me_cl);
454 	return rets;
455 }
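
/*
 * Usage sketch (illustrative only, not part of the driver): connecting to
 * a FW client from userspace with IOCTL_MEI_CONNECT_CLIENT.  The device
 * node and MY_CLIENT_UUID are placeholders for a real node and a real
 * client UUID (a uuid_le, e.g. built with UUID_LE()).
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/mei.h>
 *
 *	struct mei_connect_client_data data = {
 *		.in_client_uuid = MY_CLIENT_UUID,	// placeholder
 *	};
 *	int fd = open("/dev/mei0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data) == 0)
 *		;	// data.out_client_properties.max_msg_length is the MTU
 */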
456 
457 /**
458  * mei_vt_support_check - check if the client supports vtags
459  *
460  * Locking: called under "dev->device_lock" lock
461  *
462  * @dev: mei_device
463  * @uuid: client UUID
464  *
465  * Return:
466  *	0 - supported
467  *	-ENOTTY - no such client
468  *	-EOPNOTSUPP - vtags are not supported by client
469  */
470 static int mei_vt_support_check(struct mei_device *dev, const uuid_le *uuid)
471 {
472 	struct mei_me_client *me_cl;
473 	int ret;
474 
475 	if (!dev->hbm_f_vt_supported)
476 		return -EOPNOTSUPP;
477 
478 	me_cl = mei_me_cl_by_uuid(dev, uuid);
479 	if (!me_cl) {
480 		dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
481 			uuid);
482 		return -ENOTTY;
483 	}
484 	ret = me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
485 	mei_me_cl_put(me_cl);
486 
487 	return ret;
488 }
489 
490 /**
491  * mei_ioctl_connect_vtag - connect to fw client with vtag IOCTL handler
492  *
493  * @file: pointer to file structure
494  * @in_client_uuid: requested UUID for connection
495  * @client: IOCTL connect data, output parameters
496  * @vtag: vm tag
497  *
498  * Locking: called under "dev->device_lock" lock
499  *
500  * Return: 0 on success, <0 on failure.
501  */
502 static int mei_ioctl_connect_vtag(struct file *file,
503 				  const uuid_le *in_client_uuid,
504 				  struct mei_client *client,
505 				  u8 vtag)
506 {
507 	struct mei_device *dev;
508 	struct mei_cl *cl;
509 	struct mei_cl *pos;
510 	struct mei_cl_vtag *cl_vtag;
511 
512 	cl = file->private_data;
513 	dev = cl->dev;
514 
515 	dev_dbg(dev->dev, "FW Client %pUl vtag %d\n", in_client_uuid, vtag);
516 
517 	switch (cl->state) {
518 	case MEI_FILE_DISCONNECTED:
519 		if (mei_cl_vtag_by_fp(cl, file) != vtag) {
520 			dev_err(dev->dev, "reconnect with different vtag\n");
521 			return -EINVAL;
522 		}
523 		break;
524 	case MEI_FILE_INITIALIZING:
525 		/* a racing connect from another thread may have already added this vtag */
526 		if (!IS_ERR(mei_cl_fp_by_vtag(cl, vtag))) {
527 			dev_err(dev->dev, "vtag already filled\n");
528 			return -EINVAL;
529 		}
530 
531 		list_for_each_entry(pos, &dev->file_list, link) {
532 			if (pos == cl)
533 				continue;
534 			if (!pos->me_cl)
535 				continue;
536 
537 			/* only search for same UUID */
538 			if (uuid_le_cmp(*mei_cl_uuid(pos), *in_client_uuid))
539 				continue;
540 
541 			/* if the tag already exists, try another fp */
542 			if (!IS_ERR(mei_cl_fp_by_vtag(pos, vtag)))
543 				continue;
544 
545 			/* replace cl with acquired one */
546 			dev_dbg(dev->dev, "replacing with existing cl\n");
547 			mei_cl_unlink(cl);
548 			kfree(cl);
549 			file->private_data = pos;
550 			cl = pos;
551 			break;
552 		}
553 
554 		cl_vtag = mei_cl_vtag_alloc(file, vtag);
555 		if (IS_ERR(cl_vtag))
556 			return -ENOMEM;
557 
558 		list_add_tail(&cl_vtag->list, &cl->vtag_map);
559 		break;
560 	default:
561 		return -EBUSY;
562 	}
563 
564 	while (cl->state != MEI_FILE_INITIALIZING &&
565 	       cl->state != MEI_FILE_DISCONNECTED &&
566 	       cl->state != MEI_FILE_CONNECTED) {
567 		mutex_unlock(&dev->device_lock);
568 		wait_event_timeout(cl->wait,
569 				   (cl->state == MEI_FILE_CONNECTED ||
570 				    cl->state == MEI_FILE_DISCONNECTED ||
571 				    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
572 				    cl->state == MEI_FILE_DISCONNECT_REPLY),
573 				   dev->timeouts.cl_connect);
574 		mutex_lock(&dev->device_lock);
575 	}
576 
577 	if (!mei_cl_is_connected(cl))
578 		return mei_ioctl_connect_client(file, in_client_uuid, client);
579 
580 	client->max_msg_length = cl->me_cl->props.max_msg_length;
581 	client->protocol_version = cl->me_cl->props.protocol_version;
582 
583 	return 0;
584 }
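
/*
 * Usage sketch (illustrative only, not part of the driver): connecting
 * with a vtag via IOCTL_MEI_CONNECT_CLIENT_VTAG.  MY_CLIENT_UUID is a
 * placeholder; vtag 0 is rejected, and the FW client must advertise
 * vtag support, otherwise the plain connect ioctl has to be used.
 *
 *	struct mei_connect_client_data_vtag data = {
 *		.connect.in_client_uuid = MY_CLIENT_UUID,	// placeholder
 *		.connect.vtag = 1,
 *	};
 *
 *	if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT_VTAG, &data) == 0)
 *		;	// data.out_client_properties holds the client properties
 */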
585 
586 /**
587  * mei_ioctl_client_notify_request -
588  *     propagate event notification request to client
589  *
590  * @file: pointer to file structure
591  * @request: 0 - disable, 1 - enable
592  *
593  * Return: 0 on success, <0 on error
594  */
595 static int mei_ioctl_client_notify_request(const struct file *file, u32 request)
596 {
597 	struct mei_cl *cl = file->private_data;
598 
599 	if (request != MEI_HBM_NOTIFICATION_START &&
600 	    request != MEI_HBM_NOTIFICATION_STOP)
601 		return -EINVAL;
602 
603 	return mei_cl_notify_request(cl, file, (u8)request);
604 }
605 
606 /**
607  * mei_ioctl_client_notify_get - get or wait for a pending notification event
608  *
609  * @file: pointer to file structure
610  * @notify_get: on output, 1 if a notification event is pending, 0 otherwise
611  *
612  * Return: 0 on success, <0 on error
613  */
614 static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get)
615 {
616 	struct mei_cl *cl = file->private_data;
617 	bool notify_ev;
618 	bool block = (file->f_flags & O_NONBLOCK) == 0;
619 	int rets;
620 
621 	rets = mei_cl_notify_get(cl, block, &notify_ev);
622 	if (rets)
623 		return rets;
624 
625 	*notify_get = notify_ev ? 1 : 0;
626 	return 0;
627 }
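
/*
 * Usage sketch (illustrative only, not part of the driver): enabling FW
 * event notifications and consuming one event on a connected fd.  A
 * blocking IOCTL_MEI_NOTIFY_GET is used here; poll() with POLLPRI is an
 * alternative way to wait.
 *
 *	__u32 enable = 1;	// 1 - enable (MEI_HBM_NOTIFICATION_START)
 *	__u32 pending = 0;
 *
 *	if (ioctl(fd, IOCTL_MEI_NOTIFY_SET, &enable) == 0 &&
 *	    ioctl(fd, IOCTL_MEI_NOTIFY_GET, &pending) == 0 && pending)
 *		;	// a notification event was pending and is now cleared
 */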
628 
629 /**
630  * mei_ioctl - the IOCTL function
631  *
632  * @file: pointer to file structure
633  * @cmd: ioctl command
634  * @data: ioctl argument, a pointer to user data
635  *
636  * Return: 0 on success, <0 on error
637  */
638 static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
639 {
640 	struct mei_device *dev;
641 	struct mei_cl *cl = file->private_data;
642 	struct mei_connect_client_data conn;
643 	struct mei_connect_client_data_vtag conn_vtag;
644 	const uuid_le *cl_uuid;
645 	struct mei_client *props;
646 	u8 vtag;
647 	u32 notify_get, notify_req;
648 	int rets;
649 
650 
651 	if (WARN_ON(!cl || !cl->dev))
652 		return -ENODEV;
653 
654 	dev = cl->dev;
655 
656 	dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd);
657 
658 	mutex_lock(&dev->device_lock);
659 	if (dev->dev_state != MEI_DEV_ENABLED) {
660 		rets = -ENODEV;
661 		goto out;
662 	}
663 
664 	switch (cmd) {
665 	case IOCTL_MEI_CONNECT_CLIENT:
666 		dev_dbg(dev->dev, "IOCTL_MEI_CONNECT_CLIENT\n");
667 		if (copy_from_user(&conn, (char __user *)data, sizeof(conn))) {
668 			dev_dbg(dev->dev, "failed to copy data from userland\n");
669 			rets = -EFAULT;
670 			goto out;
671 		}
672 		cl_uuid = &conn.in_client_uuid;
673 		props = &conn.out_client_properties;
674 		vtag = 0;
675 
676 		rets = mei_vt_support_check(dev, cl_uuid);
677 		if (rets == -ENOTTY)
678 			goto out;
679 		if (!rets)
680 			rets = mei_ioctl_connect_vtag(file, cl_uuid, props,
681 						      vtag);
682 		else
683 			rets = mei_ioctl_connect_client(file, cl_uuid, props);
684 		if (rets)
685 			goto out;
686 
687 		/* if all is ok, copy the data back to user space */
688 		if (copy_to_user((char __user *)data, &conn, sizeof(conn))) {
689 			dev_dbg(dev->dev, "failed to copy data to userland\n");
690 			rets = -EFAULT;
691 			goto out;
692 		}
693 
694 		break;
695 
696 	case IOCTL_MEI_CONNECT_CLIENT_VTAG:
697 		dev_dbg(dev->dev, "IOCTL_MEI_CONNECT_CLIENT_VTAG\n");
698 		if (copy_from_user(&conn_vtag, (char __user *)data,
699 				   sizeof(conn_vtag))) {
700 			dev_dbg(dev->dev, "failed to copy data from userland\n");
701 			rets = -EFAULT;
702 			goto out;
703 		}
704 
705 		cl_uuid = &conn_vtag.connect.in_client_uuid;
706 		props = &conn_vtag.out_client_properties;
707 		vtag = conn_vtag.connect.vtag;
708 
709 		rets = mei_vt_support_check(dev, cl_uuid);
710 		if (rets == -EOPNOTSUPP)
711 			dev_dbg(dev->dev, "FW Client %pUl does not support vtags\n",
712 				cl_uuid);
713 		if (rets)
714 			goto out;
715 
716 		if (!vtag) {
717 			dev_dbg(dev->dev, "vtag can't be zero\n");
718 			rets = -EINVAL;
719 			goto out;
720 		}
721 
722 		rets = mei_ioctl_connect_vtag(file, cl_uuid, props, vtag);
723 		if (rets)
724 			goto out;
725 
726 		/* if all is ok, copy the data back to user space */
727 		if (copy_to_user((char __user *)data, &conn_vtag,
728 				 sizeof(conn_vtag))) {
729 			dev_dbg(dev->dev, "failed to copy data to userland\n");
730 			rets = -EFAULT;
731 			goto out;
732 		}
733 
734 		break;
735 
736 	case IOCTL_MEI_NOTIFY_SET:
737 		dev_dbg(dev->dev, "IOCTL_MEI_NOTIFY_SET\n");
738 		if (copy_from_user(&notify_req,
739 				   (char __user *)data, sizeof(notify_req))) {
740 			dev_dbg(dev->dev, "failed to copy data from userland\n");
741 			rets = -EFAULT;
742 			goto out;
743 		}
744 		rets = mei_ioctl_client_notify_request(file, notify_req);
745 		break;
746 
747 	case IOCTL_MEI_NOTIFY_GET:
748 		dev_dbg(dev->dev, "IOCTL_MEI_NOTIFY_GET\n");
749 		rets = mei_ioctl_client_notify_get(file, &notify_get);
750 		if (rets)
751 			goto out;
752 
753 		dev_dbg(dev->dev, "copy notify_get data to user\n");
754 		if (copy_to_user((char __user *)data,
755 				&notify_get, sizeof(notify_get))) {
756 			dev_dbg(dev->dev, "failed to copy data to userland\n");
757 			rets = -EFAULT;
758 			goto out;
759 
760 		}
761 		break;
762 
763 	default:
764 		rets = -ENOIOCTLCMD;
765 	}
766 
767 out:
768 	mutex_unlock(&dev->device_lock);
769 	return rets;
770 }
771 
772 /**
773  * mei_poll - the poll function
774  *
775  * @file: pointer to file structure
776  * @wait: pointer to poll_table structure
777  *
778  * Return: poll mask
779  */
780 static __poll_t mei_poll(struct file *file, poll_table *wait)
781 {
782 	__poll_t req_events = poll_requested_events(wait);
783 	struct mei_cl *cl = file->private_data;
784 	struct mei_device *dev;
785 	__poll_t mask = 0;
786 	bool notify_en;
787 
788 	if (WARN_ON(!cl || !cl->dev))
789 		return EPOLLERR;
790 
791 	dev = cl->dev;
792 
793 	mutex_lock(&dev->device_lock);
794 
795 	notify_en = cl->notify_en && (req_events & EPOLLPRI);
796 
797 	if (dev->dev_state != MEI_DEV_ENABLED ||
798 	    !mei_cl_is_connected(cl)) {
799 		mask = EPOLLERR;
800 		goto out;
801 	}
802 
803 	if (notify_en) {
804 		poll_wait(file, &cl->ev_wait, wait);
805 		if (cl->notify_ev)
806 			mask |= EPOLLPRI;
807 	}
808 
809 	if (req_events & (EPOLLIN | EPOLLRDNORM)) {
810 		poll_wait(file, &cl->rx_wait, wait);
811 
812 		if (mei_cl_read_cb(cl, file))
813 			mask |= EPOLLIN | EPOLLRDNORM;
814 		else
815 			mei_cl_read_start(cl, mei_cl_mtu(cl), file);
816 	}
817 
818 	if (req_events & (EPOLLOUT | EPOLLWRNORM)) {
819 		poll_wait(file, &cl->tx_wait, wait);
820 		if (cl->tx_cb_queued < dev->tx_queue_limit)
821 			mask |= EPOLLOUT | EPOLLWRNORM;
822 	}
823 
824 out:
825 	mutex_unlock(&dev->device_lock);
826 	return mask;
827 }
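
/*
 * Usage sketch (illustrative only, not part of the driver): multiplexing
 * on a connected mei file descriptor.  POLLIN reports a completed read,
 * POLLOUT room in the TX queue and POLLPRI a pending FW notification
 * (when enabled via IOCTL_MEI_NOTIFY_SET).
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLPRI };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		;	// a message is ready, read() will not block
 */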
828 
829 /**
830  * mei_cl_is_write_queued - check if the client has pending writes.
831  *
832  * @cl: writing host client
833  *
834  * Return: true if client is writing, false otherwise.
835  */
836 static bool mei_cl_is_write_queued(struct mei_cl *cl)
837 {
838 	struct mei_device *dev = cl->dev;
839 	struct mei_cl_cb *cb;
840 
841 	list_for_each_entry(cb, &dev->write_list, list)
842 		if (cb->cl == cl)
843 			return true;
844 	list_for_each_entry(cb, &dev->write_waiting_list, list)
845 		if (cb->cl == cl)
846 			return true;
847 	return false;
848 }
849 
850 /**
851  * mei_fsync - the fsync handler
852  *
853  * @fp:       pointer to file structure
854  * @start:    unused
855  * @end:      unused
856  * @datasync: unused
857  *
858  * Return: 0 on success, -ENODEV if the client is not connected, -EINTR if interrupted by a signal
859  */
860 static int mei_fsync(struct file *fp, loff_t start, loff_t end, int datasync)
861 {
862 	struct mei_cl *cl = fp->private_data;
863 	struct mei_device *dev;
864 	int rets;
865 
866 	if (WARN_ON(!cl || !cl->dev))
867 		return -ENODEV;
868 
869 	dev = cl->dev;
870 
871 	mutex_lock(&dev->device_lock);
872 
873 	if (dev->dev_state != MEI_DEV_ENABLED || !mei_cl_is_connected(cl)) {
874 		rets = -ENODEV;
875 		goto out;
876 	}
877 
878 	while (mei_cl_is_write_queued(cl)) {
879 		mutex_unlock(&dev->device_lock);
880 		rets = wait_event_interruptible(cl->tx_wait,
881 				cl->writing_state == MEI_WRITE_COMPLETE ||
882 				!mei_cl_is_connected(cl));
883 		mutex_lock(&dev->device_lock);
884 		if (rets) {
885 			if (signal_pending(current))
886 				rets = -EINTR;
887 			goto out;
888 		}
889 		if (!mei_cl_is_connected(cl)) {
890 			rets = -ENODEV;
891 			goto out;
892 		}
893 	}
894 	rets = 0;
895 out:
896 	mutex_unlock(&dev->device_lock);
897 	return rets;
898 }
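
/*
 * Usage sketch (illustrative only, not part of the driver): using fsync()
 * to make sure a queued message has been handed to the FW, e.g. before
 * closing the file descriptor.  msg/msg_len are assumptions.
 *
 *	if (write(fd, msg, msg_len) == (ssize_t)msg_len)
 *		fsync(fd);	// blocks until this client's TX queues drain
 */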
899 
900 /**
901  * mei_fasync - asynchronous io support
902  *
903  * @fd: file descriptor
904  * @file: pointer to file structure
905  * @band: band bitmap
906  *
907  * Return: negative on error,
908  *         0 if it made no changes,
909  *         positive if a process was added or deleted
910  */
911 static int mei_fasync(int fd, struct file *file, int band)
912 {
913 
914 	struct mei_cl *cl = file->private_data;
915 
916 	if (!mei_cl_is_connected(cl))
917 		return -ENODEV;
918 
919 	return fasync_helper(fd, file, band, &cl->ev_async);
920 }
921 
922 /**
923  * trc_show - mei device trc attribute show method
924  *
925  * @device: device pointer
926  * @attr: attribute pointer
927  * @buf:  char out buffer
928  *
929  * Return: number of bytes written to buf, or a negative error code
930  */
931 static ssize_t trc_show(struct device *device,
932 			struct device_attribute *attr, char *buf)
933 {
934 	struct mei_device *dev = dev_get_drvdata(device);
935 	u32 trc;
936 	int ret;
937 
938 	ret = mei_trc_status(dev, &trc);
939 	if (ret)
940 		return ret;
941 	return sprintf(buf, "%08X\n", trc);
942 }
943 static DEVICE_ATTR_RO(trc);
944 
945 /**
946  * fw_status_show - mei device fw_status attribute show method
947  *
948  * @device: device pointer
949  * @attr: attribute pointer
950  * @buf:  char out buffer
951  *
952  * Return: number of bytes written to buf, or a negative error code
953  */
954 static ssize_t fw_status_show(struct device *device,
955 		struct device_attribute *attr, char *buf)
956 {
957 	struct mei_device *dev = dev_get_drvdata(device);
958 	struct mei_fw_status fw_status;
959 	int err, i;
960 	ssize_t cnt = 0;
961 
962 	mutex_lock(&dev->device_lock);
963 	err = mei_fw_status(dev, &fw_status);
964 	mutex_unlock(&dev->device_lock);
965 	if (err) {
966 		dev_err(device, "read fw_status error = %d\n", err);
967 		return err;
968 	}
969 
970 	for (i = 0; i < fw_status.count; i++)
971 		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%08X\n",
972 				fw_status.status[i]);
973 	return cnt;
974 }
975 static DEVICE_ATTR_RO(fw_status);
976 
977 /**
978  * hbm_ver_show - display HBM protocol version negotiated with FW
979  *
980  * @device: device pointer
981  * @attr: attribute pointer
982  * @buf:  char out buffer
983  *
984  * Return: number of bytes written to buf, or a negative error code
985  */
986 static ssize_t hbm_ver_show(struct device *device,
987 			    struct device_attribute *attr, char *buf)
988 {
989 	struct mei_device *dev = dev_get_drvdata(device);
990 	struct hbm_version ver;
991 
992 	mutex_lock(&dev->device_lock);
993 	ver = dev->version;
994 	mutex_unlock(&dev->device_lock);
995 
996 	return sprintf(buf, "%u.%u\n", ver.major_version, ver.minor_version);
997 }
998 static DEVICE_ATTR_RO(hbm_ver);
999 
1000 /**
1001  * hbm_ver_drv_show - display HBM protocol version advertised by driver
1002  *
1003  * @device: device pointer
1004  * @attr: attribute pointer
1005  * @buf:  char out buffer
1006  *
1007  * Return: number of bytes written to buf, or a negative error code
1008  */
1009 static ssize_t hbm_ver_drv_show(struct device *device,
1010 				struct device_attribute *attr, char *buf)
1011 {
1012 	return sprintf(buf, "%u.%u\n", HBM_MAJOR_VERSION, HBM_MINOR_VERSION);
1013 }
1014 static DEVICE_ATTR_RO(hbm_ver_drv);
1015 
1016 static ssize_t tx_queue_limit_show(struct device *device,
1017 				   struct device_attribute *attr, char *buf)
1018 {
1019 	struct mei_device *dev = dev_get_drvdata(device);
1020 	u8 size = 0;
1021 
1022 	mutex_lock(&dev->device_lock);
1023 	size = dev->tx_queue_limit;
1024 	mutex_unlock(&dev->device_lock);
1025 
1026 	return sysfs_emit(buf, "%u\n", size);
1027 }
1028 
1029 static ssize_t tx_queue_limit_store(struct device *device,
1030 				    struct device_attribute *attr,
1031 				    const char *buf, size_t count)
1032 {
1033 	struct mei_device *dev = dev_get_drvdata(device);
1034 	u8 limit;
1035 	unsigned int inp;
1036 	int err;
1037 
1038 	err = kstrtouint(buf, 10, &inp);
1039 	if (err)
1040 		return err;
1041 	if (inp > MEI_TX_QUEUE_LIMIT_MAX || inp < MEI_TX_QUEUE_LIMIT_MIN)
1042 		return -EINVAL;
1043 	limit = inp;
1044 
1045 	mutex_lock(&dev->device_lock);
1046 	dev->tx_queue_limit = limit;
1047 	mutex_unlock(&dev->device_lock);
1048 
1049 	return count;
1050 }
1051 static DEVICE_ATTR_RW(tx_queue_limit);
1052 
1053 /**
1054  * fw_ver_show - display ME FW version
1055  *
1056  * @device: device pointer
1057  * @attr: attribute pointer
1058  * @buf:  char out buffer
1059  *
1060  * Return: number of bytes written to buf, or a negative error code
1061  */
1062 static ssize_t fw_ver_show(struct device *device,
1063 			   struct device_attribute *attr, char *buf)
1064 {
1065 	struct mei_device *dev = dev_get_drvdata(device);
1066 	struct mei_fw_version *ver;
1067 	ssize_t cnt = 0;
1068 	int i;
1069 
1070 	ver = dev->fw_ver;
1071 
1072 	for (i = 0; i < MEI_MAX_FW_VER_BLOCKS; i++)
1073 		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%u:%u.%u.%u.%u\n",
1074 				 ver[i].platform, ver[i].major, ver[i].minor,
1075 				 ver[i].hotfix, ver[i].buildno);
1076 	return cnt;
1077 }
1078 static DEVICE_ATTR_RO(fw_ver);
1079 
1080 /**
1081  * dev_state_show - display device state
1082  *
1083  * @device: device pointer
1084  * @attr: attribute pointer
1085  * @buf:  char out buffer
1086  *
1087  * Return: number of bytes written to buf, or a negative error code
1088  */
1089 static ssize_t dev_state_show(struct device *device,
1090 			      struct device_attribute *attr, char *buf)
1091 {
1092 	struct mei_device *dev = dev_get_drvdata(device);
1093 	enum mei_dev_state dev_state;
1094 
1095 	mutex_lock(&dev->device_lock);
1096 	dev_state = dev->dev_state;
1097 	mutex_unlock(&dev->device_lock);
1098 
1099 	return sprintf(buf, "%s", mei_dev_state_str(dev_state));
1100 }
1101 static DEVICE_ATTR_RO(dev_state);
1102 
1103 /**
1104  * mei_set_devstate - set new device state and notify the dev_state sysfs file
1105  *
1106  * @dev: mei_device
1107  * @state: new device state
1108  */
1109 void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
1110 {
1111 	struct device *clsdev;
1112 
1113 	if (dev->dev_state == state)
1114 		return;
1115 
1116 	dev->dev_state = state;
1117 
1118 	clsdev = class_find_device_by_devt(mei_class, dev->cdev.dev);
1119 	if (clsdev) {
1120 		sysfs_notify(&clsdev->kobj, NULL, "dev_state");
1121 		put_device(clsdev);
1122 	}
1123 }
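
/*
 * Usage sketch (illustrative only, not part of the driver): because
 * mei_set_devstate() calls sysfs_notify(), userspace can wait for state
 * changes by polling the dev_state attribute for POLLPRI after an
 * initial read.  The sysfs path assumes the first mei device.
 *
 *	char state[32];
 *	int sfd = open("/sys/class/mei/mei0/dev_state", O_RDONLY);
 *	struct pollfd pfd = { .fd = sfd, .events = POLLPRI };
 *
 *	read(sfd, state, sizeof(state));	// arm the poll
 *	poll(&pfd, 1, -1);			// returns when the state changes
 *	lseek(sfd, 0, SEEK_SET);
 *	read(sfd, state, sizeof(state));	// fetch the new state
 */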
1124 
1125 /**
1126  * kind_show - display device kind
1127  *
1128  * @device: device pointer
1129  * @attr: attribute pointer
1130  * @buf: char out buffer
1131  *
1132  * Return: number of bytes written to buf, or a negative error code
1133  */
1134 static ssize_t kind_show(struct device *device,
1135 			 struct device_attribute *attr, char *buf)
1136 {
1137 	struct mei_device *dev = dev_get_drvdata(device);
1138 	ssize_t ret;
1139 
1140 	if (dev->kind)
1141 		ret = sprintf(buf, "%s\n", dev->kind);
1142 	else
1143 		ret = sprintf(buf, "%s\n", "mei");
1144 
1145 	return ret;
1146 }
1147 static DEVICE_ATTR_RO(kind);
1148 
1149 static struct attribute *mei_attrs[] = {
1150 	&dev_attr_fw_status.attr,
1151 	&dev_attr_hbm_ver.attr,
1152 	&dev_attr_hbm_ver_drv.attr,
1153 	&dev_attr_tx_queue_limit.attr,
1154 	&dev_attr_fw_ver.attr,
1155 	&dev_attr_dev_state.attr,
1156 	&dev_attr_trc.attr,
1157 	&dev_attr_kind.attr,
1158 	NULL
1159 };
1160 ATTRIBUTE_GROUPS(mei);
1161 
1162 /*
1163  * file operations structure used for the mei char device.
1164  */
1165 static const struct file_operations mei_fops = {
1166 	.owner = THIS_MODULE,
1167 	.read = mei_read,
1168 	.unlocked_ioctl = mei_ioctl,
1169 	.compat_ioctl = compat_ptr_ioctl,
1170 	.open = mei_open,
1171 	.release = mei_release,
1172 	.write = mei_write,
1173 	.poll = mei_poll,
1174 	.fsync = mei_fsync,
1175 	.fasync = mei_fasync,
1176 	.llseek = no_llseek
1177 };
1178 
1179 /**
1180  * mei_minor_get - obtain next free device minor number
1181  *
1182  * @dev:  device pointer
1183  *
1184  * Return: allocated minor, or -ENOSPC if no free minor left
1185  */
1186 static int mei_minor_get(struct mei_device *dev)
1187 {
1188 	int ret;
1189 
1190 	mutex_lock(&mei_minor_lock);
1191 	ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL);
1192 	if (ret >= 0)
1193 		dev->minor = ret;
1194 	else if (ret == -ENOSPC)
1195 		dev_err(dev->dev, "too many mei devices\n");
1196 
1197 	mutex_unlock(&mei_minor_lock);
1198 	return ret;
1199 }
1200 
1201 /**
1202  * mei_minor_free - mark device minor number as free
1203  *
1204  * @dev:  device pointer
1205  */
1206 static void mei_minor_free(struct mei_device *dev)
1207 {
1208 	mutex_lock(&mei_minor_lock);
1209 	idr_remove(&mei_idr, dev->minor);
1210 	mutex_unlock(&mei_minor_lock);
1211 }
1212 
1213 int mei_register(struct mei_device *dev, struct device *parent)
1214 {
1215 	struct device *clsdev; /* class device */
1216 	int ret, devno;
1217 
1218 	ret = mei_minor_get(dev);
1219 	if (ret < 0)
1220 		return ret;
1221 
1222 	/* Fill in the data structures */
1223 	devno = MKDEV(MAJOR(mei_devt), dev->minor);
1224 	cdev_init(&dev->cdev, &mei_fops);
1225 	dev->cdev.owner = parent->driver->owner;
1226 
1227 	/* Add the device */
1228 	ret = cdev_add(&dev->cdev, devno, 1);
1229 	if (ret) {
1230 		dev_err(parent, "unable to add device %d:%d\n",
1231 			MAJOR(mei_devt), dev->minor);
1232 		goto err_dev_add;
1233 	}
1234 
1235 	clsdev = device_create_with_groups(mei_class, parent, devno,
1236 					   dev, mei_groups,
1237 					   "mei%d", dev->minor);
1238 
1239 	if (IS_ERR(clsdev)) {
1240 		dev_err(parent, "unable to create device %d:%d\n",
1241 			MAJOR(mei_devt), dev->minor);
1242 		ret = PTR_ERR(clsdev);
1243 		goto err_dev_create;
1244 	}
1245 
1246 	mei_dbgfs_register(dev, dev_name(clsdev));
1247 
1248 	return 0;
1249 
1250 err_dev_create:
1251 	cdev_del(&dev->cdev);
1252 err_dev_add:
1253 	mei_minor_free(dev);
1254 	return ret;
1255 }
1256 EXPORT_SYMBOL_GPL(mei_register);
1257 
1258 void mei_deregister(struct mei_device *dev)
1259 {
1260 	int devno;
1261 
1262 	devno = dev->cdev.dev;
1263 	cdev_del(&dev->cdev);
1264 
1265 	mei_dbgfs_deregister(dev);
1266 
1267 	device_destroy(mei_class, devno);
1268 
1269 	mei_minor_free(dev);
1270 }
1271 EXPORT_SYMBOL_GPL(mei_deregister);
1272 
1273 static int __init mei_init(void)
1274 {
1275 	int ret;
1276 
1277 	mei_class = class_create("mei");
1278 	if (IS_ERR(mei_class)) {
1279 		pr_err("couldn't create class\n");
1280 		ret = PTR_ERR(mei_class);
1281 		goto err;
1282 	}
1283 
1284 	ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei");
1285 	if (ret < 0) {
1286 		pr_err("unable to allocate char dev region\n");
1287 		goto err_class;
1288 	}
1289 
1290 	ret = mei_cl_bus_init();
1291 	if (ret < 0) {
1292 		pr_err("unable to initialize bus\n");
1293 		goto err_chrdev;
1294 	}
1295 
1296 	return 0;
1297 
1298 err_chrdev:
1299 	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
1300 err_class:
1301 	class_destroy(mei_class);
1302 err:
1303 	return ret;
1304 }
1305 
1306 static void __exit mei_exit(void)
1307 {
1308 	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
1309 	class_destroy(mei_class);
1310 	mei_cl_bus_exit();
1311 }
1312 
1313 module_init(mei_init);
1314 module_exit(mei_exit);
1315 
1316 MODULE_AUTHOR("Intel Corporation");
1317 MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
1318 MODULE_LICENSE("GPL v2");
1319 
1320