xref: /openbmc/linux/drivers/misc/mei/bus.c (revision 1dbb4f0235a450f22e518124cbf9b922802ce38f)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
4  * Intel Management Engine Interface (Intel MEI) Linux driver
5  */
6 
7 #include <linux/module.h>
8 #include <linux/device.h>
9 #include <linux/kernel.h>
10 #include <linux/sched/signal.h>
11 #include <linux/init.h>
12 #include <linux/errno.h>
13 #include <linux/slab.h>
14 #include <linux/mutex.h>
15 #include <linux/interrupt.h>
16 #include <linux/mei_cl_bus.h>
17 
18 #include "mei_dev.h"
19 #include "client.h"
20 
21 #define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
22 
23 /**
24  * __mei_cl_send - internal client send (write)
25  *
26  * @cl: host client
27  * @buf: buffer to send
28  * @length: buffer length
29  * @vtag: virtual tag
30  * @mode: sending mode
31  *
32  * Return: written size in bytes or < 0 on error
33  */
34 ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
35 		      unsigned int mode)
36 {
37 	return __mei_cl_send_timeout(cl, buf, length, vtag, mode, MAX_SCHEDULE_TIMEOUT);
38 }
39 
40 /**
41  * __mei_cl_send_timeout - internal client send (write)
42  *
43  * @cl: host client
44  * @buf: buffer to send
45  * @length: buffer length
46  * @vtag: virtual tag
47  * @mode: sending mode
48  * @timeout: send timeout in milliseconds.
49  *           effective only for blocking writes, i.e. when the MEI_CL_IO_TX_BLOCKING mode bit is set.
50  *           set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait.
51  *
52  * Return: written size in bytes or < 0 on error
53  */
54 ssize_t __mei_cl_send_timeout(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
55 			      unsigned int mode, unsigned long timeout)
56 {
57 	struct mei_device *bus;
58 	struct mei_cl_cb *cb;
59 	ssize_t rets;
60 
61 	if (WARN_ON(!cl || !cl->dev))
62 		return -ENODEV;
63 
64 	bus = cl->dev;
65 
66 	mutex_lock(&bus->device_lock);
67 	if (bus->dev_state != MEI_DEV_ENABLED &&
68 	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
69 		rets = -ENODEV;
70 		goto out;
71 	}
72 
73 	if (!mei_cl_is_connected(cl)) {
74 		rets = -ENODEV;
75 		goto out;
76 	}
77 
78 	/* Check if we have an ME client device */
79 	if (!mei_me_cl_is_active(cl->me_cl)) {
80 		rets = -ENOTTY;
81 		goto out;
82 	}
83 
84 	if (vtag) {
85 		/* Check if vtag is supported by client */
86 		rets = mei_cl_vt_support_check(cl);
87 		if (rets)
88 			goto out;
89 	}
90 
91 	if (length > mei_cl_mtu(cl)) {
92 		rets = -EFBIG;
93 		goto out;
94 	}
95 
96 	while (cl->tx_cb_queued >= bus->tx_queue_limit) {
97 		mutex_unlock(&bus->device_lock);
98 		rets = wait_event_interruptible(cl->tx_wait,
99 				cl->writing_state == MEI_WRITE_COMPLETE ||
100 				(!mei_cl_is_connected(cl)));
101 		mutex_lock(&bus->device_lock);
102 		if (rets) {
103 			if (signal_pending(current))
104 				rets = -EINTR;
105 			goto out;
106 		}
107 		if (!mei_cl_is_connected(cl)) {
108 			rets = -ENODEV;
109 			goto out;
110 		}
111 	}
112 
113 	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
114 	if (!cb) {
115 		rets = -ENOMEM;
116 		goto out;
117 	}
118 	cb->vtag = vtag;
119 
120 	cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
121 	cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
122 	memcpy(cb->buf.data, buf, length);
123 
124 	rets = mei_cl_write(cl, cb, timeout);
125 
126 out:
127 	mutex_unlock(&bus->device_lock);
128 
129 	return rets;
130 }
131 
132 /**
133  * __mei_cl_recv - internal client receive (read)
134  *
135  * @cl: host client
136  * @buf: buffer to receive
137  * @length: buffer length
138  * @mode: io mode
139  * @vtag: virtual tag
140  * @timeout: recv timeout, 0 for infinite timeout
141  *
142  * Return: read size in bytes or < 0 on error
143  */
144 ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
145 		      unsigned int mode, unsigned long timeout)
146 {
147 	struct mei_device *bus;
148 	struct mei_cl_cb *cb;
149 	size_t r_length;
150 	ssize_t rets;
151 	bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);
152 
153 	if (WARN_ON(!cl || !cl->dev))
154 		return -ENODEV;
155 
156 	bus = cl->dev;
157 
158 	mutex_lock(&bus->device_lock);
159 	if (bus->dev_state != MEI_DEV_ENABLED &&
160 	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
161 		rets = -ENODEV;
162 		goto out;
163 	}
164 
165 	cb = mei_cl_read_cb(cl, NULL);
166 	if (cb)
167 		goto copy;
168 
169 	rets = mei_cl_read_start(cl, length, NULL);
170 	if (rets && rets != -EBUSY)
171 		goto out;
172 
173 	if (nonblock) {
174 		rets = -EAGAIN;
175 		goto out;
176 	}
177 
178 	/* wait on event only if there is no other waiter */
179 	/* synchronized under device mutex */
180 	if (!waitqueue_active(&cl->rx_wait)) {
181 
182 		mutex_unlock(&bus->device_lock);
183 
184 		if (timeout) {
185 			rets = wait_event_interruptible_timeout
186 					(cl->rx_wait,
187 					mei_cl_read_cb(cl, NULL) ||
188 					(!mei_cl_is_connected(cl)),
189 					msecs_to_jiffies(timeout));
190 			if (rets == 0)
191 				return -ETIME;
192 			if (rets < 0) {
193 				if (signal_pending(current))
194 					return -EINTR;
195 				return -ERESTARTSYS;
196 			}
197 		} else {
198 			if (wait_event_interruptible
199 					(cl->rx_wait,
200 					mei_cl_read_cb(cl, NULL) ||
201 					(!mei_cl_is_connected(cl)))) {
202 				if (signal_pending(current))
203 					return -EINTR;
204 				return -ERESTARTSYS;
205 			}
206 		}
207 
208 		mutex_lock(&bus->device_lock);
209 
210 		if (!mei_cl_is_connected(cl)) {
211 			rets = -ENODEV;
212 			goto out;
213 		}
214 	}
215 
216 	cb = mei_cl_read_cb(cl, NULL);
217 	if (!cb) {
218 		rets = 0;
219 		goto out;
220 	}
221 
222 copy:
223 	if (cb->status) {
224 		rets = cb->status;
225 		goto free;
226 	}
227 
228 	r_length = min_t(size_t, length, cb->buf_idx);
229 	memcpy(buf, cb->buf.data, r_length);
230 	rets = r_length;
231 	if (vtag)
232 		*vtag = cb->vtag;
233 
234 free:
235 	mei_cl_del_rd_completed(cl, cb);
236 out:
237 	mutex_unlock(&bus->device_lock);
238 
239 	return rets;
240 }
241 
242 /**
243  * mei_cldev_send_vtag - me device send with vtag (write)
244  *
245  * @cldev: me client device
246  * @buf: buffer to send
247  * @length: buffer length
248  * @vtag: virtual tag
249  *
250  * Return:
251  *  * written size in bytes
252  *  * < 0 on error
253  */
254 
255 ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
256 			    size_t length, u8 vtag)
257 {
258 	struct mei_cl *cl = cldev->cl;
259 
260 	return __mei_cl_send(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING);
261 }
262 EXPORT_SYMBOL_GPL(mei_cldev_send_vtag);
263 
264 /**
265  * mei_cldev_recv_vtag - client receive with vtag (read)
266  *
267  * @cldev: me client device
268  * @buf: buffer to receive
269  * @length: buffer length
270  * @vtag: virtual tag
271  *
272  * Return:
273  * * read size in bytes
274  * * < 0 on error
275  */
276 
277 ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
278 			    u8 *vtag)
279 {
280 	struct mei_cl *cl = cldev->cl;
281 
282 	return __mei_cl_recv(cl, buf, length, vtag, 0, 0);
283 }
284 EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag);
285 
286 /**
287  * mei_cldev_recv_nonblock_vtag - non-blocking client receive with vtag (read)
288  *
289  * @cldev: me client device
290  * @buf: buffer to receive
291  * @length: buffer length
292  * @vtag: virtual tag
293  *
294  * Return:
295  * * read size in bytes
296  * * -EAGAIN if the function would block.
297  * * < 0 on other error
298  */
299 ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
300 				     size_t length, u8 *vtag)
301 {
302 	struct mei_cl *cl = cldev->cl;
303 
304 	return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0);
305 }
306 EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag);
307 
308 /**
309  * mei_cldev_send - me device send (write)
310  *
311  * @cldev: me client device
312  * @buf: buffer to send
313  * @length: buffer length
314  *
315  * Return:
316  *  * written size in bytes
317  *  * < 0 on error
318  */
319 ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length)
320 {
321 	return mei_cldev_send_vtag(cldev, buf, length, 0);
322 }
323 EXPORT_SYMBOL_GPL(mei_cldev_send);
324 
325 /**
326  * mei_cldev_recv - client receive (read)
327  *
328  * @cldev: me client device
329  * @buf: buffer to receive
330  * @length: buffer length
331  *
332  * Return: read size in bytes or < 0 on error
333  */
334 ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
335 {
336 	return mei_cldev_recv_vtag(cldev, buf, length, NULL);
337 }
338 EXPORT_SYMBOL_GPL(mei_cldev_recv);
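
/*
 * Illustrative usage sketch (not part of the original file): a mei bus
 * driver would typically pair mei_cldev_send() with mei_cldev_recv() for a
 * blocking request/response exchange. The function name and request bytes
 * below are hypothetical; only the calling convention is taken from the
 * exported API above.
 *
 *	static int my_cldev_exchange(struct mei_cl_device *cldev)
 *	{
 *		const u8 req[] = { 0x01, 0x02 };
 *		u8 rsp[32];
 *		ssize_t ret;
 *
 *		ret = mei_cldev_send(cldev, req, sizeof(req));
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = mei_cldev_recv(cldev, rsp, sizeof(rsp));
 *		if (ret < 0)
 *			return ret;
 *
 *		return 0;
 *	}
 */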
339 
340 /**
341  * mei_cldev_recv_nonblock - non-blocking client receive (read)
342  *
343  * @cldev: me client device
344  * @buf: buffer to receive
345  * @length: buffer length
346  *
347  * Return: read size in bytes or < 0 on error
348  *         -EAGAIN if the function would block.
349  */
350 ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
351 				size_t length)
352 {
353 	return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL);
354 }
355 EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
356 
357 /**
358  * mei_cl_bus_rx_work - dispatch rx event for a bus device
359  *
360  * @work: work
361  */
362 static void mei_cl_bus_rx_work(struct work_struct *work)
363 {
364 	struct mei_cl_device *cldev;
365 	struct mei_device *bus;
366 
367 	cldev = container_of(work, struct mei_cl_device, rx_work);
368 
369 	bus = cldev->bus;
370 
371 	if (cldev->rx_cb)
372 		cldev->rx_cb(cldev);
373 
374 	mutex_lock(&bus->device_lock);
375 	if (mei_cl_is_connected(cldev->cl))
376 		mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
377 	mutex_unlock(&bus->device_lock);
378 }
379 
380 /**
381  * mei_cl_bus_notif_work - dispatch FW notif event for a bus device
382  *
383  * @work: work
384  */
385 static void mei_cl_bus_notif_work(struct work_struct *work)
386 {
387 	struct mei_cl_device *cldev;
388 
389 	cldev = container_of(work, struct mei_cl_device, notif_work);
390 
391 	if (cldev->notif_cb)
392 		cldev->notif_cb(cldev);
393 }
394 
395 /**
396  * mei_cl_bus_notify_event - schedule notify cb on bus client
397  *
398  * @cl: host client
399  *
400  * Return: true if event was scheduled
401  *         false if the client is not waiting for the event
402  */
403 bool mei_cl_bus_notify_event(struct mei_cl *cl)
404 {
405 	struct mei_cl_device *cldev = cl->cldev;
406 
407 	if (!cldev || !cldev->notif_cb)
408 		return false;
409 
410 	if (!cl->notify_ev)
411 		return false;
412 
413 	schedule_work(&cldev->notif_work);
414 
415 	cl->notify_ev = false;
416 
417 	return true;
418 }
419 
420 /**
421  * mei_cl_bus_rx_event - schedule rx event
422  *
423  * @cl: host client
424  *
425  * Return: true if event was scheduled
426  *         false if the client is not waiting for the event
427  */
428 bool mei_cl_bus_rx_event(struct mei_cl *cl)
429 {
430 	struct mei_cl_device *cldev = cl->cldev;
431 
432 	if (!cldev || !cldev->rx_cb)
433 		return false;
434 
435 	schedule_work(&cldev->rx_work);
436 
437 	return true;
438 }
439 
440 /**
441  * mei_cldev_register_rx_cb - register Rx event callback
442  *
443  * @cldev: me client device
444  * @rx_cb: callback function
445  *
446  * Return: 0 on success
447  *         -EALREADY if a callback is already registered
448  *         <0 on other errors
449  */
450 int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
451 {
452 	struct mei_device *bus = cldev->bus;
453 	int ret;
454 
455 	if (!rx_cb)
456 		return -EINVAL;
457 	if (cldev->rx_cb)
458 		return -EALREADY;
459 
460 	cldev->rx_cb = rx_cb;
461 	INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);
462 
463 	mutex_lock(&bus->device_lock);
464 	if (mei_cl_is_connected(cldev->cl))
465 		ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
466 	else
467 		ret = -ENODEV;
468 	mutex_unlock(&bus->device_lock);
469 	if (ret && ret != -EBUSY) {
470 		cancel_work_sync(&cldev->rx_work);
471 		cldev->rx_cb = NULL;
472 		return ret;
473 	}
474 
475 	return 0;
476 }
477 EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
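
/*
 * Illustrative usage sketch (not part of the original file): registering an
 * Rx callback from a bus driver. The callback is invoked from the cldev
 * rx_work workqueue context (see mei_cl_bus_rx_work() above), so it may
 * sleep and typically drains the data with mei_cldev_recv(). The callback
 * name is hypothetical.
 *
 *	static void my_rx_cb(struct mei_cl_device *cldev)
 *	{
 *		u8 buf[64];
 *		ssize_t len;
 *
 *		len = mei_cldev_recv(cldev, buf, sizeof(buf));
 *		if (len < 0)
 *			dev_err(&cldev->dev, "rx failed: %zd\n", len);
 *	}
 *
 *	ret = mei_cldev_register_rx_cb(cldev, my_rx_cb);
 */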
478 
479 /**
480  * mei_cldev_register_notif_cb - register FW notification event callback
481  *
482  * @cldev: me client device
483  * @notif_cb: callback function
484  *
485  * Return: 0 on success
486  *         -EALREADY if a callback is already registered
487  *         <0 on other errors
488  */
489 int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
490 				mei_cldev_cb_t notif_cb)
491 {
492 	struct mei_device *bus = cldev->bus;
493 	int ret;
494 
495 	if (!notif_cb)
496 		return -EINVAL;
497 
498 	if (cldev->notif_cb)
499 		return -EALREADY;
500 
501 	cldev->notif_cb = notif_cb;
502 	INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);
503 
504 	mutex_lock(&bus->device_lock);
505 	ret = mei_cl_notify_request(cldev->cl, NULL, 1);
506 	mutex_unlock(&bus->device_lock);
507 	if (ret) {
508 		cancel_work_sync(&cldev->notif_work);
509 		cldev->notif_cb = NULL;
510 		return ret;
511 	}
512 
513 	return 0;
514 }
515 EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);
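
/*
 * Illustrative usage sketch (not part of the original file): FW notification
 * callbacks follow the same pattern as Rx callbacks and are invoked from the
 * cldev notif_work workqueue context (see mei_cl_bus_notif_work() above).
 * The callback name is hypothetical.
 *
 *	static void my_notif_cb(struct mei_cl_device *cldev)
 *	{
 *		dev_dbg(&cldev->dev, "FW notification received\n");
 *	}
 *
 *	ret = mei_cldev_register_notif_cb(cldev, my_notif_cb);
 */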
516 
517 /**
518  * mei_cldev_get_drvdata - driver data getter
519  *
520  * @cldev: mei client device
521  *
522  * Return: driver private data
523  */
524 void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev)
525 {
526 	return dev_get_drvdata(&cldev->dev);
527 }
528 EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata);
529 
530 /**
531  * mei_cldev_set_drvdata - driver data setter
532  *
533  * @cldev: mei client device
534  * @data: data to store
535  */
536 void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
537 {
538 	dev_set_drvdata(&cldev->dev, data);
539 }
540 EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);
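
/*
 * Illustrative usage sketch (not part of the original file): the drvdata
 * helpers simply wrap dev_set_drvdata()/dev_get_drvdata() on &cldev->dev, so
 * the usual probe/remove pattern applies. "struct my_priv" is hypothetical.
 *
 *	In probe():
 *		struct my_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 *
 *		mei_cldev_set_drvdata(cldev, priv);
 *
 *	In remove() or in a callback:
 *		struct my_priv *priv = mei_cldev_get_drvdata(cldev);
 */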
541 
542 /**
543  * mei_cldev_uuid - return uuid of the underlying me client
544  *
545  * @cldev: mei client device
546  *
547  * Return: me client uuid
548  */
549 const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev)
550 {
551 	return mei_me_cl_uuid(cldev->me_cl);
552 }
553 EXPORT_SYMBOL_GPL(mei_cldev_uuid);
554 
555 /**
556  * mei_cldev_ver - return protocol version of the underlying me client
557  *
558  * @cldev: mei client device
559  *
560  * Return: me client protocol version
561  */
562 u8 mei_cldev_ver(const struct mei_cl_device *cldev)
563 {
564 	return mei_me_cl_ver(cldev->me_cl);
565 }
566 EXPORT_SYMBOL_GPL(mei_cldev_ver);
567 
568 /**
569  * mei_cldev_enabled - check whether the device is enabled
570  *
571  * @cldev: mei client device
572  *
573  * Return: true if me client is initialized and connected
574  */
575 bool mei_cldev_enabled(const struct mei_cl_device *cldev)
576 {
577 	return mei_cl_is_connected(cldev->cl);
578 }
579 EXPORT_SYMBOL_GPL(mei_cldev_enabled);
580 
581 /**
582  * mei_cl_bus_module_get - acquire module of the underlying
583  *    hw driver.
584  *
585  * @cldev: mei client device
586  *
587  * Return: true on success; false if the module was removed.
588  */
589 static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
590 {
591 	return try_module_get(cldev->bus->dev->driver->owner);
592 }
593 
594 /**
595  * mei_cl_bus_module_put -  release the underlying hw module.
596  *
597  * @cldev: mei client device
598  */
599 static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
600 {
601 	module_put(cldev->bus->dev->driver->owner);
602 }
603 
604 /**
605  * mei_cl_bus_vtag - get bus vtag entry wrapper
606  *     The tag for bus client is always first.
607  *
608  * @cl: host client
609  *
610  * Return: bus vtag or NULL
611  */
612 static inline struct mei_cl_vtag *mei_cl_bus_vtag(struct mei_cl *cl)
613 {
614 	return list_first_entry_or_null(&cl->vtag_map,
615 					struct mei_cl_vtag, list);
616 }
617 
618 /**
619  * mei_cl_bus_vtag_alloc - add bus client entry to vtag map
620  *
621  * @cldev: me client device
622  *
623  * Return:
624  * * 0 on success
625  * * -ENOMEM if memory allocation failed
626  */
627 static int mei_cl_bus_vtag_alloc(struct mei_cl_device *cldev)
628 {
629 	struct mei_cl *cl = cldev->cl;
630 	struct mei_cl_vtag *cl_vtag;
631 
632 	/*
633 	 * Bail out if the client does not support vtags
634 	 * or has already allocated one
635 	 */
636 	if (mei_cl_vt_support_check(cl) || mei_cl_bus_vtag(cl))
637 		return 0;
638 
639 	cl_vtag = mei_cl_vtag_alloc(NULL, 0);
640 	if (IS_ERR(cl_vtag))
641 		return -ENOMEM;
642 
643 	list_add_tail(&cl_vtag->list, &cl->vtag_map);
644 
645 	return 0;
646 }
647 
648 /**
649  * mei_cl_bus_vtag_free - remove the bus entry from vtag map
650  *
651  * @cldev: me client device
652  */
653 static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev)
654 {
655 	struct mei_cl *cl = cldev->cl;
656 	struct mei_cl_vtag *cl_vtag;
657 
658 	cl_vtag = mei_cl_bus_vtag(cl);
659 	if (!cl_vtag)
660 		return;
661 
662 	list_del(&cl_vtag->list);
663 	kfree(cl_vtag);
664 }
665 
666 void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
667 {
668 	struct mei_device *bus;
669 	struct mei_cl *cl;
670 	int ret;
671 
672 	if (!cldev || !buffer_id || !size)
673 		return ERR_PTR(-EINVAL);
674 
675 	if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) {
676 		dev_err(&cldev->dev, "Map size should be aligned to %lu\n",
677 			MEI_FW_PAGE_SIZE);
678 		return ERR_PTR(-EINVAL);
679 	}
680 
681 	cl = cldev->cl;
682 	bus = cldev->bus;
683 
684 	mutex_lock(&bus->device_lock);
685 	if (cl->state == MEI_FILE_UNINITIALIZED) {
686 		ret = mei_cl_link(cl);
687 		if (ret)
688 			goto out;
689 		/* update pointers */
690 		cl->cldev = cldev;
691 	}
692 
693 	ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
694 out:
695 	mutex_unlock(&bus->device_lock);
696 	if (ret)
697 		return ERR_PTR(ret);
698 	return cl->dma.vaddr;
699 }
700 EXPORT_SYMBOL_GPL(mei_cldev_dma_map);
701 
702 int mei_cldev_dma_unmap(struct mei_cl_device *cldev)
703 {
704 	struct mei_device *bus;
705 	struct mei_cl *cl;
706 	int ret;
707 
708 	if (!cldev)
709 		return -EINVAL;
710 
711 	cl = cldev->cl;
712 	bus = cldev->bus;
713 
714 	mutex_lock(&bus->device_lock);
715 	ret = mei_cl_dma_unmap(cl, NULL);
716 
717 	mei_cl_flush_queues(cl, NULL);
718 	mei_cl_unlink(cl);
719 	mutex_unlock(&bus->device_lock);
720 	return ret;
721 }
722 EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap);
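
/*
 * Illustrative usage sketch (not part of the original file): mapping a DMA
 * buffer shared with the firmware client. The size must be a multiple of
 * MEI_FW_PAGE_SIZE; the buffer id (2) and the SZ_16K size are hypothetical
 * values assumed to satisfy that requirement.
 *
 *	void *vaddr;
 *
 *	vaddr = mei_cldev_dma_map(cldev, 2, SZ_16K);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	use the buffer, then release it with:
 *	mei_cldev_dma_unmap(cldev);
 */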
723 
724 /**
725  * mei_cldev_enable - enable me client device
726  *     create connection with me client
727  *
728  * @cldev: me client device
729  *
730  * Return: 0 on success and < 0 on error
731  */
732 int mei_cldev_enable(struct mei_cl_device *cldev)
733 {
734 	struct mei_device *bus = cldev->bus;
735 	struct mei_cl *cl;
736 	int ret;
737 
738 	cl = cldev->cl;
739 
740 	mutex_lock(&bus->device_lock);
741 	if (cl->state == MEI_FILE_UNINITIALIZED) {
742 		ret = mei_cl_link(cl);
743 		if (ret)
744 			goto out;
745 		/* update pointers */
746 		cl->cldev = cldev;
747 	}
748 
749 	if (mei_cl_is_connected(cl)) {
750 		ret = 0;
751 		goto out;
752 	}
753 
754 	if (!mei_me_cl_is_active(cldev->me_cl)) {
755 		dev_err(&cldev->dev, "me client is not active\n");
756 		ret = -ENOTTY;
757 		goto out;
758 	}
759 
760 	ret = mei_cl_bus_vtag_alloc(cldev);
761 	if (ret)
762 		goto out;
763 
764 	ret = mei_cl_connect(cl, cldev->me_cl, NULL);
765 	if (ret < 0) {
766 		dev_err(&cldev->dev, "cannot connect\n");
767 		mei_cl_bus_vtag_free(cldev);
768 	}
769 
770 out:
771 	mutex_unlock(&bus->device_lock);
772 
773 	return ret;
774 }
775 EXPORT_SYMBOL_GPL(mei_cldev_enable);
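
/*
 * Illustrative usage sketch (not part of the original file): a bus driver
 * probe usually starts by connecting to the firmware client and tears the
 * connection down with mei_cldev_disable() on the error path and in
 * remove(). The probe and callback names are hypothetical.
 *
 *	static int my_probe(struct mei_cl_device *cldev,
 *			    const struct mei_cl_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = mei_cldev_enable(cldev);
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = mei_cldev_register_rx_cb(cldev, my_rx_cb);
 *		if (ret) {
 *			mei_cldev_disable(cldev);
 *			return ret;
 *		}
 *
 *		return 0;
 *	}
 */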
776 
777 /**
778  * mei_cldev_unregister_callbacks - internal wrapper for unregistering
779  *  callbacks.
780  *
781  * @cldev: client device
782  */
783 static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
784 {
785 	if (cldev->rx_cb) {
786 		cancel_work_sync(&cldev->rx_work);
787 		cldev->rx_cb = NULL;
788 	}
789 
790 	if (cldev->notif_cb) {
791 		cancel_work_sync(&cldev->notif_work);
792 		cldev->notif_cb = NULL;
793 	}
794 }
795 
796 /**
797  * mei_cldev_disable - disable me client device
798  *     disconnect from the me client
799  *
800  * @cldev: me client device
801  *
802  * Return: 0 on success and < 0 on error
803  */
804 int mei_cldev_disable(struct mei_cl_device *cldev)
805 {
806 	struct mei_device *bus;
807 	struct mei_cl *cl;
808 	int err;
809 
810 	if (!cldev)
811 		return -ENODEV;
812 
813 	cl = cldev->cl;
814 
815 	bus = cldev->bus;
816 
817 	mei_cldev_unregister_callbacks(cldev);
818 
819 	mutex_lock(&bus->device_lock);
820 
821 	mei_cl_bus_vtag_free(cldev);
822 
823 	if (!mei_cl_is_connected(cl)) {
824 		dev_dbg(bus->dev, "Already disconnected\n");
825 		err = 0;
826 		goto out;
827 	}
828 
829 	err = mei_cl_disconnect(cl);
830 	if (err < 0)
831 		dev_err(bus->dev, "Could not disconnect from the ME client\n");
832 
833 out:
834 	/* Flush queues and remove any pending read unless we have mapped DMA */
835 	if (!cl->dma_mapped) {
836 		mei_cl_flush_queues(cl, NULL);
837 		mei_cl_unlink(cl);
838 	}
839 
840 	mutex_unlock(&bus->device_lock);
841 	return err;
842 }
843 EXPORT_SYMBOL_GPL(mei_cldev_disable);
844 
845 /**
846  * mei_cl_device_find - find matching entry in the driver id table
847  *
848  * @cldev: me client device
849  * @cldrv: me client driver
850  *
851  * Return: id on success; NULL if no id matches
852  */
853 static const
854 struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev,
855 					    const struct mei_cl_driver *cldrv)
856 {
857 	const struct mei_cl_device_id *id;
858 	const uuid_le *uuid;
859 	u8 version;
860 	bool match;
861 
862 	uuid = mei_me_cl_uuid(cldev->me_cl);
863 	version = mei_me_cl_ver(cldev->me_cl);
864 
865 	id = cldrv->id_table;
866 	while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
867 		if (!uuid_le_cmp(*uuid, id->uuid)) {
868 			match = true;
869 
870 			if (cldev->name[0])
871 				if (strncmp(cldev->name, id->name,
872 					    sizeof(id->name)))
873 					match = false;
874 
875 			if (id->version != MEI_CL_VERSION_ANY)
876 				if (id->version != version)
877 					match = false;
878 			if (match)
879 				return id;
880 		}
881 
882 		id++;
883 	}
884 
885 	return NULL;
886 }
887 
888 /**
889  * mei_cl_device_match  - device match function
890  *
891  * @dev: device
892  * @drv: driver
893  *
894  * Return: 1 if a matching device was found, 0 otherwise
895  */
896 static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
897 {
898 	const struct mei_cl_device *cldev = to_mei_cl_device(dev);
899 	const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
900 	const struct mei_cl_device_id *found_id;
901 
902 	if (!cldev)
903 		return 0;
904 
905 	if (!cldev->do_match)
906 		return 0;
907 
908 	if (!cldrv || !cldrv->id_table)
909 		return 0;
910 
911 	found_id = mei_cl_device_find(cldev, cldrv);
912 	if (found_id)
913 		return 1;
914 
915 	return 0;
916 }
917 
918 /**
919  * mei_cl_device_probe - bus probe function
920  *
921  * @dev: device
922  *
923  * Return:  0 on success; < 0 otherwise
924  */
925 static int mei_cl_device_probe(struct device *dev)
926 {
927 	struct mei_cl_device *cldev;
928 	struct mei_cl_driver *cldrv;
929 	const struct mei_cl_device_id *id;
930 	int ret;
931 
932 	cldev = to_mei_cl_device(dev);
933 	cldrv = to_mei_cl_driver(dev->driver);
934 
935 	if (!cldev)
936 		return 0;
937 
938 	if (!cldrv || !cldrv->probe)
939 		return -ENODEV;
940 
941 	id = mei_cl_device_find(cldev, cldrv);
942 	if (!id)
943 		return -ENODEV;
944 
945 	if (!mei_cl_bus_module_get(cldev)) {
946 		dev_err(&cldev->dev, "get hw module failed");
947 		return -ENODEV;
948 	}
949 
950 	ret = cldrv->probe(cldev, id);
951 	if (ret) {
952 		mei_cl_bus_module_put(cldev);
953 		return ret;
954 	}
955 
956 	__module_get(THIS_MODULE);
957 	return 0;
958 }
959 
960 /**
961  * mei_cl_device_remove - remove device from the bus
962  *
963  * @dev: device
964  *
965  * The remove callback does not return a value.
966  */
967 static void mei_cl_device_remove(struct device *dev)
968 {
969 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
970 	struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);
971 
972 	if (cldrv->remove)
973 		cldrv->remove(cldev);
974 
975 	mei_cldev_unregister_callbacks(cldev);
976 
977 	mei_cl_bus_module_put(cldev);
978 	module_put(THIS_MODULE);
979 }
980 
981 static ssize_t name_show(struct device *dev, struct device_attribute *a,
982 			     char *buf)
983 {
984 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
985 
986 	return scnprintf(buf, PAGE_SIZE, "%s", cldev->name);
987 }
988 static DEVICE_ATTR_RO(name);
989 
990 static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
991 			     char *buf)
992 {
993 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
994 	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
995 
996 	return sprintf(buf, "%pUl", uuid);
997 }
998 static DEVICE_ATTR_RO(uuid);
999 
1000 static ssize_t version_show(struct device *dev, struct device_attribute *a,
1001 			     char *buf)
1002 {
1003 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1004 	u8 version = mei_me_cl_ver(cldev->me_cl);
1005 
1006 	return sprintf(buf, "%02X", version);
1007 }
1008 static DEVICE_ATTR_RO(version);
1009 
1010 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
1011 			     char *buf)
1012 {
1013 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1014 	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
1015 	u8 version = mei_me_cl_ver(cldev->me_cl);
1016 
1017 	return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
1018 			 cldev->name, uuid, version);
1019 }
1020 static DEVICE_ATTR_RO(modalias);
1021 
1022 static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
1023 			     char *buf)
1024 {
1025 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1026 	u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);
1027 
1028 	return sprintf(buf, "%d", maxconn);
1029 }
1030 static DEVICE_ATTR_RO(max_conn);
1031 
1032 static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
1033 			  char *buf)
1034 {
1035 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1036 	u8 fixed = mei_me_cl_fixed(cldev->me_cl);
1037 
1038 	return sprintf(buf, "%d", fixed);
1039 }
1040 static DEVICE_ATTR_RO(fixed);
1041 
1042 static ssize_t vtag_show(struct device *dev, struct device_attribute *a,
1043 			 char *buf)
1044 {
1045 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1046 	bool vt = mei_me_cl_vt(cldev->me_cl);
1047 
1048 	return sprintf(buf, "%d", vt);
1049 }
1050 static DEVICE_ATTR_RO(vtag);
1051 
1052 static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
1053 			    char *buf)
1054 {
1055 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1056 	u32 maxlen = mei_me_cl_max_len(cldev->me_cl);
1057 
1058 	return sprintf(buf, "%u", maxlen);
1059 }
1060 static DEVICE_ATTR_RO(max_len);
1061 
1062 static struct attribute *mei_cldev_attrs[] = {
1063 	&dev_attr_name.attr,
1064 	&dev_attr_uuid.attr,
1065 	&dev_attr_version.attr,
1066 	&dev_attr_modalias.attr,
1067 	&dev_attr_max_conn.attr,
1068 	&dev_attr_fixed.attr,
1069 	&dev_attr_vtag.attr,
1070 	&dev_attr_max_len.attr,
1071 	NULL,
1072 };
1073 ATTRIBUTE_GROUPS(mei_cldev);
1074 
1075 /**
1076  * mei_cl_device_uevent - me client bus uevent handler
1077  *
1078  * @dev: device
1079  * @env: uevent kobject
1080  *
1081  * Return: 0 on success, -ENOMEM when add_uevent_var fails
1082  */
1083 static int mei_cl_device_uevent(struct device *dev, struct kobj_uevent_env *env)
1084 {
1085 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1086 	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
1087 	u8 version = mei_me_cl_ver(cldev->me_cl);
1088 
1089 	if (add_uevent_var(env, "MEI_CL_VERSION=%d", version))
1090 		return -ENOMEM;
1091 
1092 	if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
1093 		return -ENOMEM;
1094 
1095 	if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
1096 		return -ENOMEM;
1097 
1098 	if (add_uevent_var(env, "MODALIAS=mei:%s:%pUl:%02X:",
1099 			   cldev->name, uuid, version))
1100 		return -ENOMEM;
1101 
1102 	return 0;
1103 }
1104 
1105 static struct bus_type mei_cl_bus_type = {
1106 	.name		= "mei",
1107 	.dev_groups	= mei_cldev_groups,
1108 	.match		= mei_cl_device_match,
1109 	.probe		= mei_cl_device_probe,
1110 	.remove		= mei_cl_device_remove,
1111 	.uevent		= mei_cl_device_uevent,
1112 };
1113 
1114 static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
1115 {
1116 	if (bus)
1117 		get_device(bus->dev);
1118 
1119 	return bus;
1120 }
1121 
1122 static void mei_dev_bus_put(struct mei_device *bus)
1123 {
1124 	if (bus)
1125 		put_device(bus->dev);
1126 }
1127 
1128 static void mei_cl_bus_dev_release(struct device *dev)
1129 {
1130 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1131 
1132 	if (!cldev)
1133 		return;
1134 
1135 	mei_cl_flush_queues(cldev->cl, NULL);
1136 	mei_me_cl_put(cldev->me_cl);
1137 	mei_dev_bus_put(cldev->bus);
1138 	mei_cl_unlink(cldev->cl);
1139 	kfree(cldev->cl);
1140 	kfree(cldev);
1141 }
1142 
1143 static const struct device_type mei_cl_device_type = {
1144 	.release = mei_cl_bus_dev_release,
1145 };
1146 
1147 /**
1148  * mei_cl_bus_set_name - set device name for me client device
1149  *  <controller>-<client device>
1150  *  Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
1151  *
1152  * @cldev: me client device
1153  */
1154 static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
1155 {
1156 	dev_set_name(&cldev->dev, "%s-%pUl",
1157 		     dev_name(cldev->bus->dev),
1158 		     mei_me_cl_uuid(cldev->me_cl));
1159 }
1160 
1161 /**
1162  * mei_cl_bus_dev_alloc - initialize and allocate mei client device
1163  *
1164  * @bus: mei device
1165  * @me_cl: me client
1166  *
1167  * Return: allocated device structure or NULL on allocation failure
1168  */
1169 static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
1170 						  struct mei_me_client *me_cl)
1171 {
1172 	struct mei_cl_device *cldev;
1173 	struct mei_cl *cl;
1174 
1175 	cldev = kzalloc(sizeof(*cldev), GFP_KERNEL);
1176 	if (!cldev)
1177 		return NULL;
1178 
1179 	cl = mei_cl_allocate(bus);
1180 	if (!cl) {
1181 		kfree(cldev);
1182 		return NULL;
1183 	}
1184 
1185 	device_initialize(&cldev->dev);
1186 	cldev->dev.parent = bus->dev;
1187 	cldev->dev.bus    = &mei_cl_bus_type;
1188 	cldev->dev.type   = &mei_cl_device_type;
1189 	cldev->bus        = mei_dev_bus_get(bus);
1190 	cldev->me_cl      = mei_me_cl_get(me_cl);
1191 	cldev->cl         = cl;
1192 	mei_cl_bus_set_name(cldev);
1193 	cldev->is_added   = 0;
1194 	INIT_LIST_HEAD(&cldev->bus_list);
1195 
1196 	return cldev;
1197 }
1198 
1199 /**
1200  * mei_cl_bus_dev_setup - setup me client device
1201  *    run fix up routines and set the device name
1202  *
1203  * @bus: mei device
1204  * @cldev: me client device
1205  *
1206  * Return: true if the device is eligible for enumeration
1207  */
1208 static bool mei_cl_bus_dev_setup(struct mei_device *bus,
1209 				 struct mei_cl_device *cldev)
1210 {
1211 	cldev->do_match = 1;
1212 	mei_cl_bus_dev_fixup(cldev);
1213 
1214 	/* the device name can change during fix up */
1215 	if (cldev->do_match)
1216 		mei_cl_bus_set_name(cldev);
1217 
1218 	return cldev->do_match == 1;
1219 }
1220 
1221 /**
1222  * mei_cl_bus_dev_add - add me client devices
1223  *
1224  * @cldev: me client device
1225  *
1226  * Return: 0 on success; < 0 on failure
1227  */
1228 static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
1229 {
1230 	int ret;
1231 
1232 	dev_dbg(cldev->bus->dev, "adding %pUL:%02X\n",
1233 		mei_me_cl_uuid(cldev->me_cl),
1234 		mei_me_cl_ver(cldev->me_cl));
1235 	ret = device_add(&cldev->dev);
1236 	if (!ret)
1237 		cldev->is_added = 1;
1238 
1239 	return ret;
1240 }
1241 
1242 /**
1243  * mei_cl_bus_dev_stop - stop the driver
1244  *
1245  * @cldev: me client device
1246  */
1247 static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
1248 {
1249 	if (cldev->is_added)
1250 		device_release_driver(&cldev->dev);
1251 }
1252 
1253 /**
1254  * mei_cl_bus_dev_destroy - destroy me client devices object
1255  *
1256  * @cldev: me client device
1257  *
1258  * Locking: called under "dev->cl_bus_lock" lock
1259  */
1260 static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
1261 {
1262 
1263 	WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock));
1264 
1265 	if (!cldev->is_added)
1266 		return;
1267 
1268 	device_del(&cldev->dev);
1269 
1270 	list_del_init(&cldev->bus_list);
1271 
1272 	cldev->is_added = 0;
1273 	put_device(&cldev->dev);
1274 }
1275 
1276 /**
1277  * mei_cl_bus_remove_device - remove a device from the bus
1278  *
1279  * @cldev: me client device
1280  */
1281 static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
1282 {
1283 	mei_cl_bus_dev_stop(cldev);
1284 	mei_cl_bus_dev_destroy(cldev);
1285 }
1286 
1287 /**
1288  * mei_cl_bus_remove_devices - remove all devices from the bus
1289  *
1290  * @bus: mei device
1291  */
1292 void mei_cl_bus_remove_devices(struct mei_device *bus)
1293 {
1294 	struct mei_cl_device *cldev, *next;
1295 
1296 	mutex_lock(&bus->cl_bus_lock);
1297 	list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
1298 		mei_cl_bus_remove_device(cldev);
1299 	mutex_unlock(&bus->cl_bus_lock);
1300 }
1301 
1302 
1303 /**
1304  * mei_cl_bus_dev_init - allocate and initialize a mei client device
1305  *     based on me client
1306  *
1307  * @bus: mei device
1308  * @me_cl: me client
1309  *
1310  * Locking: called under "dev->cl_bus_lock" lock
1311  */
1312 static void mei_cl_bus_dev_init(struct mei_device *bus,
1313 				struct mei_me_client *me_cl)
1314 {
1315 	struct mei_cl_device *cldev;
1316 
1317 	WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));
1318 
1319 	dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));
1320 
1321 	if (me_cl->bus_added)
1322 		return;
1323 
1324 	cldev = mei_cl_bus_dev_alloc(bus, me_cl);
1325 	if (!cldev)
1326 		return;
1327 
1328 	me_cl->bus_added = true;
1329 	list_add_tail(&cldev->bus_list, &bus->device_list);
1330 
1331 }
1332 
1333 /**
1334  * mei_cl_bus_rescan - scan the me clients list and create
1335  *    devices for eligible clients
1336  *
1337  * @bus: mei device
1338  */
1339 static void mei_cl_bus_rescan(struct mei_device *bus)
1340 {
1341 	struct mei_cl_device *cldev, *n;
1342 	struct mei_me_client *me_cl;
1343 
1344 	mutex_lock(&bus->cl_bus_lock);
1345 
1346 	down_read(&bus->me_clients_rwsem);
1347 	list_for_each_entry(me_cl, &bus->me_clients, list)
1348 		mei_cl_bus_dev_init(bus, me_cl);
1349 	up_read(&bus->me_clients_rwsem);
1350 
1351 	list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {
1352 
1353 		if (!mei_me_cl_is_active(cldev->me_cl)) {
1354 			mei_cl_bus_remove_device(cldev);
1355 			continue;
1356 		}
1357 
1358 		if (cldev->is_added)
1359 			continue;
1360 
1361 		if (mei_cl_bus_dev_setup(bus, cldev))
1362 			mei_cl_bus_dev_add(cldev);
1363 		else {
1364 			list_del_init(&cldev->bus_list);
1365 			put_device(&cldev->dev);
1366 		}
1367 	}
1368 	mutex_unlock(&bus->cl_bus_lock);
1369 
1370 	dev_dbg(bus->dev, "rescan end");
1371 }
1372 
1373 void mei_cl_bus_rescan_work(struct work_struct *work)
1374 {
1375 	struct mei_device *bus =
1376 		container_of(work, struct mei_device, bus_rescan_work);
1377 
1378 	mei_cl_bus_rescan(bus);
1379 }
1380 
1381 int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
1382 				struct module *owner)
1383 {
1384 	int err;
1385 
1386 	cldrv->driver.name = cldrv->name;
1387 	cldrv->driver.owner = owner;
1388 	cldrv->driver.bus = &mei_cl_bus_type;
1389 
1390 	err = driver_register(&cldrv->driver);
1391 	if (err)
1392 		return err;
1393 
1394 	pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);
1395 
1396 	return 0;
1397 }
1398 EXPORT_SYMBOL_GPL(__mei_cldev_driver_register);
1399 
1400 void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv)
1401 {
1402 	driver_unregister(&cldrv->driver);
1403 
1404 	pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
1405 }
1406 EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister);
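
/*
 * Illustrative usage sketch (not part of the original file): a bus driver is
 * normally registered with the mei_cldev_driver_register()/
 * module_mei_cl_driver() helpers from <linux/mei_cl_bus.h>, which resolve to
 * __mei_cldev_driver_register() above. The UUID value, names and callbacks
 * are hypothetical.
 *
 *	#define MY_UUID UUID_LE(0x12345678, 0x1234, 0x1234, 0x12, 0x34, \
 *				0x12, 0x34, 0x12, 0x34, 0x12, 0x34)
 *
 *	static const struct mei_cl_device_id my_id_table[] = {
 *		{ .name = "my_cldev", .uuid = MY_UUID,
 *		  .version = MEI_CL_VERSION_ANY },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(mei, my_id_table);
 *
 *	static struct mei_cl_driver my_driver = {
 *		.id_table = my_id_table,
 *		.name = "my_cldev",
 *		.probe = my_probe,
 *		.remove = my_remove,
 *	};
 *	module_mei_cl_driver(my_driver);
 */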
1407 
1408 
1409 int __init mei_cl_bus_init(void)
1410 {
1411 	return bus_register(&mei_cl_bus_type);
1412 }
1413 
1414 void __exit mei_cl_bus_exit(void)
1415 {
1416 	bus_unregister(&mei_cl_bus_type);
1417 }
1418