xref: /openbmc/linux/drivers/misc/mei/bus.c (revision bc33f5e5)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2012-2019, Intel Corporation. All rights reserved.
4  * Intel Management Engine Interface (Intel MEI) Linux driver
5  */
6 
7 #include <linux/module.h>
8 #include <linux/device.h>
9 #include <linux/kernel.h>
10 #include <linux/sched/signal.h>
11 #include <linux/init.h>
12 #include <linux/errno.h>
13 #include <linux/slab.h>
14 #include <linux/mutex.h>
15 #include <linux/interrupt.h>
16 #include <linux/scatterlist.h>
17 #include <linux/mei_cl_bus.h>
18 
19 #include "mei_dev.h"
20 #include "client.h"
21 
/* Resolve an embedded struct device_driver back to its mei_cl_driver. */
#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
23 
/**
 * __mei_cl_send - internal client send (write)
 *
 * @cl: host client
 * @buf: buffer to send
 * @length: buffer length
 * @vtag: virtual tag
 * @mode: sending mode (MEI_CL_IO_* flags)
 *
 * Return: written size bytes or < 0 on error
 */
ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
		      unsigned int mode)
{
	struct mei_device *bus;
	struct mei_cl_cb *cb;
	ssize_t rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	bus = cl->dev;

	mutex_lock(&bus->device_lock);
	/* sending is permitted only in ENABLED and POWERING_DOWN states */
	if (bus->dev_state != MEI_DEV_ENABLED &&
	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
		rets = -ENODEV;
		goto out;
	}

	if (!mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	/* Check if we have an ME client device */
	if (!mei_me_cl_is_active(cl->me_cl)) {
		rets = -ENOTTY;
		goto out;
	}

	if (vtag) {
		/* Check if vtag is supported by client */
		rets = mei_cl_vt_support_check(cl);
		if (rets)
			goto out;
	}

	/* the message must fit into a single MTU-sized fragment */
	if (length > mei_cl_mtu(cl)) {
		rets = -EFBIG;
		goto out;
	}

	/*
	 * Throttle until the per-client TX queue drains below the bus
	 * limit. The device lock is dropped for the (interruptible) wait
	 * and re-taken before the client state is rechecked.
	 */
	while (cl->tx_cb_queued >= bus->tx_queue_limit) {
		mutex_unlock(&bus->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				(!mei_cl_is_connected(cl)));
		mutex_lock(&bus->device_lock);
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto out;
		}
		/* the client may have disconnected while we slept */
		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}
	cb->vtag = vtag;

	cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
	cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
	memcpy(cb->buf.data, buf, length);
	/* hack we point data to header */
	if (mode & MEI_CL_IO_SGL) {
		cb->ext_hdr = (struct mei_ext_hdr *)cb->buf.data;
		cb->buf.data = NULL;
		cb->buf.size = 0;
	}

	rets = mei_cl_write(cl, cb);

	/* for SGL writes a zero return means success; report full length */
	if (mode & MEI_CL_IO_SGL && rets == 0)
		rets = length;

out:
	mutex_unlock(&bus->device_lock);

	return rets;
}
121 
/**
 * __mei_cl_recv - internal client receive (read)
 *
 * @cl: host client
 * @buf: buffer to receive
 * @length: buffer length
 * @vtag: virtual tag
 * @mode: io mode (MEI_CL_IO_* flags)
 * @timeout: recv timeout in milliseconds, 0 for infinite timeout
 *
 * Return: read size in bytes or < 0 on error
 */
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
		      unsigned int mode, unsigned long timeout)
{
	struct mei_device *bus;
	struct mei_cl_cb *cb;
	size_t r_length;
	ssize_t rets;
	bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	bus = cl->dev;

	mutex_lock(&bus->device_lock);
	if (bus->dev_state != MEI_DEV_ENABLED &&
	    bus->dev_state != MEI_DEV_POWERING_DOWN) {
		rets = -ENODEV;
		goto out;
	}

	/* a completed read may already be queued; consume it directly */
	cb = mei_cl_read_cb(cl, NULL);
	if (cb)
		goto copy;

	/* -EBUSY means a read is already in flight; wait for it below */
	rets = mei_cl_read_start(cl, length, NULL);
	if (rets && rets != -EBUSY)
		goto out;

	if (nonblock) {
		rets = -EAGAIN;
		goto out;
	}

	/* wait on event only if there is no other waiter */
	/* synchronized under device mutex */
	if (!waitqueue_active(&cl->rx_wait)) {

		/* the device lock is dropped for the duration of the wait */
		mutex_unlock(&bus->device_lock);

		if (timeout) {
			rets = wait_event_interruptible_timeout
					(cl->rx_wait,
					mei_cl_read_cb(cl, NULL) ||
					(!mei_cl_is_connected(cl)),
					msecs_to_jiffies(timeout));
			if (rets == 0)
				return -ETIME;
			if (rets < 0) {
				if (signal_pending(current))
					return -EINTR;
				return -ERESTARTSYS;
			}
		} else {
			if (wait_event_interruptible
					(cl->rx_wait,
					mei_cl_read_cb(cl, NULL) ||
					(!mei_cl_is_connected(cl)))) {
				if (signal_pending(current))
					return -EINTR;
				return -ERESTARTSYS;
			}
		}

		mutex_lock(&bus->device_lock);

		if (!mei_cl_is_connected(cl)) {
			rets = -ENODEV;
			goto out;
		}
	}

	/* woken without a completed callback: report zero bytes read */
	cb = mei_cl_read_cb(cl, NULL);
	if (!cb) {
		rets = 0;
		goto out;
	}

copy:
	if (cb->status) {
		rets = cb->status;
		goto free;
	}

	/* for the GSC type - copy the extended header to the buffer */
	if (cb->ext_hdr && cb->ext_hdr->type == MEI_EXT_HDR_GSC) {
		r_length = min_t(size_t, length, cb->ext_hdr->length * sizeof(u32));
		memcpy(buf, cb->ext_hdr, r_length);
	} else {
		/* copy no more than the caller's buffer can hold */
		r_length = min_t(size_t, length, cb->buf_idx);
		memcpy(buf, cb->buf.data, r_length);
	}
	rets = r_length;

	if (vtag)
		*vtag = cb->vtag;

free:
	mei_cl_del_rd_completed(cl, cb);
out:
	mutex_unlock(&bus->device_lock);

	return rets;
}
238 
239 /**
240  * mei_cldev_send_vtag - me device send with vtag  (write)
241  *
242  * @cldev: me client device
243  * @buf: buffer to send
244  * @length: buffer length
245  * @vtag: virtual tag
246  *
247  * Return:
248  *  * written size in bytes
249  *  * < 0 on error
250  */
251 
252 ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
253 			    size_t length, u8 vtag)
254 {
255 	struct mei_cl *cl = cldev->cl;
256 
257 	return __mei_cl_send(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING);
258 }
259 EXPORT_SYMBOL_GPL(mei_cldev_send_vtag);
260 
261 /**
262  * mei_cldev_recv_vtag - client receive with vtag (read)
263  *
264  * @cldev: me client device
265  * @buf: buffer to receive
266  * @length: buffer length
267  * @vtag: virtual tag
268  *
269  * Return:
270  * * read size in bytes
271  * *  < 0 on error
272  */
273 
274 ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
275 			    u8 *vtag)
276 {
277 	struct mei_cl *cl = cldev->cl;
278 
279 	return __mei_cl_recv(cl, buf, length, vtag, 0, 0);
280 }
281 EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag);
282 
283 /**
284  * mei_cldev_recv_nonblock_vtag - non block client receive with vtag (read)
285  *
286  * @cldev: me client device
287  * @buf: buffer to receive
288  * @length: buffer length
289  * @vtag: virtual tag
290  *
291  * Return:
292  * * read size in bytes
293  * * -EAGAIN if function will block.
294  * * < 0 on other error
295  */
296 ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
297 				     size_t length, u8 *vtag)
298 {
299 	struct mei_cl *cl = cldev->cl;
300 
301 	return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0);
302 }
303 EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag);
304 
/**
 * mei_cldev_send - me device send (write)
 *
 * @cldev: me client device
 * @buf: buffer to send
 * @length: buffer length
 *
 * Return:
 *  * written size in bytes
 *  * < 0 on error
 */
ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length)
{
	/* vtag 0 means no virtual tagging */
	return mei_cldev_send_vtag(cldev, buf, length, 0);
}
EXPORT_SYMBOL_GPL(mei_cldev_send);
321 
/**
 * mei_cldev_recv - client receive (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 *
 * Return: read size in bytes or < 0 on error
 */
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
{
	/* NULL vtag: caller is not interested in the received tag */
	return mei_cldev_recv_vtag(cldev, buf, length, NULL);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv);
336 
/**
 * mei_cldev_recv_nonblock - non block client receive (read)
 *
 * @cldev: me client device
 * @buf: buffer to receive
 * @length: buffer length
 *
 * Return: read size in bytes or < 0 on error
 *         -EAGAIN if function will block.
 */
ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
				size_t length)
{
	/* NULL vtag: caller is not interested in the received tag */
	return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL);
}
EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
353 
354 /**
355  * mei_cl_bus_rx_work - dispatch rx event for a bus device
356  *
357  * @work: work
358  */
359 static void mei_cl_bus_rx_work(struct work_struct *work)
360 {
361 	struct mei_cl_device *cldev;
362 	struct mei_device *bus;
363 
364 	cldev = container_of(work, struct mei_cl_device, rx_work);
365 
366 	bus = cldev->bus;
367 
368 	if (cldev->rx_cb)
369 		cldev->rx_cb(cldev);
370 
371 	mutex_lock(&bus->device_lock);
372 	if (mei_cl_is_connected(cldev->cl))
373 		mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
374 	mutex_unlock(&bus->device_lock);
375 }
376 
377 /**
378  * mei_cl_bus_notif_work - dispatch FW notif event for a bus device
379  *
380  * @work: work
381  */
382 static void mei_cl_bus_notif_work(struct work_struct *work)
383 {
384 	struct mei_cl_device *cldev;
385 
386 	cldev = container_of(work, struct mei_cl_device, notif_work);
387 
388 	if (cldev->notif_cb)
389 		cldev->notif_cb(cldev);
390 }
391 
392 /**
393  * mei_cl_bus_notify_event - schedule notify cb on bus client
394  *
395  * @cl: host client
396  *
397  * Return: true if event was scheduled
398  *         false if the client is not waiting for event
399  */
400 bool mei_cl_bus_notify_event(struct mei_cl *cl)
401 {
402 	struct mei_cl_device *cldev = cl->cldev;
403 
404 	if (!cldev || !cldev->notif_cb)
405 		return false;
406 
407 	if (!cl->notify_ev)
408 		return false;
409 
410 	schedule_work(&cldev->notif_work);
411 
412 	cl->notify_ev = false;
413 
414 	return true;
415 }
416 
417 /**
418  * mei_cl_bus_rx_event - schedule rx event
419  *
420  * @cl: host client
421  *
422  * Return: true if event was scheduled
423  *         false if the client is not waiting for event
424  */
425 bool mei_cl_bus_rx_event(struct mei_cl *cl)
426 {
427 	struct mei_cl_device *cldev = cl->cldev;
428 
429 	if (!cldev || !cldev->rx_cb)
430 		return false;
431 
432 	schedule_work(&cldev->rx_work);
433 
434 	return true;
435 }
436 
437 /**
438  * mei_cldev_register_rx_cb - register Rx event callback
439  *
440  * @cldev: me client devices
441  * @rx_cb: callback function
442  *
443  * Return: 0 on success
444  *         -EALREADY if an callback is already registered
445  *         <0 on other errors
446  */
447 int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
448 {
449 	struct mei_device *bus = cldev->bus;
450 	int ret;
451 
452 	if (!rx_cb)
453 		return -EINVAL;
454 	if (cldev->rx_cb)
455 		return -EALREADY;
456 
457 	cldev->rx_cb = rx_cb;
458 	INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);
459 
460 	mutex_lock(&bus->device_lock);
461 	if (mei_cl_is_connected(cldev->cl))
462 		ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
463 	else
464 		ret = -ENODEV;
465 	mutex_unlock(&bus->device_lock);
466 	if (ret && ret != -EBUSY) {
467 		cancel_work_sync(&cldev->rx_work);
468 		cldev->rx_cb = NULL;
469 		return ret;
470 	}
471 
472 	return 0;
473 }
474 EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
475 
476 /**
477  * mei_cldev_register_notif_cb - register FW notification event callback
478  *
479  * @cldev: me client devices
480  * @notif_cb: callback function
481  *
482  * Return: 0 on success
483  *         -EALREADY if an callback is already registered
484  *         <0 on other errors
485  */
486 int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
487 				mei_cldev_cb_t notif_cb)
488 {
489 	struct mei_device *bus = cldev->bus;
490 	int ret;
491 
492 	if (!notif_cb)
493 		return -EINVAL;
494 
495 	if (cldev->notif_cb)
496 		return -EALREADY;
497 
498 	cldev->notif_cb = notif_cb;
499 	INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);
500 
501 	mutex_lock(&bus->device_lock);
502 	ret = mei_cl_notify_request(cldev->cl, NULL, 1);
503 	mutex_unlock(&bus->device_lock);
504 	if (ret) {
505 		cancel_work_sync(&cldev->notif_work);
506 		cldev->notif_cb = NULL;
507 		return ret;
508 	}
509 
510 	return 0;
511 }
512 EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);
513 
/**
 * mei_cldev_get_drvdata - driver data getter
 *
 * @cldev: mei client device
 *
 * Return: driver private data previously stored with mei_cldev_set_drvdata()
 */
void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev)
{
	return dev_get_drvdata(&cldev->dev);
}
EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata);
526 
/**
 * mei_cldev_set_drvdata - driver data setter
 *
 * @cldev: mei client device
 * @data: data to store; retrievable via mei_cldev_get_drvdata()
 */
void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
{
	dev_set_drvdata(&cldev->dev, data);
}
EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);
538 
/**
 * mei_cldev_uuid - return uuid of the underlying me client
 *
 * @cldev: mei client device
 *
 * Return: me client uuid
 */
const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev)
{
	return mei_me_cl_uuid(cldev->me_cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_uuid);
551 
/**
 * mei_cldev_ver - return protocol version of the underlying me client
 *
 * @cldev: mei client device
 *
 * Return: me client protocol version
 */
u8 mei_cldev_ver(const struct mei_cl_device *cldev)
{
	return mei_me_cl_ver(cldev->me_cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_ver);
564 
/**
 * mei_cldev_enabled - check whether the device is enabled
 *
 * @cldev: mei client device
 *
 * Return: true if the host client is connected to the me client
 */
bool mei_cldev_enabled(const struct mei_cl_device *cldev)
{
	return mei_cl_is_connected(cldev->cl);
}
EXPORT_SYMBOL_GPL(mei_cldev_enabled);
577 
/**
 * mei_cl_bus_module_get - acquire module of the underlying
 *    hw driver.
 *
 * @cldev: mei client device
 *
 * Return: true on success; false if the module was removed.
 */
static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
{
	/* bus->dev->driver->owner is the module of the underlying hw driver */
	return try_module_get(cldev->bus->dev->driver->owner);
}
590 
/**
 * mei_cl_bus_module_put -  release the underlying hw module.
 *
 * @cldev: mei client device
 */
static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
{
	/* pairs with the try_module_get() in mei_cl_bus_module_get() */
	module_put(cldev->bus->dev->driver->owner);
}
600 
/**
 * mei_cl_bus_vtag - get bus vtag entry wrapper
 *     The tag for bus client is always first.
 *
 * @cl: host client
 *
 * Return: bus vtag or NULL if the vtag map is empty
 */
static inline struct mei_cl_vtag *mei_cl_bus_vtag(struct mei_cl *cl)
{
	return list_first_entry_or_null(&cl->vtag_map,
					struct mei_cl_vtag, list);
}
614 
615 /**
616  * mei_cl_bus_vtag_alloc - add bus client entry to vtag map
617  *
618  * @cldev: me client device
619  *
620  * Return:
621  * * 0 on success
622  * * -ENOMEM if memory allocation failed
623  */
624 static int mei_cl_bus_vtag_alloc(struct mei_cl_device *cldev)
625 {
626 	struct mei_cl *cl = cldev->cl;
627 	struct mei_cl_vtag *cl_vtag;
628 
629 	/*
630 	 * Bail out if the client does not supports vtags
631 	 * or has already allocated one
632 	 */
633 	if (mei_cl_vt_support_check(cl) || mei_cl_bus_vtag(cl))
634 		return 0;
635 
636 	cl_vtag = mei_cl_vtag_alloc(NULL, 0);
637 	if (IS_ERR(cl_vtag))
638 		return -ENOMEM;
639 
640 	list_add_tail(&cl_vtag->list, &cl->vtag_map);
641 
642 	return 0;
643 }
644 
645 /**
646  * mei_cl_bus_vtag_free - remove the bus entry from vtag map
647  *
648  * @cldev: me client device
649  */
650 static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev)
651 {
652 	struct mei_cl *cl = cldev->cl;
653 	struct mei_cl_vtag *cl_vtag;
654 
655 	cl_vtag = mei_cl_bus_vtag(cl);
656 	if (!cl_vtag)
657 		return;
658 
659 	list_del(&cl_vtag->list);
660 	kfree(cl_vtag);
661 }
662 
/**
 * mei_cldev_dma_map - allocate and map a DMA buffer for the client
 *
 * @cldev: me client device
 * @buffer_id: id of the buffer to map, must be non-zero
 * @size: buffer size, must be a multiple of MEI_FW_PAGE_SIZE
 *
 * Return: virtual address of the mapped buffer or ERR_PTR on failure
 */
void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	if (!cldev || !buffer_id || !size)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) {
		dev_err(&cldev->dev, "Map size should be aligned to %lu\n",
			MEI_FW_PAGE_SIZE);
		return ERR_PTR(-EINVAL);
	}

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	/* lazily link the host client on first use */
	if (cl->state == MEI_FILE_UNINITIALIZED) {
		ret = mei_cl_link(cl);
		if (ret)
			goto out;
		/* update pointers */
		cl->cldev = cldev;
	}

	ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
out:
	mutex_unlock(&bus->device_lock);
	if (ret)
		return ERR_PTR(ret);
	return cl->dma.vaddr;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_map);
698 
/**
 * mei_cldev_dma_unmap - unmap the client DMA buffer and unlink the client
 *
 * @cldev: me client device
 *
 * Return: 0 on success or the error returned by mei_cl_dma_unmap()
 */
int mei_cldev_dma_unmap(struct mei_cl_device *cldev)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int ret;

	if (!cldev)
		return -EINVAL;

	cl = cldev->cl;
	bus = cldev->bus;

	mutex_lock(&bus->device_lock);
	ret = mei_cl_dma_unmap(cl, NULL);

	/* drop pending callbacks and detach the host client */
	mei_cl_flush_queues(cl, NULL);
	mei_cl_unlink(cl);
	mutex_unlock(&bus->device_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap);
720 
/**
 * mei_cldev_enable - enable me client device
 *     create connection with me client
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_enable(struct mei_cl_device *cldev)
{
	struct mei_device *bus = cldev->bus;
	struct mei_cl *cl;
	int ret;

	cl = cldev->cl;

	mutex_lock(&bus->device_lock);
	/* lazily link the host client on first enable */
	if (cl->state == MEI_FILE_UNINITIALIZED) {
		ret = mei_cl_link(cl);
		if (ret)
			goto out;
		/* update pointers */
		cl->cldev = cldev;
	}

	/* already connected: nothing to do */
	if (mei_cl_is_connected(cl)) {
		ret = 0;
		goto out;
	}

	if (!mei_me_cl_is_active(cldev->me_cl)) {
		dev_err(&cldev->dev, "me client is not active\n");
		ret = -ENOTTY;
		goto out;
	}

	/* set up the bus vtag entry before connecting (no-op if unsupported) */
	ret = mei_cl_bus_vtag_alloc(cldev);
	if (ret)
		goto out;

	ret = mei_cl_connect(cl, cldev->me_cl, NULL);
	if (ret < 0) {
		dev_err(&cldev->dev, "cannot connect\n");
		/* roll back the vtag entry on connect failure */
		mei_cl_bus_vtag_free(cldev);
	}

out:
	mutex_unlock(&bus->device_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(mei_cldev_enable);
773 
/**
 * mei_cldev_unregister_callbacks - internal wrapper for unregistering
 *  callbacks.
 *
 * @cldev: client device
 */
static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
{
	/* cancel any queued work before dropping the callback pointers */
	if (cldev->rx_cb) {
		cancel_work_sync(&cldev->rx_work);
		cldev->rx_cb = NULL;
	}

	if (cldev->notif_cb) {
		cancel_work_sync(&cldev->notif_work);
		cldev->notif_cb = NULL;
	}
}
792 
/**
 * mei_cldev_disable - disable me client device
 *     disconnect from the me client
 *
 * @cldev: me client device
 *
 * Return: 0 on success and < 0 on error
 */
int mei_cldev_disable(struct mei_cl_device *cldev)
{
	struct mei_device *bus;
	struct mei_cl *cl;
	int err;

	if (!cldev)
		return -ENODEV;

	cl = cldev->cl;

	bus = cldev->bus;

	/* stop rx/notification work before tearing down the connection */
	mei_cldev_unregister_callbacks(cldev);

	mutex_lock(&bus->device_lock);

	/* drop the bus vtag entry (no-op if none was allocated) */
	mei_cl_bus_vtag_free(cldev);

	if (!mei_cl_is_connected(cl)) {
		dev_dbg(bus->dev, "Already disconnected\n");
		err = 0;
		goto out;
	}

	err = mei_cl_disconnect(cl);
	if (err < 0)
		dev_err(bus->dev, "Could not disconnect from the ME client\n");

out:
	/* Flush queues and remove any pending read unless we have mapped DMA */
	if (!cl->dma_mapped) {
		mei_cl_flush_queues(cl, NULL);
		mei_cl_unlink(cl);
	}

	mutex_unlock(&bus->device_lock);
	return err;
}
EXPORT_SYMBOL_GPL(mei_cldev_disable);
841 
842 /**
843  * mei_cldev_send_gsc_command - sends a gsc command, by sending
844  * a gsl mei message to gsc and receiving reply from gsc
845  *
846  * @cldev: me client device
847  * @client_id: client id to send the command to
848  * @fence_id: fence id to send the command to
849  * @sg_in: scatter gather list containing addresses for rx message buffer
850  * @total_in_len: total length of data in 'in' sg, can be less than the sum of buffers sizes
851  * @sg_out: scatter gather list containing addresses for tx message buffer
852  *
853  * Return:
854  *  * written size in bytes
855  *  * < 0 on error
856  */
857 ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev,
858 				   u8 client_id, u32 fence_id,
859 				   struct scatterlist *sg_in,
860 				   size_t total_in_len,
861 				   struct scatterlist *sg_out)
862 {
863 	struct mei_cl *cl;
864 	struct mei_device *bus;
865 	ssize_t ret = 0;
866 
867 	struct mei_ext_hdr_gsc_h2f *ext_hdr;
868 	size_t buf_sz = sizeof(struct mei_ext_hdr_gsc_h2f);
869 	int sg_out_nents, sg_in_nents;
870 	int i;
871 	struct scatterlist *sg;
872 	struct mei_ext_hdr_gsc_f2h rx_msg;
873 	unsigned int sg_len;
874 
875 	if (!cldev || !sg_in || !sg_out)
876 		return -EINVAL;
877 
878 	cl = cldev->cl;
879 	bus = cldev->bus;
880 
881 	dev_dbg(bus->dev, "client_id %u, fence_id %u\n", client_id, fence_id);
882 
883 	if (!bus->hbm_f_gsc_supported)
884 		return -EOPNOTSUPP;
885 
886 	sg_out_nents = sg_nents(sg_out);
887 	sg_in_nents = sg_nents(sg_in);
888 	/* at least one entry in tx and rx sgls must be present */
889 	if (sg_out_nents <= 0 || sg_in_nents <= 0)
890 		return -EINVAL;
891 
892 	buf_sz += (sg_out_nents + sg_in_nents) * sizeof(struct mei_gsc_sgl);
893 	ext_hdr = kzalloc(buf_sz, GFP_KERNEL);
894 	if (!ext_hdr)
895 		return -ENOMEM;
896 
897 	/* construct the GSC message */
898 	ext_hdr->hdr.type = MEI_EXT_HDR_GSC;
899 	ext_hdr->hdr.length = buf_sz / sizeof(u32); /* length is in dw */
900 
901 	ext_hdr->client_id = client_id;
902 	ext_hdr->addr_type = GSC_ADDRESS_TYPE_PHYSICAL_SGL;
903 	ext_hdr->fence_id = fence_id;
904 	ext_hdr->input_address_count = sg_in_nents;
905 	ext_hdr->output_address_count = sg_out_nents;
906 	ext_hdr->reserved[0] = 0;
907 	ext_hdr->reserved[1] = 0;
908 
909 	/* copy in-sgl to the message */
910 	for (i = 0, sg = sg_in; i < sg_in_nents; i++, sg++) {
911 		ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
912 		ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
913 		sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
914 		ext_hdr->sgl[i].length = (sg_len <= total_in_len) ? sg_len : total_in_len;
915 		total_in_len -= ext_hdr->sgl[i].length;
916 	}
917 
918 	/* copy out-sgl to the message */
919 	for (i = sg_in_nents, sg = sg_out; i < sg_in_nents + sg_out_nents; i++, sg++) {
920 		ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
921 		ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
922 		sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
923 		ext_hdr->sgl[i].length = sg_len;
924 	}
925 
926 	/* send the message to GSC */
927 	ret = __mei_cl_send(cl, (u8 *)ext_hdr, buf_sz, 0, MEI_CL_IO_SGL);
928 	if (ret < 0) {
929 		dev_err(bus->dev, "__mei_cl_send failed, returned %zd\n", ret);
930 		goto end;
931 	}
932 	if (ret != buf_sz) {
933 		dev_err(bus->dev, "__mei_cl_send returned %zd instead of expected %zd\n",
934 			ret, buf_sz);
935 		ret = -EIO;
936 		goto end;
937 	}
938 
939 	/* receive the reply from GSC, note that at this point sg_in should contain the reply */
940 	ret = __mei_cl_recv(cl, (u8 *)&rx_msg, sizeof(rx_msg), NULL, MEI_CL_IO_SGL, 0);
941 
942 	if (ret != sizeof(rx_msg)) {
943 		dev_err(bus->dev, "__mei_cl_recv returned %zd instead of expected %zd\n",
944 			ret, sizeof(rx_msg));
945 		if (ret >= 0)
946 			ret = -EIO;
947 		goto end;
948 	}
949 
950 	/* check rx_msg.client_id and rx_msg.fence_id match the ones we send */
951 	if (rx_msg.client_id != client_id || rx_msg.fence_id != fence_id) {
952 		dev_err(bus->dev, "received client_id/fence_id  %u/%u  instead of %u/%u sent\n",
953 			rx_msg.client_id, rx_msg.fence_id, client_id, fence_id);
954 		ret = -EFAULT;
955 		goto end;
956 	}
957 
958 	dev_dbg(bus->dev, "gsc command: successfully written %u bytes\n",  rx_msg.written);
959 	ret = rx_msg.written;
960 
961 end:
962 	kfree(ext_hdr);
963 	return ret;
964 }
965 EXPORT_SYMBOL_GPL(mei_cldev_send_gsc_command);
966 
967 /**
968  * mei_cl_device_find - find matching entry in the driver id table
969  *
970  * @cldev: me client device
971  * @cldrv: me client driver
972  *
973  * Return: id on success; NULL if no id is matching
974  */
975 static const
976 struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev,
977 					    const struct mei_cl_driver *cldrv)
978 {
979 	const struct mei_cl_device_id *id;
980 	const uuid_le *uuid;
981 	u8 version;
982 	bool match;
983 
984 	uuid = mei_me_cl_uuid(cldev->me_cl);
985 	version = mei_me_cl_ver(cldev->me_cl);
986 
987 	id = cldrv->id_table;
988 	while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
989 		if (!uuid_le_cmp(*uuid, id->uuid)) {
990 			match = true;
991 
992 			if (cldev->name[0])
993 				if (strncmp(cldev->name, id->name,
994 					    sizeof(id->name)))
995 					match = false;
996 
997 			if (id->version != MEI_CL_VERSION_ANY)
998 				if (id->version != version)
999 					match = false;
1000 			if (match)
1001 				return id;
1002 		}
1003 
1004 		id++;
1005 	}
1006 
1007 	return NULL;
1008 }
1009 
1010 /**
1011  * mei_cl_device_match  - device match function
1012  *
1013  * @dev: device
1014  * @drv: driver
1015  *
1016  * Return:  1 if matching device was found 0 otherwise
1017  */
1018 static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
1019 {
1020 	const struct mei_cl_device *cldev = to_mei_cl_device(dev);
1021 	const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
1022 	const struct mei_cl_device_id *found_id;
1023 
1024 	if (!cldev)
1025 		return 0;
1026 
1027 	if (!cldev->do_match)
1028 		return 0;
1029 
1030 	if (!cldrv || !cldrv->id_table)
1031 		return 0;
1032 
1033 	found_id = mei_cl_device_find(cldev, cldrv);
1034 	if (found_id)
1035 		return 1;
1036 
1037 	return 0;
1038 }
1039 
1040 /**
1041  * mei_cl_device_probe - bus probe function
1042  *
1043  * @dev: device
1044  *
1045  * Return:  0 on success; < 0 otherwise
1046  */
1047 static int mei_cl_device_probe(struct device *dev)
1048 {
1049 	struct mei_cl_device *cldev;
1050 	struct mei_cl_driver *cldrv;
1051 	const struct mei_cl_device_id *id;
1052 	int ret;
1053 
1054 	cldev = to_mei_cl_device(dev);
1055 	cldrv = to_mei_cl_driver(dev->driver);
1056 
1057 	if (!cldev)
1058 		return 0;
1059 
1060 	if (!cldrv || !cldrv->probe)
1061 		return -ENODEV;
1062 
1063 	id = mei_cl_device_find(cldev, cldrv);
1064 	if (!id)
1065 		return -ENODEV;
1066 
1067 	if (!mei_cl_bus_module_get(cldev)) {
1068 		dev_err(&cldev->dev, "get hw module failed");
1069 		return -ENODEV;
1070 	}
1071 
1072 	ret = cldrv->probe(cldev, id);
1073 	if (ret) {
1074 		mei_cl_bus_module_put(cldev);
1075 		return ret;
1076 	}
1077 
1078 	__module_get(THIS_MODULE);
1079 	return 0;
1080 }
1081 
/**
 * mei_cl_device_remove - remove device from the bus
 *
 * @dev: device
 */
static void mei_cl_device_remove(struct device *dev)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);

	/* let the client driver run its own teardown first */
	if (cldrv->remove)
		cldrv->remove(cldev);

	/* cancel rx/notification work before the device goes away */
	mei_cldev_unregister_callbacks(cldev);

	/* drop the module references taken at probe time */
	mei_cl_bus_module_put(cldev);
	module_put(THIS_MODULE);
}
1102 
1103 static ssize_t name_show(struct device *dev, struct device_attribute *a,
1104 			     char *buf)
1105 {
1106 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1107 
1108 	return scnprintf(buf, PAGE_SIZE, "%s", cldev->name);
1109 }
1110 static DEVICE_ATTR_RO(name);
1111 
1112 static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
1113 			     char *buf)
1114 {
1115 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1116 	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
1117 
1118 	return sprintf(buf, "%pUl", uuid);
1119 }
1120 static DEVICE_ATTR_RO(uuid);
1121 
1122 static ssize_t version_show(struct device *dev, struct device_attribute *a,
1123 			     char *buf)
1124 {
1125 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1126 	u8 version = mei_me_cl_ver(cldev->me_cl);
1127 
1128 	return sprintf(buf, "%02X", version);
1129 }
1130 static DEVICE_ATTR_RO(version);
1131 
1132 static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
1133 			     char *buf)
1134 {
1135 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1136 	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
1137 	u8 version = mei_me_cl_ver(cldev->me_cl);
1138 
1139 	return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
1140 			 cldev->name, uuid, version);
1141 }
1142 static DEVICE_ATTR_RO(modalias);
1143 
1144 static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
1145 			     char *buf)
1146 {
1147 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1148 	u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);
1149 
1150 	return sprintf(buf, "%d", maxconn);
1151 }
1152 static DEVICE_ATTR_RO(max_conn);
1153 
1154 static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
1155 			  char *buf)
1156 {
1157 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1158 	u8 fixed = mei_me_cl_fixed(cldev->me_cl);
1159 
1160 	return sprintf(buf, "%d", fixed);
1161 }
1162 static DEVICE_ATTR_RO(fixed);
1163 
1164 static ssize_t vtag_show(struct device *dev, struct device_attribute *a,
1165 			 char *buf)
1166 {
1167 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1168 	bool vt = mei_me_cl_vt(cldev->me_cl);
1169 
1170 	return sprintf(buf, "%d", vt);
1171 }
1172 static DEVICE_ATTR_RO(vtag);
1173 
1174 static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
1175 			    char *buf)
1176 {
1177 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1178 	u32 maxlen = mei_me_cl_max_len(cldev->me_cl);
1179 
1180 	return sprintf(buf, "%u", maxlen);
1181 }
1182 static DEVICE_ATTR_RO(max_len);
1183 
/* sysfs attributes exported for every mei client device */
static struct attribute *mei_cldev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_uuid.attr,
	&dev_attr_version.attr,
	&dev_attr_modalias.attr,
	&dev_attr_max_conn.attr,
	&dev_attr_fixed.attr,
	&dev_attr_vtag.attr,
	&dev_attr_max_len.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mei_cldev);
1196 
1197 /**
1198  * mei_cl_device_uevent - me client bus uevent handler
1199  *
1200  * @dev: device
1201  * @env: uevent kobject
1202  *
1203  * Return: 0 on success -ENOMEM on when add_uevent_var fails
1204  */
1205 static int mei_cl_device_uevent(struct device *dev, struct kobj_uevent_env *env)
1206 {
1207 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1208 	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
1209 	u8 version = mei_me_cl_ver(cldev->me_cl);
1210 
1211 	if (add_uevent_var(env, "MEI_CL_VERSION=%d", version))
1212 		return -ENOMEM;
1213 
1214 	if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
1215 		return -ENOMEM;
1216 
1217 	if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
1218 		return -ENOMEM;
1219 
1220 	if (add_uevent_var(env, "MODALIAS=mei:%s:%pUl:%02X:",
1221 			   cldev->name, uuid, version))
1222 		return -ENOMEM;
1223 
1224 	return 0;
1225 }
1226 
/* the mei client bus: device/driver matching, probing and uevents */
static struct bus_type mei_cl_bus_type = {
	.name		= "mei",
	.dev_groups	= mei_cldev_groups,
	.match		= mei_cl_device_match,
	.probe		= mei_cl_device_probe,
	.remove		= mei_cl_device_remove,
	.uevent		= mei_cl_device_uevent,
};
1235 
1236 static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
1237 {
1238 	if (bus)
1239 		get_device(bus->dev);
1240 
1241 	return bus;
1242 }
1243 
1244 static void mei_dev_bus_put(struct mei_device *bus)
1245 {
1246 	if (bus)
1247 		put_device(bus->dev);
1248 }
1249 
1250 static void mei_cl_bus_dev_release(struct device *dev)
1251 {
1252 	struct mei_cl_device *cldev = to_mei_cl_device(dev);
1253 
1254 	if (!cldev)
1255 		return;
1256 
1257 	mei_cl_flush_queues(cldev->cl, NULL);
1258 	mei_me_cl_put(cldev->me_cl);
1259 	mei_dev_bus_put(cldev->bus);
1260 	mei_cl_unlink(cldev->cl);
1261 	kfree(cldev->cl);
1262 	kfree(cldev);
1263 }
1264 
/* device type for mei client devices: only a release hook is needed */
static const struct device_type mei_cl_device_type = {
	.release = mei_cl_bus_dev_release,
};
1268 
1269 /**
1270  * mei_cl_bus_set_name - set device name for me client device
1271  *  <controller>-<client device>
1272  *  Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
1273  *
1274  * @cldev: me client device
1275  */
1276 static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
1277 {
1278 	dev_set_name(&cldev->dev, "%s-%pUl",
1279 		     dev_name(cldev->bus->dev),
1280 		     mei_me_cl_uuid(cldev->me_cl));
1281 }
1282 
1283 /**
1284  * mei_cl_bus_dev_alloc - initialize and allocate mei client device
1285  *
1286  * @bus: mei device
1287  * @me_cl: me client
1288  *
1289  * Return: allocated device structur or NULL on allocation failure
1290  */
1291 static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
1292 						  struct mei_me_client *me_cl)
1293 {
1294 	struct mei_cl_device *cldev;
1295 	struct mei_cl *cl;
1296 
1297 	cldev = kzalloc(sizeof(*cldev), GFP_KERNEL);
1298 	if (!cldev)
1299 		return NULL;
1300 
1301 	cl = mei_cl_allocate(bus);
1302 	if (!cl) {
1303 		kfree(cldev);
1304 		return NULL;
1305 	}
1306 
1307 	device_initialize(&cldev->dev);
1308 	cldev->dev.parent = bus->dev;
1309 	cldev->dev.bus    = &mei_cl_bus_type;
1310 	cldev->dev.type   = &mei_cl_device_type;
1311 	cldev->bus        = mei_dev_bus_get(bus);
1312 	cldev->me_cl      = mei_me_cl_get(me_cl);
1313 	cldev->cl         = cl;
1314 	mei_cl_bus_set_name(cldev);
1315 	cldev->is_added   = 0;
1316 	INIT_LIST_HEAD(&cldev->bus_list);
1317 
1318 	return cldev;
1319 }
1320 
1321 /**
1322  * mei_cl_bus_dev_setup - setup me client device
1323  *    run fix up routines and set the device name
1324  *
1325  * @bus: mei device
1326  * @cldev: me client device
1327  *
1328  * Return: true if the device is eligible for enumeration
1329  */
1330 static bool mei_cl_bus_dev_setup(struct mei_device *bus,
1331 				 struct mei_cl_device *cldev)
1332 {
1333 	cldev->do_match = 1;
1334 	mei_cl_bus_dev_fixup(cldev);
1335 
1336 	/* the device name can change during fix up */
1337 	if (cldev->do_match)
1338 		mei_cl_bus_set_name(cldev);
1339 
1340 	return cldev->do_match == 1;
1341 }
1342 
1343 /**
1344  * mei_cl_bus_dev_add - add me client devices
1345  *
1346  * @cldev: me client device
1347  *
1348  * Return: 0 on success; < 0 on failre
1349  */
1350 static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
1351 {
1352 	int ret;
1353 
1354 	dev_dbg(cldev->bus->dev, "adding %pUL:%02X\n",
1355 		mei_me_cl_uuid(cldev->me_cl),
1356 		mei_me_cl_ver(cldev->me_cl));
1357 	ret = device_add(&cldev->dev);
1358 	if (!ret)
1359 		cldev->is_added = 1;
1360 
1361 	return ret;
1362 }
1363 
1364 /**
1365  * mei_cl_bus_dev_stop - stop the driver
1366  *
1367  * @cldev: me client device
1368  */
1369 static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
1370 {
1371 	if (cldev->is_added)
1372 		device_release_driver(&cldev->dev);
1373 }
1374 
1375 /**
1376  * mei_cl_bus_dev_destroy - destroy me client devices object
1377  *
1378  * @cldev: me client device
1379  *
1380  * Locking: called under "dev->cl_bus_lock" lock
1381  */
1382 static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
1383 {
1384 
1385 	WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock));
1386 
1387 	if (!cldev->is_added)
1388 		return;
1389 
1390 	device_del(&cldev->dev);
1391 
1392 	list_del_init(&cldev->bus_list);
1393 
1394 	cldev->is_added = 0;
1395 	put_device(&cldev->dev);
1396 }
1397 
1398 /**
1399  * mei_cl_bus_remove_device - remove a devices form the bus
1400  *
1401  * @cldev: me client device
1402  */
1403 static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
1404 {
1405 	mei_cl_bus_dev_stop(cldev);
1406 	mei_cl_bus_dev_destroy(cldev);
1407 }
1408 
1409 /**
1410  * mei_cl_bus_remove_devices - remove all devices form the bus
1411  *
1412  * @bus: mei device
1413  */
1414 void mei_cl_bus_remove_devices(struct mei_device *bus)
1415 {
1416 	struct mei_cl_device *cldev, *next;
1417 
1418 	mutex_lock(&bus->cl_bus_lock);
1419 	list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
1420 		mei_cl_bus_remove_device(cldev);
1421 	mutex_unlock(&bus->cl_bus_lock);
1422 }
1423 
1424 
1425 /**
1426  * mei_cl_bus_dev_init - allocate and initializes an mei client devices
1427  *     based on me client
1428  *
1429  * @bus: mei device
1430  * @me_cl: me client
1431  *
1432  * Locking: called under "dev->cl_bus_lock" lock
1433  */
1434 static void mei_cl_bus_dev_init(struct mei_device *bus,
1435 				struct mei_me_client *me_cl)
1436 {
1437 	struct mei_cl_device *cldev;
1438 
1439 	WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));
1440 
1441 	dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));
1442 
1443 	if (me_cl->bus_added)
1444 		return;
1445 
1446 	cldev = mei_cl_bus_dev_alloc(bus, me_cl);
1447 	if (!cldev)
1448 		return;
1449 
1450 	me_cl->bus_added = true;
1451 	list_add_tail(&cldev->bus_list, &bus->device_list);
1452 
1453 }
1454 
1455 /**
1456  * mei_cl_bus_rescan - scan me clients list and add create
1457  *    devices for eligible clients
1458  *
1459  * @bus: mei device
1460  */
1461 static void mei_cl_bus_rescan(struct mei_device *bus)
1462 {
1463 	struct mei_cl_device *cldev, *n;
1464 	struct mei_me_client *me_cl;
1465 
1466 	mutex_lock(&bus->cl_bus_lock);
1467 
1468 	down_read(&bus->me_clients_rwsem);
1469 	list_for_each_entry(me_cl, &bus->me_clients, list)
1470 		mei_cl_bus_dev_init(bus, me_cl);
1471 	up_read(&bus->me_clients_rwsem);
1472 
1473 	list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {
1474 
1475 		if (!mei_me_cl_is_active(cldev->me_cl)) {
1476 			mei_cl_bus_remove_device(cldev);
1477 			continue;
1478 		}
1479 
1480 		if (cldev->is_added)
1481 			continue;
1482 
1483 		if (mei_cl_bus_dev_setup(bus, cldev))
1484 			mei_cl_bus_dev_add(cldev);
1485 		else {
1486 			list_del_init(&cldev->bus_list);
1487 			put_device(&cldev->dev);
1488 		}
1489 	}
1490 	mutex_unlock(&bus->cl_bus_lock);
1491 
1492 	dev_dbg(bus->dev, "rescan end");
1493 }
1494 
/**
 * mei_cl_bus_rescan_work - rescan work function
 *
 * @work: embedded bus_rescan_work member of the mei device
 *
 * Workqueue entry point that runs the bus rescan.
 */
void mei_cl_bus_rescan_work(struct work_struct *work)
{
	struct mei_device *bus =
		container_of(work, struct mei_device, bus_rescan_work);

	mei_cl_bus_rescan(bus);
}
1502 
1503 int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
1504 				struct module *owner)
1505 {
1506 	int err;
1507 
1508 	cldrv->driver.name = cldrv->name;
1509 	cldrv->driver.owner = owner;
1510 	cldrv->driver.bus = &mei_cl_bus_type;
1511 
1512 	err = driver_register(&cldrv->driver);
1513 	if (err)
1514 		return err;
1515 
1516 	pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);
1517 
1518 	return 0;
1519 }
1520 EXPORT_SYMBOL_GPL(__mei_cldev_driver_register);
1521 
/**
 * mei_cldev_driver_unregister - unregister a mei client bus driver
 *
 * @cldrv: driver to unregister
 */
void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv)
{
	driver_unregister(&cldrv->driver);

	pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
}
EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister);
1529 
1530 
/* register the mei client bus type with the driver core */
int __init mei_cl_bus_init(void)
{
	return bus_register(&mei_cl_bus_type);
}
1535 
/* unregister the mei client bus type on module exit */
void __exit mei_cl_bus_exit(void)
{
	bus_unregister(&mei_cl_bus_type);
}
1540