xref: /openbmc/linux/drivers/misc/mei/client.c (revision c8ed9fc9)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
4  * Intel Management Engine Interface (Intel MEI) Linux driver
5  */
6 
7 #include <linux/sched/signal.h>
8 #include <linux/wait.h>
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/pm_runtime.h>
12 
13 #include <linux/mei.h>
14 
15 #include "mei_dev.h"
16 #include "hbm.h"
17 #include "client.h"
18 
19 /**
20  * mei_me_cl_init - initialize me client
21  *
22  * @me_cl: me client
23  */
24 void mei_me_cl_init(struct mei_me_client *me_cl)
25 {
26 	INIT_LIST_HEAD(&me_cl->list);
27 	kref_init(&me_cl->refcnt);
28 }
29 
30 /**
31  * mei_me_cl_get - increases me client refcount
32  *
33  * @me_cl: me client
34  *
35  * Locking: called under "dev->device_lock" lock
36  *
37  * Return: me client or NULL
38  */
39 struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
40 {
41 	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
42 		return me_cl;
43 
44 	return NULL;
45 }
46 
47 /**
48  * mei_me_cl_release - free me client
49  *
50  * Locking: called under "dev->device_lock" lock
51  *
52  * @ref: me_client refcount
53  */
54 static void mei_me_cl_release(struct kref *ref)
55 {
56 	struct mei_me_client *me_cl =
57 		container_of(ref, struct mei_me_client, refcnt);
58 
59 	kfree(me_cl);
60 }
61 
62 /**
63  * mei_me_cl_put - decrease me client refcount and free client if necessary
64  *
65  * Locking: called under "dev->device_lock" lock
66  *
67  * @me_cl: me client
68  */
69 void mei_me_cl_put(struct mei_me_client *me_cl)
70 {
71 	if (me_cl)
72 		kref_put(&me_cl->refcnt, mei_me_cl_release);
73 }
74 
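/*
 * Illustrative sketch, not part of the driver: the get/put pair above is
 * the standard kref pattern.  A caller that looks up an me client must
 * hold its own reference while dereferencing the pointer and drop it
 * with mei_me_cl_put() when done.  Hypothetical example helper:
 */
static inline u8 example_me_cl_version(struct mei_me_client *me_cl)
{
	struct mei_me_client *ref = mei_me_cl_get(me_cl);
	u8 version = 0;

	if (ref) {	/* NULL if the client is already being torn down */
		version = ref->props.protocol_version;
		mei_me_cl_put(ref);
	}
	return version;
}
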
75 /**
76  * __mei_me_cl_del  - delete me client from the list and decrease
77  *     reference counter
78  *
79  * @dev: mei device
80  * @me_cl: me client
81  *
82  * Locking: dev->me_clients_rwsem
83  */
84 static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
85 {
86 	if (!me_cl)
87 		return;
88 
89 	list_del_init(&me_cl->list);
90 	mei_me_cl_put(me_cl);
91 }
92 
93 /**
94  * mei_me_cl_del - delete me client from the list and decrease
95  *     reference counter
96  *
97  * @dev: mei device
98  * @me_cl: me client
99  */
100 void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
101 {
102 	down_write(&dev->me_clients_rwsem);
103 	__mei_me_cl_del(dev, me_cl);
104 	up_write(&dev->me_clients_rwsem);
105 }
106 
107 /**
108  * mei_me_cl_add - add me client to the list
109  *
110  * @dev: mei device
111  * @me_cl: me client
112  */
113 void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
114 {
115 	down_write(&dev->me_clients_rwsem);
116 	list_add(&me_cl->list, &dev->me_clients);
117 	up_write(&dev->me_clients_rwsem);
118 }
119 
120 /**
121  * __mei_me_cl_by_uuid - locate me client by uuid
122  *	increases ref count
123  *
124  * @dev: mei device
125  * @uuid: me client uuid
126  *
127  * Return: me client or NULL if not found
128  *
129  * Locking: dev->me_clients_rwsem
130  */
131 static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
132 					const uuid_le *uuid)
133 {
134 	struct mei_me_client *me_cl;
135 	const uuid_le *pn;
136 
137 	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
138 
139 	list_for_each_entry(me_cl, &dev->me_clients, list) {
140 		pn = &me_cl->props.protocol_name;
141 		if (uuid_le_cmp(*uuid, *pn) == 0)
142 			return mei_me_cl_get(me_cl);
143 	}
144 
145 	return NULL;
146 }
147 
148 /**
149  * mei_me_cl_by_uuid - locate me client by uuid
150  *	increases ref count
151  *
152  * @dev: mei device
153  * @uuid: me client uuid
154  *
155  * Return: me client or NULL if not found
156  *
157  * Locking: dev->me_clients_rwsem
158  */
159 struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
160 					const uuid_le *uuid)
161 {
162 	struct mei_me_client *me_cl;
163 
164 	down_read(&dev->me_clients_rwsem);
165 	me_cl = __mei_me_cl_by_uuid(dev, uuid);
166 	up_read(&dev->me_clients_rwsem);
167 
168 	return me_cl;
169 }
170 
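/*
 * Illustrative sketch, not part of the driver: a successful
 * mei_me_cl_by_uuid() lookup returns with the refcount elevated, so the
 * caller owns one reference and must balance it.  Hypothetical helper:
 */
static bool example_me_cl_exists(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	me_cl = mei_me_cl_by_uuid(dev, uuid);	/* takes a reference */
	if (!me_cl)
		return false;

	mei_me_cl_put(me_cl);			/* balance the lookup */
	return true;
}
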
171 /**
172  * mei_me_cl_by_id - locate me client by client id
173  *	increases ref count
174  *
175  * @dev: the device structure
176  * @client_id: me client id
177  *
178  * Return: me client or NULL if not found
179  *
180  * Locking: dev->me_clients_rwsem
181  */
182 struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
183 {
184 
185 	struct mei_me_client *__me_cl, *me_cl = NULL;
186 
187 	down_read(&dev->me_clients_rwsem);
188 	list_for_each_entry(__me_cl, &dev->me_clients, list) {
189 		if (__me_cl->client_id == client_id) {
190 			me_cl = mei_me_cl_get(__me_cl);
191 			break;
192 		}
193 	}
194 	up_read(&dev->me_clients_rwsem);
195 
196 	return me_cl;
197 }
198 
199 /**
200  * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
201  *	increases ref count
202  *
203  * @dev: the device structure
204  * @uuid: me client uuid
205  * @client_id: me client id
206  *
207  * Return: me client or NULL if not found
208  *
209  * Locking: dev->me_clients_rwsem
210  */
211 static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
212 					   const uuid_le *uuid, u8 client_id)
213 {
214 	struct mei_me_client *me_cl;
215 	const uuid_le *pn;
216 
217 	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
218 
219 	list_for_each_entry(me_cl, &dev->me_clients, list) {
220 		pn = &me_cl->props.protocol_name;
221 		if (uuid_le_cmp(*uuid, *pn) == 0 &&
222 		    me_cl->client_id == client_id)
223 			return mei_me_cl_get(me_cl);
224 	}
225 
226 	return NULL;
227 }
228 
229 
230 /**
231  * mei_me_cl_by_uuid_id - locate me client by client id and uuid
232  *	increases ref count
233  *
234  * @dev: the device structure
235  * @uuid: me client uuid
236  * @client_id: me client id
237  *
238  * Return: me client or NULL if not found
239  */
240 struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
241 					   const uuid_le *uuid, u8 client_id)
242 {
243 	struct mei_me_client *me_cl;
244 
245 	down_read(&dev->me_clients_rwsem);
246 	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
247 	up_read(&dev->me_clients_rwsem);
248 
249 	return me_cl;
250 }
251 
252 /**
253  * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
254  *
255  * @dev: the device structure
256  * @uuid: me client uuid
257  *
258  * Locking: called under "dev->device_lock" lock
259  */
260 void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
261 {
262 	struct mei_me_client *me_cl;
263 
264 	dev_dbg(dev->dev, "remove %pUl\n", uuid);
265 
266 	down_write(&dev->me_clients_rwsem);
267 	me_cl = __mei_me_cl_by_uuid(dev, uuid);
268 	__mei_me_cl_del(dev, me_cl);
269 	mei_me_cl_put(me_cl);
270 	up_write(&dev->me_clients_rwsem);
271 }
272 
273 /**
274  * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id
275  *
276  * @dev: the device structure
277  * @uuid: me client uuid
278  * @id: me client id
279  *
280  * Locking: called under "dev->device_lock" lock
281  */
282 void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
283 {
284 	struct mei_me_client *me_cl;
285 
286 	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);
287 
288 	down_write(&dev->me_clients_rwsem);
289 	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
290 	__mei_me_cl_del(dev, me_cl);
291 	mei_me_cl_put(me_cl);
292 	up_write(&dev->me_clients_rwsem);
293 }
294 
295 /**
296  * mei_me_cl_rm_all - remove all me clients
297  *
298  * @dev: the device structure
299  *
300  * Locking: called under "dev->device_lock" lock
301  */
302 void mei_me_cl_rm_all(struct mei_device *dev)
303 {
304 	struct mei_me_client *me_cl, *next;
305 
306 	down_write(&dev->me_clients_rwsem);
307 	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
308 		__mei_me_cl_del(dev, me_cl);
309 	up_write(&dev->me_clients_rwsem);
310 }
311 
312 /**
313  * mei_io_cb_free - free mei callback structure and its data buffer
314  *
315  * @cb: mei callback struct
316  */
317 void mei_io_cb_free(struct mei_cl_cb *cb)
318 {
319 	if (cb == NULL)
320 		return;
321 
322 	list_del(&cb->list);
323 	kfree(cb->buf.data);
324 	kfree(cb);
325 }
326 
327 /**
328  * mei_tx_cb_enqueue - queue tx callback
329  *
330  * Locking: called under "dev->device_lock" lock
331  *
332  * @cb: mei callback struct
333  * @head: an instance of list to queue on
334  */
335 static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
336 				     struct list_head *head)
337 {
338 	list_add_tail(&cb->list, head);
339 	cb->cl->tx_cb_queued++;
340 }
341 
342 /**
343  * mei_tx_cb_dequeue - dequeue tx callback
344  *
345  * Locking: called under "dev->device_lock" lock
346  *
347  * @cb: mei callback struct to dequeue and free
348  */
349 static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
350 {
351 	if (!WARN_ON(cb->cl->tx_cb_queued == 0))
352 		cb->cl->tx_cb_queued--;
353 
354 	mei_io_cb_free(cb);
355 }
356 
357 /**
358  * mei_io_cb_init - allocate and initialize io callback
359  *
360  * @cl: mei client
361  * @type: operation type
362  * @fp: pointer to file structure
363  *
364  * Return: mei_cl_cb pointer or NULL
365  */
366 static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
367 					enum mei_cb_file_ops type,
368 					const struct file *fp)
369 {
370 	struct mei_cl_cb *cb;
371 
372 	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
373 	if (!cb)
374 		return NULL;
375 
376 	INIT_LIST_HEAD(&cb->list);
377 	cb->fp = fp;
378 	cb->cl = cl;
379 	cb->buf_idx = 0;
380 	cb->fop_type = type;
381 	return cb;
382 }
383 
384 /**
385  * mei_io_list_flush_cl - removes cbs belonging to the cl.
386  *
387  * @head:  an instance of our list structure
388  * @cl:    host client
389  */
390 static void mei_io_list_flush_cl(struct list_head *head,
391 				 const struct mei_cl *cl)
392 {
393 	struct mei_cl_cb *cb, *next;
394 
395 	list_for_each_entry_safe(cb, next, head, list) {
396 		if (cl == cb->cl) {
397 			list_del_init(&cb->list);
398 			if (cb->fop_type == MEI_FOP_READ)
399 				mei_io_cb_free(cb);
400 		}
401 	}
402 }
403 
404 /**
405  * mei_io_tx_list_free_cl - removes cbs belonging to the cl and frees them
406  *
407  * @head: An instance of our list structure
408  * @cl: host client
409  */
410 static void mei_io_tx_list_free_cl(struct list_head *head,
411 				   const struct mei_cl *cl)
412 {
413 	struct mei_cl_cb *cb, *next;
414 
415 	list_for_each_entry_safe(cb, next, head, list) {
416 		if (cl == cb->cl)
417 			mei_tx_cb_dequeue(cb);
418 	}
419 }
420 
421 /**
422  * mei_io_list_free_fp - frees cbs on a list that match the file pointer
423  *
424  * @head: io list
425  * @fp: file pointer (matching cb file object), may be NULL
426  */
427 static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
428 {
429 	struct mei_cl_cb *cb, *next;
430 
431 	list_for_each_entry_safe(cb, next, head, list)
432 		if (!fp || fp == cb->fp)
433 			mei_io_cb_free(cb);
434 }
435 
436 /**
437  * mei_cl_alloc_cb - a convenient wrapper for allocating an io callback
438  *
439  * @cl: host client
440  * @length: size of the buffer
441  * @fop_type: operation type
442  * @fp: associated file pointer (might be NULL)
443  *
444  * Return: cb on success and NULL on failure
445  */
446 struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
447 				  enum mei_cb_file_ops fop_type,
448 				  const struct file *fp)
449 {
450 	struct mei_cl_cb *cb;
451 
452 	cb = mei_io_cb_init(cl, fop_type, fp);
453 	if (!cb)
454 		return NULL;
455 
456 	if (length == 0)
457 		return cb;
458 
459 	cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
460 	if (!cb->buf.data) {
461 		mei_io_cb_free(cb);
462 		return NULL;
463 	}
464 	cb->buf.size = length;
465 
466 	return cb;
467 }
468 
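/*
 * Illustrative sketch, not part of the driver: a cb allocated with
 * mei_cl_alloc_cb() owns its data buffer, so a single mei_io_cb_free()
 * releases both.  Hypothetical example, assuming "cl" is a valid host
 * client; MEI_FOP_WRITE is the fop type the write path uses:
 */
static int example_cb_lifetime(struct mei_cl *cl,
			       const void *data, size_t length)
{
	struct mei_cl_cb *cb;

	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
	if (!cb)
		return -ENOMEM;

	memcpy(cb->buf.data, data, length);	/* cb->buf.size == length */
	mei_io_cb_free(cb);			/* frees cb and cb->buf.data */
	return 0;
}
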
469 /**
470  * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
471  *     and enqueuing a control command cb
472  *
473  * @cl: host client
474  * @length: size of the buffer
475  * @fop_type: operation type
476  * @fp: associated file pointer (might be NULL)
477  *
478  * Return: cb on success and NULL on failure
479  * Locking: called under "dev->device_lock" lock
480  */
481 struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
482 					    enum mei_cb_file_ops fop_type,
483 					    const struct file *fp)
484 {
485 	struct mei_cl_cb *cb;
486 
487 	/* for RX always allocate at least client's mtu */
488 	if (length)
489 		length = max_t(size_t, length, mei_cl_mtu(cl));
490 
491 	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
492 	if (!cb)
493 		return NULL;
494 
495 	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
496 	return cb;
497 }
498 
499 /**
500  * mei_cl_read_cb - find this cl's callback in the read list
501  *     for a specific file
502  *
503  * @cl: host client
504  * @fp: file pointer (matching cb file object), may be NULL
505  *
506  * Return: cb on success, NULL if cb is not found
507  */
508 struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
509 {
510 	struct mei_cl_cb *cb;
511 
512 	list_for_each_entry(cb, &cl->rd_completed, list)
513 		if (!fp || fp == cb->fp)
514 			return cb;
515 
516 	return NULL;
517 }
518 
519 /**
520  * mei_cl_flush_queues - flushes queue lists belonging to cl.
521  *
522  * @cl: host client
523  * @fp: file pointer (matching cb file object), may be NULL
524  *
525  * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
526  */
527 int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
528 {
529 	struct mei_device *dev;
530 
531 	if (WARN_ON(!cl || !cl->dev))
532 		return -EINVAL;
533 
534 	dev = cl->dev;
535 
536 	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
537 	mei_io_tx_list_free_cl(&cl->dev->write_list, cl);
538 	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl);
539 	mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
540 	mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
541 	mei_io_list_free_fp(&cl->rd_pending, fp);
542 	mei_io_list_free_fp(&cl->rd_completed, fp);
543 
544 	return 0;
545 }
546 
547 /**
548  * mei_cl_init - initializes cl.
549  *
550  * @cl: host client to be initialized
551  * @dev: mei device
552  */
553 static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
554 {
555 	memset(cl, 0, sizeof(struct mei_cl));
556 	init_waitqueue_head(&cl->wait);
557 	init_waitqueue_head(&cl->rx_wait);
558 	init_waitqueue_head(&cl->tx_wait);
559 	init_waitqueue_head(&cl->ev_wait);
560 	INIT_LIST_HEAD(&cl->rd_completed);
561 	INIT_LIST_HEAD(&cl->rd_pending);
562 	INIT_LIST_HEAD(&cl->link);
563 	cl->writing_state = MEI_IDLE;
564 	cl->state = MEI_FILE_UNINITIALIZED;
565 	cl->dev = dev;
566 }
567 
568 /**
569  * mei_cl_allocate - allocates cl structure and sets it up.
570  *
571  * @dev: mei device
572  * Return: the allocated host client structure or NULL on failure
573  */
574 struct mei_cl *mei_cl_allocate(struct mei_device *dev)
575 {
576 	struct mei_cl *cl;
577 
578 	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
579 	if (!cl)
580 		return NULL;
581 
582 	mei_cl_init(cl, dev);
583 
584 	return cl;
585 }
586 
587 /**
588  * mei_cl_link - allocate host id in the host map
589  *
590  * @cl: host client
591  *
592  * Return: 0 on success
593  *	-EINVAL on incorrect values
594  *	-EMFILE if open count exceeded.
595  */
596 int mei_cl_link(struct mei_cl *cl)
597 {
598 	struct mei_device *dev;
599 	int id;
600 
601 	if (WARN_ON(!cl || !cl->dev))
602 		return -EINVAL;
603 
604 	dev = cl->dev;
605 
606 	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
607 	if (id >= MEI_CLIENTS_MAX) {
608 		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
609 		return -EMFILE;
610 	}
611 
612 	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
613 		dev_err(dev->dev, "open_handle_count exceeded %d",
614 			MEI_MAX_OPEN_HANDLE_COUNT);
615 		return -EMFILE;
616 	}
617 
618 	dev->open_handle_count++;
619 
620 	cl->host_client_id = id;
621 	list_add_tail(&cl->link, &dev->file_list);
622 
623 	set_bit(id, dev->host_clients_map);
624 
625 	cl->state = MEI_FILE_INITIALIZING;
626 
627 	cl_dbg(dev, cl, "link cl\n");
628 	return 0;
629 }
630 
631 /**
632  * mei_cl_unlink - remove host client from the list
633  *
634  * @cl: host client
635  *
636  * Return: always 0
637  */
638 int mei_cl_unlink(struct mei_cl *cl)
639 {
640 	struct mei_device *dev;
641 
642 	/* don't shout on error exit path */
643 	if (!cl)
644 		return 0;
645 
646 	if (WARN_ON(!cl->dev))
647 		return 0;
648 
649 	dev = cl->dev;
650 
651 	cl_dbg(dev, cl, "unlink client");
652 
653 	if (dev->open_handle_count > 0)
654 		dev->open_handle_count--;
655 
656 	/* never clear the 0 bit */
657 	if (cl->host_client_id)
658 		clear_bit(cl->host_client_id, dev->host_clients_map);
659 
660 	list_del_init(&cl->link);
661 
662 	cl->state = MEI_FILE_UNINITIALIZED;
663 	cl->writing_state = MEI_IDLE;
664 
665 	WARN_ON(!list_empty(&cl->rd_completed) ||
666 		!list_empty(&cl->rd_pending) ||
667 		!list_empty(&cl->link));
668 
669 	return 0;
670 }
671 
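/**
 * mei_host_client_init - set the device to the enabled state after
 *	client enumeration, schedule a bus rescan and request autosuspend
 *
 * @dev: the device structure
 */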
672 void mei_host_client_init(struct mei_device *dev)
673 {
674 	mei_set_devstate(dev, MEI_DEV_ENABLED);
675 	dev->reset_count = 0;
676 
677 	schedule_work(&dev->bus_rescan_work);
678 
679 	pm_runtime_mark_last_busy(dev->dev);
680 	dev_dbg(dev->dev, "rpm: autosuspend\n");
681 	pm_request_autosuspend(dev->dev);
682 }
683 
684 /**
685  * mei_hbuf_acquire - try to acquire host buffer
686  *
687  * @dev: the device structure
688  * Return: true if host buffer was acquired
689  */
690 bool mei_hbuf_acquire(struct mei_device *dev)
691 {
692 	if (mei_pg_state(dev) == MEI_PG_ON ||
693 	    mei_pg_in_transition(dev)) {
694 		dev_dbg(dev->dev, "device is in pg\n");
695 		return false;
696 	}
697 
698 	if (!dev->hbuf_is_ready) {
699 		dev_dbg(dev->dev, "hbuf is not ready\n");
700 		return false;
701 	}
702 
703 	dev->hbuf_is_ready = false;
704 
705 	return true;
706 }
707 
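/*
 * Illustrative sketch, not part of the driver: mei_hbuf_acquire() is a
 * try-lock style gate.  On success the caller owns the host buffer until
 * the hardware consumes the message and the interrupt path marks it
 * ready again.  Typical pattern, under dev->device_lock:
 *
 *	if (mei_hbuf_acquire(dev)) {
 *		... write one message now ...
 *	} else {
 *		... leave the cb queued; the irq thread will retry ...
 *	}
 */
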
708 /**
709  * mei_cl_wake_all - wake up readers, writers and event waiters so
710  *                 they can be interrupted
711  *
712  * @cl: host client
713  */
714 static void mei_cl_wake_all(struct mei_cl *cl)
715 {
716 	struct mei_device *dev = cl->dev;
717 
718 	/* synchronized under device mutex */
719 	if (waitqueue_active(&cl->rx_wait)) {
720 		cl_dbg(dev, cl, "Waking up reading client!\n");
721 		wake_up_interruptible(&cl->rx_wait);
722 	}
723 	/* synchronized under device mutex */
724 	if (waitqueue_active(&cl->tx_wait)) {
725 		cl_dbg(dev, cl, "Waking up writing client!\n");
726 		wake_up_interruptible(&cl->tx_wait);
727 	}
728 	/* synchronized under device mutex */
729 	if (waitqueue_active(&cl->ev_wait)) {
730 		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
731 		wake_up_interruptible(&cl->ev_wait);
732 	}
733 	/* synchronized under device mutex */
734 	if (waitqueue_active(&cl->wait)) {
735 		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
736 		wake_up(&cl->wait);
737 	}
738 }
739 
740 /**
741  * mei_cl_set_disconnected - set disconnected state and clear
742  *   associated states and resources
743  *
744  * @cl: host client
745  */
746 static void mei_cl_set_disconnected(struct mei_cl *cl)
747 {
748 	struct mei_device *dev = cl->dev;
749 
750 	if (cl->state == MEI_FILE_DISCONNECTED ||
751 	    cl->state <= MEI_FILE_INITIALIZING)
752 		return;
753 
754 	cl->state = MEI_FILE_DISCONNECTED;
755 	mei_io_tx_list_free_cl(&dev->write_list, cl);
756 	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl);
757 	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
758 	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
759 	mei_cl_wake_all(cl);
760 	cl->rx_flow_ctrl_creds = 0;
761 	cl->tx_flow_ctrl_creds = 0;
762 	cl->timer_count = 0;
763 
764 	if (!cl->me_cl)
765 		return;
766 
767 	if (!WARN_ON(cl->me_cl->connect_count == 0))
768 		cl->me_cl->connect_count--;
769 
770 	if (cl->me_cl->connect_count == 0)
771 		cl->me_cl->tx_flow_ctrl_creds = 0;
772 
773 	mei_me_cl_put(cl->me_cl);
774 	cl->me_cl = NULL;
775 }
776 
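/**
 * mei_cl_set_connecting - pin the me client and move the host client
 *	into the connecting state
 *
 * @cl: host client
 * @me_cl: me client
 *
 * Return: 0 on success, -ENOENT if the me client is not available,
 *	-EBUSY if a fixed address client is already connected
 */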
777 static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
778 {
779 	if (!mei_me_cl_get(me_cl))
780 		return -ENOENT;
781 
782 	/* only one connection is allowed for fixed address clients */
783 	if (me_cl->props.fixed_address) {
784 		if (me_cl->connect_count) {
785 			mei_me_cl_put(me_cl);
786 			return -EBUSY;
787 		}
788 	}
789 
790 	cl->me_cl = me_cl;
791 	cl->state = MEI_FILE_CONNECTING;
792 	cl->me_cl->connect_count++;
793 
794 	return 0;
795 }
796 
797 /**
798  * mei_cl_send_disconnect - send disconnect request
799  *
800  * @cl: host client
801  * @cb: callback block
802  *
803  * Return: 0, OK; otherwise, error.
804  */
805 static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
806 {
807 	struct mei_device *dev;
808 	int ret;
809 
810 	dev = cl->dev;
811 
812 	ret = mei_hbm_cl_disconnect_req(dev, cl);
813 	cl->status = ret;
814 	if (ret) {
815 		cl->state = MEI_FILE_DISCONNECT_REPLY;
816 		return ret;
817 	}
818 
819 	list_move_tail(&cb->list, &dev->ctrl_rd_list);
820 	cl->timer_count = MEI_CONNECT_TIMEOUT;
821 	mei_schedule_stall_timer(dev);
822 
823 	return 0;
824 }
825 
826 /**
827  * mei_cl_irq_disconnect - processes close related operation from
828  *	interrupt thread context - send disconnect request
829  *
830  * @cl: client
831  * @cb: callback block.
832  * @cmpl_list: complete list.
833  *
834  * Return: 0, OK; otherwise, error.
835  */
836 int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
837 			  struct list_head *cmpl_list)
838 {
839 	struct mei_device *dev = cl->dev;
840 	u32 msg_slots;
841 	int slots;
842 	int ret;
843 
844 	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
845 	slots = mei_hbuf_empty_slots(dev);
846 	if (slots < 0)
847 		return -EOVERFLOW;
848 
849 	if ((u32)slots < msg_slots)
850 		return -EMSGSIZE;
851 
852 	ret = mei_cl_send_disconnect(cl, cb);
853 	if (ret)
854 		list_move_tail(&cb->list, cmpl_list);
855 
856 	return ret;
857 }
858 
859 /**
860  * __mei_cl_disconnect - disconnect host client from the me one;
861  *     internal function, runtime pm has to be acquired already
862  *
863  * @cl: host client
864  *
865  * Return: 0 on success, <0 on failure.
866  */
867 static int __mei_cl_disconnect(struct mei_cl *cl)
868 {
869 	struct mei_device *dev;
870 	struct mei_cl_cb *cb;
871 	int rets;
872 
873 	dev = cl->dev;
874 
875 	cl->state = MEI_FILE_DISCONNECTING;
876 
877 	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
878 	if (!cb) {
879 		rets = -ENOMEM;
880 		goto out;
881 	}
882 
883 	if (mei_hbuf_acquire(dev)) {
884 		rets = mei_cl_send_disconnect(cl, cb);
885 		if (rets) {
886 			cl_err(dev, cl, "failed to disconnect.\n");
887 			goto out;
888 		}
889 	}
890 
891 	mutex_unlock(&dev->device_lock);
892 	wait_event_timeout(cl->wait,
893 			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
894 			   cl->state == MEI_FILE_DISCONNECTED,
895 			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
896 	mutex_lock(&dev->device_lock);
897 
898 	rets = cl->status;
899 	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
900 	    cl->state != MEI_FILE_DISCONNECTED) {
901 		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
902 		rets = -ETIME;
903 	}
904 
905 out:
906 	/* we disconnect also on error */
907 	mei_cl_set_disconnected(cl);
908 	if (!rets)
909 		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
910 
911 	mei_io_cb_free(cb);
912 	return rets;
913 }
914 
915 /**
916  * mei_cl_disconnect - disconnect host client from the me one
917  *
918  * @cl: host client
919  *
920  * Locking: called under "dev->device_lock" lock
921  *
922  * Return: 0 on success, <0 on failure.
923  */
924 int mei_cl_disconnect(struct mei_cl *cl)
925 {
926 	struct mei_device *dev;
927 	int rets;
928 
929 	if (WARN_ON(!cl || !cl->dev))
930 		return -ENODEV;
931 
932 	dev = cl->dev;
933 
934 	cl_dbg(dev, cl, "disconnecting");
935 
936 	if (!mei_cl_is_connected(cl))
937 		return 0;
938 
939 	if (mei_cl_is_fixed_address(cl)) {
940 		mei_cl_set_disconnected(cl);
941 		return 0;
942 	}
943 
944 	if (dev->dev_state == MEI_DEV_POWER_DOWN) {
945 		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
946 		mei_cl_set_disconnected(cl);
947 		return 0;
948 	}
949 
950 	rets = pm_runtime_get(dev->dev);
951 	if (rets < 0 && rets != -EINPROGRESS) {
952 		pm_runtime_put_noidle(dev->dev);
953 		cl_err(dev, cl, "rpm: get failed %d\n", rets);
954 		return rets;
955 	}
956 
957 	rets = __mei_cl_disconnect(cl);
958 
959 	cl_dbg(dev, cl, "rpm: autosuspend\n");
960 	pm_runtime_mark_last_busy(dev->dev);
961 	pm_runtime_put_autosuspend(dev->dev);
962 
963 	return rets;
964 }
965 
966 
967 /**
968  * mei_cl_is_other_connecting - checks if other
969  *    client with the same me client id is connecting
970  *
971  * @cl: private data of the file object
972  *
973  * Return: true if another client is connecting, false otherwise.
974  */
975 static bool mei_cl_is_other_connecting(struct mei_cl *cl)
976 {
977 	struct mei_device *dev;
978 	struct mei_cl_cb *cb;
979 
980 	dev = cl->dev;
981 
982 	list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
983 		if (cb->fop_type == MEI_FOP_CONNECT &&
984 		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
985 			return true;
986 	}
987 
988 	return false;
989 }
990 
991 /**
992  * mei_cl_send_connect - send connect request
993  *
994  * @cl: host client
995  * @cb: callback block
996  *
997  * Return: 0, OK; otherwise, error.
998  */
999 static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
1000 {
1001 	struct mei_device *dev;
1002 	int ret;
1003 
1004 	dev = cl->dev;
1005 
1006 	ret = mei_hbm_cl_connect_req(dev, cl);
1007 	cl->status = ret;
1008 	if (ret) {
1009 		cl->state = MEI_FILE_DISCONNECT_REPLY;
1010 		return ret;
1011 	}
1012 
1013 	list_move_tail(&cb->list, &dev->ctrl_rd_list);
1014 	cl->timer_count = MEI_CONNECT_TIMEOUT;
1015 	mei_schedule_stall_timer(dev);
1016 	return 0;
1017 }
1018 
1019 /**
1020  * mei_cl_irq_connect - send connect request in irq_thread context
1021  *
1022  * @cl: host client
1023  * @cb: callback block
1024  * @cmpl_list: complete list
1025  *
1026  * Return: 0, OK; otherwise, error.
1027  */
1028 int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
1029 		       struct list_head *cmpl_list)
1030 {
1031 	struct mei_device *dev = cl->dev;
1032 	u32 msg_slots;
1033 	int slots;
1034 	int rets;
1035 
1036 	if (mei_cl_is_other_connecting(cl))
1037 		return 0;
1038 
1039 	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
1040 	slots = mei_hbuf_empty_slots(dev);
1041 	if (slots < 0)
1042 		return -EOVERFLOW;
1043 
1044 	if ((u32)slots < msg_slots)
1045 		return -EMSGSIZE;
1046 
1047 	rets = mei_cl_send_connect(cl, cb);
1048 	if (rets)
1049 		list_move_tail(&cb->list, cmpl_list);
1050 
1051 	return rets;
1052 }
1053 
1054 /**
1055  * mei_cl_connect - connect host client to the me one
1056  *
1057  * @cl: host client
1058  * @me_cl: me client
1059  * @fp: pointer to file structure
1060  *
1061  * Locking: called under "dev->device_lock" lock
1062  *
1063  * Return: 0 on success, <0 on failure.
1064  */
1065 int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
1066 		   const struct file *fp)
1067 {
1068 	struct mei_device *dev;
1069 	struct mei_cl_cb *cb;
1070 	int rets;
1071 
1072 	if (WARN_ON(!cl || !cl->dev || !me_cl))
1073 		return -ENODEV;
1074 
1075 	dev = cl->dev;
1076 
1077 	rets = mei_cl_set_connecting(cl, me_cl);
1078 	if (rets)
1079 		goto nortpm;
1080 
1081 	if (mei_cl_is_fixed_address(cl)) {
1082 		cl->state = MEI_FILE_CONNECTED;
1083 		rets = 0;
1084 		goto nortpm;
1085 	}
1086 
1087 	rets = pm_runtime_get(dev->dev);
1088 	if (rets < 0 && rets != -EINPROGRESS) {
1089 		pm_runtime_put_noidle(dev->dev);
1090 		cl_err(dev, cl, "rpm: get failed %d\n", rets);
1091 		goto nortpm;
1092 	}
1093 
1094 	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
1095 	if (!cb) {
1096 		rets = -ENOMEM;
1097 		goto out;
1098 	}
1099 
1100 	/* run hbuf acquire last so we don't have to undo */
1101 	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
1102 		rets = mei_cl_send_connect(cl, cb);
1103 		if (rets)
1104 			goto out;
1105 	}
1106 
1107 	mutex_unlock(&dev->device_lock);
1108 	wait_event_timeout(cl->wait,
1109 			(cl->state == MEI_FILE_CONNECTED ||
1110 			 cl->state == MEI_FILE_DISCONNECTED ||
1111 			 cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
1112 			 cl->state == MEI_FILE_DISCONNECT_REPLY),
1113 			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
1114 	mutex_lock(&dev->device_lock);
1115 
1116 	if (!mei_cl_is_connected(cl)) {
1117 		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
1118 			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
1119 			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
1120 			/* ignore disconnect return value;
1121 			 * in case of failure reset will be invoked
1122 			 */
1123 			__mei_cl_disconnect(cl);
1124 			rets = -EFAULT;
1125 			goto out;
1126 		}
1127 
1128 		/* timeout or something went really wrong */
1129 		if (!cl->status)
1130 			cl->status = -EFAULT;
1131 	}
1132 
1133 	rets = cl->status;
1134 out:
1135 	cl_dbg(dev, cl, "rpm: autosuspend\n");
1136 	pm_runtime_mark_last_busy(dev->dev);
1137 	pm_runtime_put_autosuspend(dev->dev);
1138 
1139 	mei_io_cb_free(cb);
1140 
1141 nortpm:
1142 	if (!mei_cl_is_connected(cl))
1143 		mei_cl_set_disconnected(cl);
1144 
1145 	return rets;
1146 }
1147 
1148 /**
1149  * mei_cl_alloc_linked - allocate and link host client
1150  *
1151  * @dev: the device structure
1152  *
1153  * Return: cl on success, ERR_PTR on failure
1154  */
1155 struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
1156 {
1157 	struct mei_cl *cl;
1158 	int ret;
1159 
1160 	cl = mei_cl_allocate(dev);
1161 	if (!cl) {
1162 		ret = -ENOMEM;
1163 		goto err;
1164 	}
1165 
1166 	ret = mei_cl_link(cl);
1167 	if (ret)
1168 		goto err;
1169 
1170 	return cl;
1171 err:
1172 	kfree(cl);
1173 	return ERR_PTR(ret);
1174 }
1175 
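/*
 * Illustrative sketch, not part of the driver: a typical consumer, such
 * as a char-dev open path, combines the helpers above.  Simplified
 * hypothetical flow, assuming dev->device_lock is held and "uuid" names
 * an enumerated me client:
 */
static int example_open_and_connect(struct mei_device *dev,
				    const uuid_le *uuid,
				    const struct file *fp,
				    struct mei_cl **out)
{
	struct mei_me_client *me_cl;
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_alloc_linked(dev);
	if (IS_ERR(cl))
		return PTR_ERR(cl);

	me_cl = mei_me_cl_by_uuid(dev, uuid);	/* takes a lookup reference */
	if (!me_cl) {
		ret = -ENOTTY;
		goto err;
	}

	ret = mei_cl_connect(cl, me_cl, fp);	/* pins its own reference */
	mei_me_cl_put(me_cl);			/* drop the lookup reference */
	if (ret)
		goto err;

	*out = cl;
	return 0;
err:
	mei_cl_unlink(cl);
	kfree(cl);
	return ret;
}
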
1176 /**
1177  * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
1178  *
1179  * @cl: host client
1180  *
1181  * Return: 1 if tx_flow_ctrl_creds > 0, 0 otherwise; -EINVAL on invalid input
1182  */
1183 static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
1184 {
1185 	if (WARN_ON(!cl || !cl->me_cl))
1186 		return -EINVAL;
1187 
1188 	if (cl->tx_flow_ctrl_creds > 0)
1189 		return 1;
1190 
1191 	if (mei_cl_is_fixed_address(cl))
1192 		return 1;
1193 
1194 	if (mei_cl_is_single_recv_buf(cl)) {
1195 		if (cl->me_cl->tx_flow_ctrl_creds > 0)
1196 			return 1;
1197 	}
1198 	return 0;
1199 }
1200 
1201 /**
1202  * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
1203  *   for a client
1204  *
1205  * @cl: host client
1206  *
1207  * Return:
1208  *	0 on success
1209  *	-EINVAL when ctrl credits are <= 0
1210  */
1211 static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
1212 {
1213 	if (WARN_ON(!cl || !cl->me_cl))
1214 		return -EINVAL;
1215 
1216 	if (mei_cl_is_fixed_address(cl))
1217 		return 0;
1218 
1219 	if (mei_cl_is_single_recv_buf(cl)) {
1220 		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
1221 			return -EINVAL;
1222 		cl->me_cl->tx_flow_ctrl_creds--;
1223 	} else {
1224 		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
1225 			return -EINVAL;
1226 		cl->tx_flow_ctrl_creds--;
1227 	}
1228 	return 0;
1229 }
1230 
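/*
 * Illustrative sketch, not part of the driver: the two helpers above are
 * used as a check/consume pair around a single message send.  Pattern
 * from the tx paths below, under dev->device_lock:
 *
 *	rets = mei_cl_tx_flow_ctrl_creds(cl);	-- <0 error, 0 wait, 1 go
 *	if (rets <= 0)
 *		return rets;
 *	... send exactly one message via mei_write_message() ...
 *	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);	-- consume a credit
 */
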
1231 /**
1232  * mei_cl_notify_fop2req - convert fop to proper request
1233  *
1234  * @fop: client notification start response command
1235  *
1236  * Return:  MEI_HBM_NOTIFICATION_START/STOP
1237  */
1238 u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
1239 {
1240 	if (fop == MEI_FOP_NOTIFY_START)
1241 		return MEI_HBM_NOTIFICATION_START;
1242 	else
1243 		return MEI_HBM_NOTIFICATION_STOP;
1244 }
1245 
1246 /**
1247  * mei_cl_notify_req2fop - convert notification request to file operation type
1248  *
1249  * @req: hbm notification request type
1250  *
1251  * Return:  MEI_FOP_NOTIFY_START/STOP
1252  */
1253 enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
1254 {
1255 	if (req == MEI_HBM_NOTIFICATION_START)
1256 		return MEI_FOP_NOTIFY_START;
1257 	else
1258 		return MEI_FOP_NOTIFY_STOP;
1259 }
1260 
1261 /**
1262  * mei_cl_irq_notify - send notification request in irq_thread context
1263  *
1264  * @cl: client
1265  * @cb: callback block.
1266  * @cmpl_list: complete list.
1267  *
1268  * Return: 0 on success and error otherwise.
1269  */
1270 int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
1271 		      struct list_head *cmpl_list)
1272 {
1273 	struct mei_device *dev = cl->dev;
1274 	u32 msg_slots;
1275 	int slots;
1276 	int ret;
1277 	bool request;
1278 
1279 	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
1280 	slots = mei_hbuf_empty_slots(dev);
1281 	if (slots < 0)
1282 		return -EOVERFLOW;
1283 
1284 	if ((u32)slots < msg_slots)
1285 		return -EMSGSIZE;
1286 
1287 	request = mei_cl_notify_fop2req(cb->fop_type);
1288 	ret = mei_hbm_cl_notify_req(dev, cl, request);
1289 	if (ret) {
1290 		cl->status = ret;
1291 		list_move_tail(&cb->list, cmpl_list);
1292 		return ret;
1293 	}
1294 
1295 	list_move_tail(&cb->list, &dev->ctrl_rd_list);
1296 	return 0;
1297 }
1298 
1299 /**
1300  * mei_cl_notify_request - send notification stop/start request
1301  *
1302  * @cl: host client
1303  * @fp: associate request with file
1304  * @request: 1 for start or 0 for stop
1305  *
1306  * Locking: called under "dev->device_lock" lock
1307  *
1308  * Return: 0 on success and error otherwise.
1309  */
1310 int mei_cl_notify_request(struct mei_cl *cl,
1311 			  const struct file *fp, u8 request)
1312 {
1313 	struct mei_device *dev;
1314 	struct mei_cl_cb *cb;
1315 	enum mei_cb_file_ops fop_type;
1316 	int rets;
1317 
1318 	if (WARN_ON(!cl || !cl->dev))
1319 		return -ENODEV;
1320 
1321 	dev = cl->dev;
1322 
1323 	if (!dev->hbm_f_ev_supported) {
1324 		cl_dbg(dev, cl, "notifications not supported\n");
1325 		return -EOPNOTSUPP;
1326 	}
1327 
1328 	if (!mei_cl_is_connected(cl))
1329 		return -ENODEV;
1330 
1331 	rets = pm_runtime_get(dev->dev);
1332 	if (rets < 0 && rets != -EINPROGRESS) {
1333 		pm_runtime_put_noidle(dev->dev);
1334 		cl_err(dev, cl, "rpm: get failed %d\n", rets);
1335 		return rets;
1336 	}
1337 
1338 	fop_type = mei_cl_notify_req2fop(request);
1339 	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
1340 	if (!cb) {
1341 		rets = -ENOMEM;
1342 		goto out;
1343 	}
1344 
1345 	if (mei_hbuf_acquire(dev)) {
1346 		if (mei_hbm_cl_notify_req(dev, cl, request)) {
1347 			rets = -ENODEV;
1348 			goto out;
1349 		}
1350 		list_move_tail(&cb->list, &dev->ctrl_rd_list);
1351 	}
1352 
1353 	mutex_unlock(&dev->device_lock);
1354 	wait_event_timeout(cl->wait,
1355 			   cl->notify_en == request ||
1356 			   cl->status ||
1357 			   !mei_cl_is_connected(cl),
1358 			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
1359 	mutex_lock(&dev->device_lock);
1360 
1361 	if (cl->notify_en != request && !cl->status)
1362 		cl->status = -EFAULT;
1363 
1364 	rets = cl->status;
1365 
1366 out:
1367 	cl_dbg(dev, cl, "rpm: autosuspend\n");
1368 	pm_runtime_mark_last_busy(dev->dev);
1369 	pm_runtime_put_autosuspend(dev->dev);
1370 
1371 	mei_io_cb_free(cb);
1372 	return rets;
1373 }
1374 
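/*
 * Illustrative sketch, not part of the driver: enabling or disabling FW
 * notifications is one synchronous call.  Hypothetical usage, under
 * dev->device_lock:
 *
 *	ret = mei_cl_notify_request(cl, fp, 1);	-- start notifications
 *	...
 *	ret = mei_cl_notify_request(cl, fp, 0);	-- stop notifications
 *
 * Completion is signalled on cl->wait once cl->notify_en matches the
 * requested state (or cl->status carries an error).
 */
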
1375 /**
1376  * mei_cl_notify - raise notification
1377  *
1378  * @cl: host client
1379  *
1380  * Locking: called under "dev->device_lock" lock
1381  */
1382 void mei_cl_notify(struct mei_cl *cl)
1383 {
1384 	struct mei_device *dev;
1385 
1386 	if (!cl || !cl->dev)
1387 		return;
1388 
1389 	dev = cl->dev;
1390 
1391 	if (!cl->notify_en)
1392 		return;
1393 
1394 	cl_dbg(dev, cl, "notify event");
1395 	cl->notify_ev = true;
1396 	if (!mei_cl_bus_notify_event(cl))
1397 		wake_up_interruptible(&cl->ev_wait);
1398 
1399 	if (cl->ev_async)
1400 		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
1401 
1402 }
1403 
1404 /**
1405  * mei_cl_notify_get - get or wait for notification event
1406  *
1407  * @cl: host client
1408  * @block: this request is blocking
1409  * @notify_ev: true if notification event was received
1410  *
1411  * Locking: called under "dev->device_lock" lock
1412  *
1413  * Return: 0 on success and error otherwise.
1414  */
1415 int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
1416 {
1417 	struct mei_device *dev;
1418 	int rets;
1419 
1420 	*notify_ev = false;
1421 
1422 	if (WARN_ON(!cl || !cl->dev))
1423 		return -ENODEV;
1424 
1425 	dev = cl->dev;
1426 
1427 	if (!dev->hbm_f_ev_supported) {
1428 		cl_dbg(dev, cl, "notifications not supported\n");
1429 		return -EOPNOTSUPP;
1430 	}
1431 
1432 	if (!mei_cl_is_connected(cl))
1433 		return -ENODEV;
1434 
1435 	if (cl->notify_ev)
1436 		goto out;
1437 
1438 	if (!block)
1439 		return -EAGAIN;
1440 
1441 	mutex_unlock(&dev->device_lock);
1442 	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
1443 	mutex_lock(&dev->device_lock);
1444 
1445 	if (rets < 0)
1446 		return rets;
1447 
1448 out:
1449 	*notify_ev = cl->notify_ev;
1450 	cl->notify_ev = false;
1451 	return 0;
1452 }
1453 
1454 /**
1455  * mei_cl_read_start - start reading a client message
1456  *
1457  * @cl: host client
1458  * @length: number of bytes to read
1459  * @fp: pointer to file structure
1460  *
1461  * Return: 0 on success, <0 on failure.
1462  */
1463 int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
1464 {
1465 	struct mei_device *dev;
1466 	struct mei_cl_cb *cb;
1467 	int rets;
1468 
1469 	if (WARN_ON(!cl || !cl->dev))
1470 		return -ENODEV;
1471 
1472 	dev = cl->dev;
1473 
1474 	if (!mei_cl_is_connected(cl))
1475 		return -ENODEV;
1476 
1477 	if (!mei_me_cl_is_active(cl->me_cl)) {
1478 		cl_err(dev, cl, "no such me client\n");
1479 		return  -ENOTTY;
1480 	}
1481 
1482 	if (mei_cl_is_fixed_address(cl))
1483 		return 0;
1484 
1485 	/* HW currently supports only one pending read */
1486 	if (cl->rx_flow_ctrl_creds)
1487 		return -EBUSY;
1488 
1489 	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
1490 	if (!cb)
1491 		return -ENOMEM;
1492 
1493 	rets = pm_runtime_get(dev->dev);
1494 	if (rets < 0 && rets != -EINPROGRESS) {
1495 		pm_runtime_put_noidle(dev->dev);
1496 		cl_err(dev, cl, "rpm: get failed %d\n", rets);
1497 		goto nortpm;
1498 	}
1499 
1500 	rets = 0;
1501 	if (mei_hbuf_acquire(dev)) {
1502 		rets = mei_hbm_cl_flow_control_req(dev, cl);
1503 		if (rets < 0)
1504 			goto out;
1505 
1506 		list_move_tail(&cb->list, &cl->rd_pending);
1507 	}
1508 	cl->rx_flow_ctrl_creds++;
1509 
1510 out:
1511 	cl_dbg(dev, cl, "rpm: autosuspend\n");
1512 	pm_runtime_mark_last_busy(dev->dev);
1513 	pm_runtime_put_autosuspend(dev->dev);
1514 nortpm:
1515 	if (rets)
1516 		mei_io_cb_free(cb);
1517 
1518 	return rets;
1519 }
1520 
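/*
 * Illustrative sketch, not part of the driver: a reader arms the read
 * with mei_cl_read_start() and then waits for a cb to appear on
 * cl->rd_completed, much like the char-dev read path.  Hypothetical
 * blocking helper, entered and left with dev->device_lock held:
 */
static struct mei_cl_cb *example_read_wait(struct mei_cl *cl,
					   const struct file *fp)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;
	int ret;

	ret = mei_cl_read_start(cl, mei_cl_mtu(cl), fp);
	if (ret && ret != -EBUSY)	/* -EBUSY: a read is already armed */
		return ERR_PTR(ret);

	mutex_unlock(&dev->device_lock);
	ret = wait_event_interruptible(cl->rx_wait,
				       mei_cl_read_cb(cl, fp) ||
				       !mei_cl_is_connected(cl));
	mutex_lock(&dev->device_lock);
	if (ret)
		return ERR_PTR(ret);	/* -ERESTARTSYS on signal */

	cb = mei_cl_read_cb(cl, fp);
	return cb ? cb : ERR_PTR(-ENODEV);
}
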
1521 /**
1522  * mei_msg_hdr_init - initialize mei message header
1523  *
1524  * @mei_hdr: mei message header
1525  * @cb: message callback structure
1526  */
1527 static void mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, struct mei_cl_cb *cb)
1528 {
1529 	mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
1530 	mei_hdr->me_addr = mei_cl_me_id(cb->cl);
1531 	mei_hdr->length = 0;
1532 	mei_hdr->reserved = 0;
1533 	mei_hdr->msg_complete = 0;
1534 	mei_hdr->dma_ring = 0;
1535 	mei_hdr->internal = cb->internal;
1536 }
1537 
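/*
 * Illustrative sketch, not part of the driver: for a small payload that
 * fits in the host buffer the tx paths below complete the header as
 *
 *	mei_hdr.length       = <payload length in bytes>
 *	mei_hdr.msg_complete = 1	-- whole message in one chunk
 *	mei_hdr.dma_ring     = 0	-- payload travels inline
 *
 * whereas a chunked or DMA-ring transfer leaves msg_complete at 0 until
 * the final fragment is queued.
 */
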
1538 /**
1539  * mei_cl_irq_write - write a message to device
1540  *	from the interrupt thread context
1541  *
1542  * @cl: client
1543  * @cb: callback block.
1544  * @cmpl_list: complete list.
1545  *
1546  * Return: 0, OK; otherwise error.
1547  */
1548 int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1549 		     struct list_head *cmpl_list)
1550 {
1551 	struct mei_device *dev;
1552 	struct mei_msg_data *buf;
1553 	struct mei_msg_hdr mei_hdr;
1554 	size_t hdr_len = sizeof(mei_hdr);
1555 	size_t len;
1556 	size_t hbuf_len, dr_len;
1557 	int hbuf_slots;
1558 	u32 dr_slots;
1559 	u32 dma_len;
1560 	int rets;
1561 	bool first_chunk;
1562 	const void *data;
1563 
1564 	if (WARN_ON(!cl || !cl->dev))
1565 		return -ENODEV;
1566 
1567 	dev = cl->dev;
1568 
1569 	buf = &cb->buf;
1570 
1571 	first_chunk = cb->buf_idx == 0;
1572 
1573 	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
1574 	if (rets < 0)
1575 		goto err;
1576 
1577 	if (rets == 0) {
1578 		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
1579 		return 0;
1580 	}
1581 
1582 	len = buf->size - cb->buf_idx;
1583 	data = buf->data + cb->buf_idx;
1584 	hbuf_slots = mei_hbuf_empty_slots(dev);
1585 	if (hbuf_slots < 0) {
1586 		rets = -EOVERFLOW;
1587 		goto err;
1588 	}
1589 
1590 	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
1591 	dr_slots = mei_dma_ring_empty_slots(dev);
1592 	dr_len = mei_slots2data(dr_slots);
1593 
1594 	mei_msg_hdr_init(&mei_hdr, cb);
1595 
1596 	/*
1597 	 * Split the message only if we can write the whole host buffer,
1598 	 * otherwise wait for the next time the host buffer is empty.
1599 	 */
1600 	if (len + hdr_len <= hbuf_len) {
1601 		mei_hdr.length = len;
1602 		mei_hdr.msg_complete = 1;
1603 	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
1604 		mei_hdr.dma_ring = 1;
1605 		if (len > dr_len)
1606 			len = dr_len;
1607 		else
1608 			mei_hdr.msg_complete = 1;
1609 
1610 		mei_hdr.length = sizeof(dma_len);
1611 		dma_len = len;
1612 		data = &dma_len;
1613 	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
1614 		len = hbuf_len - hdr_len;
1615 		mei_hdr.length = len;
1616 	} else {
1617 		return 0;
1618 	}
1619 
1620 	if (mei_hdr.dma_ring)
1621 		mei_dma_ring_write(dev, buf->data + cb->buf_idx, len);
1622 
1623 	rets = mei_write_message(dev, &mei_hdr, hdr_len, data, mei_hdr.length);
1624 	if (rets)
1625 		goto err;
1626 
1627 	cl->status = 0;
1628 	cl->writing_state = MEI_WRITING;
1629 	cb->buf_idx += len;
1630 
1631 	if (first_chunk) {
1632 		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
1633 			rets = -EIO;
1634 			goto err;
1635 		}
1636 	}
1637 
1638 	if (mei_hdr.msg_complete)
1639 		list_move_tail(&cb->list, &dev->write_waiting_list);
1640 
1641 	return 0;
1642 
1643 err:
1644 	cl->status = rets;
1645 	list_move_tail(&cb->list, cmpl_list);
1646 	return rets;
1647 }
1648 
1649 /**
1650  * mei_cl_write - submit a write cb to mei device
1651  *	assumes device_lock is locked
1652  *
1653  * @cl: host client
1654  * @cb: write callback with filled data
1655  *
1656  * Return: number of bytes sent on success, <0 on failure.
1657  */
1658 ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
1659 {
1660 	struct mei_device *dev;
1661 	struct mei_msg_data *buf;
1662 	struct mei_msg_hdr mei_hdr;
1663 	size_t hdr_len = sizeof(mei_hdr);
1664 	size_t len, hbuf_len, dr_len;
1665 	int hbuf_slots;
1666 	u32 dr_slots;
1667 	u32 dma_len;
1668 	ssize_t rets;
1669 	bool blocking;
1670 	const void *data;
1671 
1672 	if (WARN_ON(!cl || !cl->dev))
1673 		return -ENODEV;
1674 
1675 	if (WARN_ON(!cb))
1676 		return -EINVAL;
1677 
1678 	dev = cl->dev;
1679 
1680 	buf = &cb->buf;
1681 	len = buf->size;
1682 
1683 	cl_dbg(dev, cl, "len=%zd\n", len);
1684 
1685 	blocking = cb->blocking;
1686 	data = buf->data;
1687 
1688 	rets = pm_runtime_get(dev->dev);
1689 	if (rets < 0 && rets != -EINPROGRESS) {
1690 		pm_runtime_put_noidle(dev->dev);
1691 		cl_err(dev, cl, "rpm: get failed %zd\n", rets);
1692 		goto free;
1693 	}
1694 
1695 	cb->buf_idx = 0;
1696 	cl->writing_state = MEI_IDLE;
1697 
1698 
1699 	rets = mei_cl_tx_flow_ctrl_creds(cl);
1700 	if (rets < 0)
1701 		goto err;
1702 
1703 	mei_msg_hdr_init(&mei_hdr, cb);
1704 
1705 	if (rets == 0) {
1706 		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
1707 		rets = len;
1708 		goto out;
1709 	}
1710 
1711 	if (!mei_hbuf_acquire(dev)) {
1712 		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
1713 		rets = len;
1714 		goto out;
1715 	}
1716 
1717 	hbuf_slots = mei_hbuf_empty_slots(dev);
1718 	if (hbuf_slots < 0) {
1719 		rets = -EOVERFLOW;
1720 		goto out;
1721 	}
1722 
1723 	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
1724 	dr_slots = mei_dma_ring_empty_slots(dev);
1725 	dr_len = mei_slots2data(dr_slots);
1726 
1727 	if (len + hdr_len <= hbuf_len) {
1728 		mei_hdr.length = len;
1729 		mei_hdr.msg_complete = 1;
1730 	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
1731 		mei_hdr.dma_ring = 1;
1732 		if (len > dr_len)
1733 			len = dr_len;
1734 		else
1735 			mei_hdr.msg_complete = 1;
1736 
1737 		mei_hdr.length = sizeof(dma_len);
1738 		dma_len = len;
1739 		data = &dma_len;
1740 	} else {
1741 		len = hbuf_len - hdr_len;
1742 		mei_hdr.length = len;
1743 	}
1744 
1745 	if (mei_hdr.dma_ring)
1746 		mei_dma_ring_write(dev, buf->data, len);
1747 
1748 	rets = mei_write_message(dev, &mei_hdr, hdr_len,
1749 				 data, mei_hdr.length);
1750 	if (rets)
1751 		goto err;
1752 
1753 	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
1754 	if (rets)
1755 		goto err;
1756 
1757 	cl->writing_state = MEI_WRITING;
1758 	cb->buf_idx = len;
1759 	/* restore return value */
1760 	len = buf->size;
1761 
1762 out:
1763 	if (mei_hdr.msg_complete)
1764 		mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
1765 	else
1766 		mei_tx_cb_enqueue(cb, &dev->write_list);
1767 
1768 	cb = NULL;
1769 	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
1770 
1771 		mutex_unlock(&dev->device_lock);
1772 		rets = wait_event_interruptible(cl->tx_wait,
1773 				cl->writing_state == MEI_WRITE_COMPLETE ||
1774 				(!mei_cl_is_connected(cl)));
1775 		mutex_lock(&dev->device_lock);
1776 		/* wait_event_interruptible returns -ERESTARTSYS */
1777 		if (rets) {
1778 			if (signal_pending(current))
1779 				rets = -EINTR;
1780 			goto err;
1781 		}
1782 		if (cl->writing_state != MEI_WRITE_COMPLETE) {
1783 			rets = -EFAULT;
1784 			goto err;
1785 		}
1786 	}
1787 
1788 	rets = len;
1789 err:
1790 	cl_dbg(dev, cl, "rpm: autosuspend\n");
1791 	pm_runtime_mark_last_busy(dev->dev);
1792 	pm_runtime_put_autosuspend(dev->dev);
1793 free:
1794 	mei_io_cb_free(cb);
1795 
1796 	return rets;
1797 }
1798 
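/*
 * Illustrative sketch, not part of the driver: a writer allocates a cb,
 * fills the buffer and hands it to mei_cl_write(), which takes ownership
 * of the cb on success and on failure.  Hypothetical helper, assuming
 * dev->device_lock is held and "cl" is connected:
 */
static ssize_t example_write(struct mei_cl *cl, const void *data, size_t len)
{
	struct mei_cl_cb *cb;

	cb = mei_cl_alloc_cb(cl, len, MEI_FOP_WRITE, NULL);
	if (!cb)
		return -ENOMEM;

	memcpy(cb->buf.data, data, len);
	cb->blocking = 1;		/* wait for MEI_WRITE_COMPLETE */

	return mei_cl_write(cl, cb);	/* consumes the cb either way */
}
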
1799 
1800 /**
1801  * mei_cl_complete - processes completed operation for a client
1802  *
1803  * @cl: private data of the file object.
1804  * @cb: callback block.
1805  */
1806 void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
1807 {
1808 	struct mei_device *dev = cl->dev;
1809 
1810 	switch (cb->fop_type) {
1811 	case MEI_FOP_WRITE:
1812 		mei_tx_cb_dequeue(cb);
1813 		cl->writing_state = MEI_WRITE_COMPLETE;
1814 		if (waitqueue_active(&cl->tx_wait)) {
1815 			wake_up_interruptible(&cl->tx_wait);
1816 		} else {
1817 			pm_runtime_mark_last_busy(dev->dev);
1818 			pm_request_autosuspend(dev->dev);
1819 		}
1820 		break;
1821 
1822 	case MEI_FOP_READ:
1823 		list_add_tail(&cb->list, &cl->rd_completed);
1824 		if (!mei_cl_is_fixed_address(cl) &&
1825 		    !WARN_ON(!cl->rx_flow_ctrl_creds))
1826 			cl->rx_flow_ctrl_creds--;
1827 		if (!mei_cl_bus_rx_event(cl))
1828 			wake_up_interruptible(&cl->rx_wait);
1829 		break;
1830 
1831 	case MEI_FOP_CONNECT:
1832 	case MEI_FOP_DISCONNECT:
1833 	case MEI_FOP_NOTIFY_STOP:
1834 	case MEI_FOP_NOTIFY_START:
1835 		if (waitqueue_active(&cl->wait))
1836 			wake_up(&cl->wait);
1837 
1838 		break;
1839 	case MEI_FOP_DISCONNECT_RSP:
1840 		mei_io_cb_free(cb);
1841 		mei_cl_set_disconnected(cl);
1842 		break;
1843 	default:
1844 		BUG();	/* unknown fop type */
1845 	}
1846 }
1847 
1848 
1849 /**
1850  * mei_cl_all_disconnect - disconnect forcefully all connected clients
1851  *
1852  * @dev: mei device
1853  */
1854 void mei_cl_all_disconnect(struct mei_device *dev)
1855 {
1856 	struct mei_cl *cl;
1857 
1858 	list_for_each_entry(cl, &dev->file_list, link)
1859 		mei_cl_set_disconnected(cl);
1860 }
1861