// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_init - initialize me client
 *
 * @me_cl: me client
 */
void mei_me_cl_init(struct mei_me_client *me_cl)
{
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
}

/**
 * mei_me_cl_get - increases me client refcount
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL
 */
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
		return me_cl;

	return NULL;
}

/**
 * mei_me_cl_release - free me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @ref: me_client refcount
 */
static void mei_me_cl_release(struct kref *ref)
{
	struct mei_me_client *me_cl =
		container_of(ref, struct mei_me_client, refcnt);

	kfree(me_cl);
}

/**
 * mei_me_cl_put - decrease me client refcount and free client if necessary
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @me_cl: me client
 */
void mei_me_cl_put(struct mei_me_client *me_cl)
{
	if (me_cl)
		kref_put(&me_cl->refcnt, mei_me_cl_release);
}

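/*
 * Illustrative lookup pattern (a sketch, not driver code): the lookup
 * helpers below return a me client with an elevated refcount which the
 * caller must drop with mei_me_cl_put():
 *
 *	me_cl = mei_me_cl_by_uuid(dev, &uuid);
 *	if (me_cl) {
 *		... use me_cl ...
 *		mei_me_cl_put(me_cl);
 *	}
 */
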
/**
 * __mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * Locking: dev->me_clients_rwsem
 */
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	if (!me_cl)
		return;

	list_del_init(&me_cl->list);
	mei_me_cl_put(me_cl);
}

/**
 * mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_add - add me client to the list
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}

/**
 * __mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_by_id - locate me client by client id
 *	increases ref count
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
	struct mei_me_client *__me_cl, *me_cl = NULL;

	down_read(&dev->me_clients_rwsem);
	list_for_each_entry(__me_cl, &dev->me_clients, list) {
		if (__me_cl->client_id == client_id) {
			me_cl = mei_me_cl_get(__me_cl);
			break;
		}
	}
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0 &&
		    me_cl->client_id == client_id)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl\n", uuid);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_by_uuid_id - remove all me clients matching uuid and client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_all - remove all me clients
 *
 * @dev: the device structure
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_all(struct mei_device *dev)
{
	struct mei_me_client *me_cl, *next;

	down_write(&dev->me_clients_rwsem);
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_io_cb_free - unlink the io callback from its list and
 *	free it together with its data buffer
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (!cb)
		return;

	list_del(&cb->list);
	kfree(cb->buf.data);
	kfree(cb);
}

/**
 * mei_tx_cb_enqueue - queue tx callback
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct
 * @head: an instance of list to queue on
 */
static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
				     struct list_head *head)
{
	list_add_tail(&cb->list, head);
	cb->cl->tx_cb_queued++;
}

/**
 * mei_tx_cb_dequeue - dequeue tx callback
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct to dequeue and free
 */
static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
{
	if (!WARN_ON(cb->cl->tx_cb_queued == 0))
		cb->cl->tx_cb_queued--;

	mei_io_cb_free(cb);
}

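/*
 * The tx_cb_queued counter maintained by the two helpers above tracks
 * the number of queued tx callbacks per host client; the write paths
 * use it to bound the tx queue, and the WARN_ON above guards against
 * counter underflow.
 */
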
/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @type: operation type
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL on allocation failure
 */
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
					enum mei_cb_file_ops type,
					const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	INIT_LIST_HEAD(&cb->list);
	cb->fp = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	cb->fop_type = type;
	cb->vtag = 0;

	return cb;
}

/**
 * mei_io_list_flush_cl - removes cbs belonging to the cl
 *
 * Read cbs are freed here; other cb types are only unlinked,
 * as they are released by their issuers.
 *
 * @head:  an instance of our list structure
 * @cl:    host client
 */
static void mei_io_list_flush_cl(struct list_head *head,
				 const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl) {
			list_del_init(&cb->list);
			if (cb->fop_type == MEI_FOP_READ)
				mei_io_cb_free(cb);
		}
	}
}

/**
 * mei_io_tx_list_free_cl - removes cbs belonging to the cl and frees them
 *
 * @head: an instance of our list structure
 * @cl: host client
 */
static void mei_io_tx_list_free_cl(struct list_head *head,
				   const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl)
			mei_tx_cb_dequeue(cb);
	}
}

422 
423 /**
424  * mei_io_list_free_fp - free cb from a list that matches file pointer
425  *
426  * @head: io list
427  * @fp: file pointer (matching cb file object), may be NULL
428  */
429 static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
430 {
431 	struct mei_cl_cb *cb, *next;
432 
433 	list_for_each_entry_safe(cb, next, head, list)
434 		if (!fp || fp == cb->fp)
435 			mei_io_cb_free(cb);
436 }
437 
438 /**
439  * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
440  *
441  * @cl: host client
442  * @length: size of the buffer
443  * @fop_type: operation type
444  * @fp: associated file pointer (might be NULL)
445  *
446  * Return: cb on success and NULL on failure
447  */
448 struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
449 				  enum mei_cb_file_ops fop_type,
450 				  const struct file *fp)
451 {
452 	struct mei_cl_cb *cb;
453 
454 	cb = mei_io_cb_init(cl, fop_type, fp);
455 	if (!cb)
456 		return NULL;
457 
458 	if (length == 0)
459 		return cb;
460 
461 	cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
462 	if (!cb->buf.data) {
463 		mei_io_cb_free(cb);
464 		return NULL;
465 	}
466 	cb->buf.size = length;
467 
468 	return cb;
469 }
470 
/**
 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
 *     and enqueuing of the control commands cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 * Locking: called under "dev->device_lock" lock
 */
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
					    enum mei_cb_file_ops fop_type,
					    const struct file *fp)
{
	struct mei_cl_cb *cb;

	/* for RX always allocate at least client's mtu */
	if (length)
		length = max_t(size_t, length, mei_cl_mtu(cl));

	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
	if (!cb)
		return NULL;

	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
	return cb;
}

/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *     for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &cl->rd_completed, list)
		if (!fp || fp == cb->fp)
			return cb;

	return NULL;
}

/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_tx_list_free_cl(&dev->write_list, cl);
	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_free_fp(&cl->rd_pending, fp);
	mei_io_list_free_fp(&cl->rd_completed, fp);

	return 0;
}

/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(*cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	init_waitqueue_head(&cl->ev_wait);
	INIT_LIST_HEAD(&cl->rd_completed);
	INIT_LIST_HEAD(&cl->rd_pending);
	INIT_LIST_HEAD(&cl->link);
	cl->writing_state = MEI_IDLE;
	cl->state = MEI_FILE_UNINITIALIZED;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * Return: The allocated host client structure or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d\n", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d\n",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	if (WARN_ON(!cl->dev))
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client\n");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_UNINITIALIZED;
	cl->writing_state = MEI_IDLE;

	WARN_ON(!list_empty(&cl->rd_completed) ||
		!list_empty(&cl->rd_pending) ||
		!list_empty(&cl->link));

	return 0;
}

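/**
 * mei_host_client_init - mark the device enabled, schedule a rescan of
 *	the me clients on the bus, and let the device autosuspend
 *
 * @dev: the device structure
 */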
void mei_host_client_init(struct mei_device *dev)
{
	mei_set_devstate(dev, MEI_DEV_ENABLED);
	dev->reset_count = 0;

	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_request_autosuspend(dev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * Return: true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    mei_pg_in_transition(dev)) {
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

/**
 * mei_cl_wake_all - wake up readers, writers and event waiters so
 *                 they can be interrupted
 *
 * @cl: host client
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up(&cl->wait);
	}
}

/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *   associated states and resources
 *
 * @cl: host client
 */
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	mei_io_tx_list_free_cl(&dev->write_list, cl);
	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	if (!cl->me_cl)
		return;

	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}

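/**
 * mei_cl_set_connecting - set connecting state and bind the host client
 *	to the me client
 *
 * @cl: host client
 * @me_cl: me client to bind to
 *
 * Return: 0 on success, -ENOENT if the me client reference cannot be
 *	taken, -EBUSY if a fixed address client is already connected
 */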
static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
	if (!mei_me_cl_get(me_cl))
		return -ENOENT;

	/* only one connection is allowed for fixed address clients */
	if (me_cl->props.fixed_address) {
		if (me_cl->connect_count) {
			mei_me_cl_put(me_cl);
			return -EBUSY;
		}
	}

	cl->me_cl = me_cl;
	cl->state = MEI_FILE_CONNECTING;
	cl->me_cl->connect_count++;

	return 0;
}

/**
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);

	return 0;
}

/**
 * mei_cl_irq_disconnect - processes close related operation from
 *	interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
			  struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_cl_send_disconnect(cl, cb);
	if (ret)
		list_move_tail(&cb->list, cmpl_list);

	return ret;
}

/**
 * __mei_cl_disconnect - disconnect host client from the me one;
 *     internal function, runtime pm has to be acquired already
 *
 * @cl: host client
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting\n");

	if (!mei_cl_is_connected(cl))
		return 0;

	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	if (dev->dev_state == MEI_DEV_POWER_DOWN) {
		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
		mei_cl_set_disconnected(cl);
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}

/**
 * mei_cl_is_other_connecting - checks if other
 *    client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * Return: true if other client is connecting, false - otherwise.
 */
static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;

	dev = cl->dev;

	list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
		if (cb->fop_type == MEI_FOP_CONNECT &&
		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
			return true;
	}

	return false;
}

/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);
	return 0;
}

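/*
 * Both the connect and disconnect requests above park the cb on
 * ctrl_rd_list and arm the stall timer; the cb is completed, and the
 * waiter woken, when the matching HBM response arrives or the timer
 * declares a stall.
 */
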
/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: host client
 * @cb: callback block
 * @cmpl_list: complete list
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int rets;

	if (mei_cl_is_other_connecting(cl))
		return 0;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	rets = mei_cl_send_connect(cl, cb);
	if (rets)
		list_move_tail(&cb->list, cmpl_list);

	return rets;
}

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		goto nortpm;

	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		rets = 0;
		goto nortpm;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			(cl->state == MEI_FILE_CONNECTED ||
			 cl->state == MEI_FILE_DISCONNECTED ||
			 cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			 cl->state == MEI_FILE_DISCONNECT_REPLY),
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}

/**
 * mei_cl_alloc_linked - allocate and link host client
 *
 * @dev: the device structure
 *
 * Return: cl on success, ERR_PTR on failure
 */
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_allocate(dev);
	if (!cl) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mei_cl_link(cl);
	if (ret)
		goto err;

	return cl;
err:
	kfree(cl);
	return ERR_PTR(ret);
}

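/*
 * Typical caller sequence (a sketch only, loosely following the char
 * device ioctl path):
 *
 *	cl = mei_cl_alloc_linked(dev);
 *	if (IS_ERR(cl))
 *		return PTR_ERR(cl);
 *	me_cl = mei_me_cl_by_uuid(dev, &uuid);
 *	if (!me_cl)
 *		return -ENOTTY;
 *	rets = mei_cl_connect(cl, me_cl, fp);
 *	mei_me_cl_put(me_cl);
 */
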
/**
 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: host client
 *
 * Return: 1 if tx_flow_ctrl_creds > 0, 0 otherwise, -EINVAL on invalid client
 */
static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (cl->tx_flow_ctrl_creds > 0)
		return 1;

	if (mei_cl_is_fixed_address(cl))
		return 1;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (cl->me_cl->tx_flow_ctrl_creds > 0)
			return 1;
	}
	return 0;
}

1202 
1203 /**
1204  * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
1205  *   for a client
1206  *
1207  * @cl: host client
1208  *
1209  * Return:
1210  *	0 on success
1211  *	-EINVAL when ctrl credits are <= 0
1212  */
1213 static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
1214 {
1215 	if (WARN_ON(!cl || !cl->me_cl))
1216 		return -EINVAL;
1217 
1218 	if (mei_cl_is_fixed_address(cl))
1219 		return 0;
1220 
1221 	if (mei_cl_is_single_recv_buf(cl)) {
1222 		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
1223 			return -EINVAL;
1224 		cl->me_cl->tx_flow_ctrl_creds--;
1225 	} else {
1226 		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
1227 			return -EINVAL;
1228 		cl->tx_flow_ctrl_creds--;
1229 	}
1230 	return 0;
1231 }
1232 
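/*
 * Credit scheme in short: a regular client consumes its own
 * tx_flow_ctrl_creds, me clients with a single receive buffer share one
 * credit pool on the me client side, and fixed address clients are not
 * flow controlled at all.
 */
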
/**
 * mei_cl_notify_fop2req - convert fop to proper request
 *
 * @fop: client notification start/stop file operation type
 *
 * Return: MEI_HBM_NOTIFICATION_START/STOP
 */
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
{
	if (fop == MEI_FOP_NOTIFY_START)
		return MEI_HBM_NOTIFICATION_START;
	else
		return MEI_HBM_NOTIFICATION_STOP;
}

1247 
1248 /**
1249  *  mei_cl_notify_req2fop - convert notification request top file operation type
1250  *
1251  * @req: hbm notification request type
1252  *
1253  * Return:  MEI_FOP_NOTIFY_START/STOP
1254  */
1255 enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
1256 {
1257 	if (req == MEI_HBM_NOTIFICATION_START)
1258 		return MEI_FOP_NOTIFY_START;
1259 	else
1260 		return MEI_FOP_NOTIFY_STOP;
1261 }
1262 
1263 /**
1264  * mei_cl_irq_notify - send notification request in irq_thread context
1265  *
1266  * @cl: client
1267  * @cb: callback block.
1268  * @cmpl_list: complete list.
1269  *
1270  * Return: 0 on such and error otherwise.
1271  */
1272 int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
1273 		      struct list_head *cmpl_list)
1274 {
1275 	struct mei_device *dev = cl->dev;
1276 	u32 msg_slots;
1277 	int slots;
1278 	int ret;
1279 	bool request;
1280 
1281 	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
1282 	slots = mei_hbuf_empty_slots(dev);
1283 	if (slots < 0)
1284 		return -EOVERFLOW;
1285 
1286 	if ((u32)slots < msg_slots)
1287 		return -EMSGSIZE;
1288 
1289 	request = mei_cl_notify_fop2req(cb->fop_type);
1290 	ret = mei_hbm_cl_notify_req(dev, cl, request);
1291 	if (ret) {
1292 		cl->status = ret;
1293 		list_move_tail(&cb->list, cmpl_list);
1294 		return ret;
1295 	}
1296 
1297 	list_move_tail(&cb->list, &dev->ctrl_rd_list);
1298 	return 0;
1299 }
1300 
1301 /**
1302  * mei_cl_notify_request - send notification stop/start request
1303  *
1304  * @cl: host client
1305  * @fp: associate request with file
1306  * @request: 1 for start or 0 for stop
1307  *
1308  * Locking: called under "dev->device_lock" lock
1309  *
1310  * Return: 0 on such and error otherwise.
1311  */
1312 int mei_cl_notify_request(struct mei_cl *cl,
1313 			  const struct file *fp, u8 request)
1314 {
1315 	struct mei_device *dev;
1316 	struct mei_cl_cb *cb;
1317 	enum mei_cb_file_ops fop_type;
1318 	int rets;
1319 
1320 	if (WARN_ON(!cl || !cl->dev))
1321 		return -ENODEV;
1322 
1323 	dev = cl->dev;
1324 
1325 	if (!dev->hbm_f_ev_supported) {
1326 		cl_dbg(dev, cl, "notifications not supported\n");
1327 		return -EOPNOTSUPP;
1328 	}
1329 
1330 	if (!mei_cl_is_connected(cl))
1331 		return -ENODEV;
1332 
1333 	rets = pm_runtime_get(dev->dev);
1334 	if (rets < 0 && rets != -EINPROGRESS) {
1335 		pm_runtime_put_noidle(dev->dev);
1336 		cl_err(dev, cl, "rpm: get failed %d\n", rets);
1337 		return rets;
1338 	}
1339 
1340 	fop_type = mei_cl_notify_req2fop(request);
1341 	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
1342 	if (!cb) {
1343 		rets = -ENOMEM;
1344 		goto out;
1345 	}
1346 
1347 	if (mei_hbuf_acquire(dev)) {
1348 		if (mei_hbm_cl_notify_req(dev, cl, request)) {
1349 			rets = -ENODEV;
1350 			goto out;
1351 		}
1352 		list_move_tail(&cb->list, &dev->ctrl_rd_list);
1353 	}
1354 
1355 	mutex_unlock(&dev->device_lock);
1356 	wait_event_timeout(cl->wait,
1357 			   cl->notify_en == request ||
1358 			   cl->status ||
1359 			   !mei_cl_is_connected(cl),
1360 			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
1361 	mutex_lock(&dev->device_lock);
1362 
1363 	if (cl->notify_en != request && !cl->status)
1364 		cl->status = -EFAULT;
1365 
1366 	rets = cl->status;
1367 
1368 out:
1369 	cl_dbg(dev, cl, "rpm: autosuspend\n");
1370 	pm_runtime_mark_last_busy(dev->dev);
1371 	pm_runtime_put_autosuspend(dev->dev);
1372 
1373 	mei_io_cb_free(cb);
1374 	return rets;
1375 }
1376 
1377 /**
1378  * mei_cl_notify - raise notification
1379  *
1380  * @cl: host client
1381  *
1382  * Locking: called under "dev->device_lock" lock
1383  */
1384 void mei_cl_notify(struct mei_cl *cl)
1385 {
1386 	struct mei_device *dev;
1387 
1388 	if (!cl || !cl->dev)
1389 		return;
1390 
1391 	dev = cl->dev;
1392 
1393 	if (!cl->notify_en)
1394 		return;
1395 
1396 	cl_dbg(dev, cl, "notify event");
1397 	cl->notify_ev = true;
1398 	if (!mei_cl_bus_notify_event(cl))
1399 		wake_up_interruptible(&cl->ev_wait);
1400 
1401 	if (cl->ev_async)
1402 		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
1403 
1404 }
1405 
/**
 * mei_cl_notify_get - get or wait for notification event
 *
 * @cl: host client
 * @block: this request is blocking
 * @notify_ev: true if notification event was received
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
	struct mei_device *dev;
	int rets;

	*notify_ev = false;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (cl->notify_ev)
		goto out;

	if (!block)
		return -EAGAIN;

	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
	mutex_lock(&dev->device_lock);

	if (rets < 0)
		return rets;

out:
	*notify_ev = cl->notify_ev;
	cl->notify_ev = false;
	return 0;
}

/**
 * mei_cl_read_start - start a read operation for the client
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
		return -ENOTTY;
	}

	if (mei_cl_is_fixed_address(cl))
		return 0;

	/* HW currently supports only one pending read */
	if (cl->rx_flow_ctrl_creds)
		return -EBUSY;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
	if (!cb)
		return -ENOMEM;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	rets = 0;
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_move_tail(&cb->list, &cl->rd_pending);
	}
	cl->rx_flow_ctrl_creds++;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
nortpm:
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}

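/**
 * mei_ext_hdr_set_vtag - fill the vtag extended header
 *
 * @ext: extended header to fill
 * @vtag: vtag value
 *
 * Return: the extended header length in slots
 */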
static inline u8 mei_ext_hdr_set_vtag(struct mei_ext_hdr *ext, u8 vtag)
{
	ext->type = MEI_EXT_HDR_VTAG;
	ext->ext_payload[0] = vtag;
	ext->length = mei_data2slots(sizeof(*ext));
	return ext->length;
}

/**
 * mei_msg_hdr_init - allocate and initialize mei message header
 *
 * @cb: message callback structure
 *
 * Return: a pointer to initialized header or ERR_PTR on failure
 */
static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
{
	size_t hdr_len;
	struct mei_ext_meta_hdr *meta;
	struct mei_ext_hdr *ext;
	struct mei_msg_hdr *mei_hdr;
	bool is_ext, is_vtag;

	if (!cb)
		return ERR_PTR(-EINVAL);

	/* Extended header for vtag is attached only on the first fragment */
	is_vtag = (cb->vtag && cb->buf_idx == 0);
	is_ext = is_vtag;

	/* Compute extended header size */
	hdr_len = sizeof(*mei_hdr);

	if (!is_ext)
		goto setup_hdr;

	hdr_len += sizeof(*meta);
	if (is_vtag)
		hdr_len += sizeof(*ext);

setup_hdr:
	mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
	if (!mei_hdr)
		return ERR_PTR(-ENOMEM);

	mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
	mei_hdr->me_addr = mei_cl_me_id(cb->cl);
	mei_hdr->internal = cb->internal;
	mei_hdr->extended = is_ext;

	if (!is_ext)
		goto out;

	meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
	if (is_vtag) {
		meta->count++;
		meta->size += mei_ext_hdr_set_vtag(meta->hdrs, cb->vtag);
	}
out:
	mei_hdr->length = hdr_len - sizeof(*mei_hdr);
	return mei_hdr;
}

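/*
 * Layout produced above for the first fragment of a vtag message
 * (sketch):
 *
 *	struct mei_msg_hdr	(extended = 1)
 *	struct mei_ext_meta_hdr	(count = 1)
 *	struct mei_ext_hdr	(MEI_EXT_HDR_VTAG + vtag payload)
 *
 * At this point mei_hdr->length covers only the extended headers;
 * the callers below add the payload length to it.
 */
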
/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct list_head *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr *mei_hdr = NULL;
	size_t hdr_len;
	size_t hbuf_len, dr_len;
	size_t buf_len;
	size_t data_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	int rets;
	bool first_chunk;
	const void *data;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->buf;

	first_chunk = cb->buf_idx == 0;

	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	buf_len = buf->size - cb->buf_idx;
	data = buf->data + cb->buf_idx;
	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto err;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	mei_hdr = mei_msg_hdr_init(cb);
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
		goto err;
	}

	cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
	       mei_hdr->extended, cb->vtag);

	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;

	/*
	 * Split the message only if we can write the whole host buffer,
	 * otherwise wait for next time the host buffer is empty.
	 */
	if (hdr_len + buf_len <= hbuf_len) {
		/* the whole remainder fits into the host buffer */
		data_len = buf_len;
		mei_hdr->msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		/* send the payload over the dma ring */
		mei_hdr->dma_ring = 1;
		if (buf_len > dr_len)
			buf_len = dr_len;
		else
			mei_hdr->msg_complete = 1;

		data_len = sizeof(dma_len);
		dma_len = buf_len;
		data = &dma_len;
	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
		/* the host buffer is completely empty: send what fits */
		buf_len = hbuf_len - hdr_len;
		data_len = buf_len;
	} else {
		kfree(mei_hdr);
		return 0;
	}
	mei_hdr->length += data_len;

	if (mei_hdr->dma_ring)
		mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);

	if (rets)
		goto err;

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += buf_len;

	if (first_chunk) {
		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
			rets = -EIO;
			goto err;
		}
	}

	if (mei_hdr->msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list);

	kfree(mei_hdr);
	return 0;

err:
	kfree(mei_hdr);
	cl->status = rets;
	list_move_tail(&cb->list, cmpl_list);
	return rets;
}

/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 *
 * Return: number of bytes sent on success, <0 on failure.
 */
ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr *mei_hdr = NULL;
	size_t hdr_len;
	size_t hbuf_len, dr_len;
	size_t buf_len;
	size_t data_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	ssize_t rets;
	bool blocking;
	const void *data;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->buf;
	buf_len = buf->size;

	cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);

	blocking = cb->blocking;
	data = buf->data;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %zd\n", rets);
		goto free;
	}

	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	rets = mei_cl_tx_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	mei_hdr = mei_msg_hdr_init(cb);
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
		goto err;
	}

	cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
	       mei_hdr->extended, cb->vtag);

	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf_len;
		goto out;
	}

	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf_len;
		goto out;
	}

	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto out;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	if (hdr_len + buf_len <= hbuf_len) {
		/* the whole message fits into the host buffer */
		data_len = buf_len;
		mei_hdr->msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		/* send the payload over the dma ring */
		mei_hdr->dma_ring = 1;
		if (buf_len > dr_len)
			buf_len = dr_len;
		else
			mei_hdr->msg_complete = 1;

		data_len = sizeof(dma_len);
		dma_len = buf_len;
		data = &dma_len;
	} else {
		/* send the first fragment that fits into the host buffer */
		buf_len = hbuf_len - hdr_len;
		data_len = buf_len;
	}

	mei_hdr->length += data_len;

	if (mei_hdr->dma_ring)
		mei_dma_ring_write(dev, buf->data, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);

	if (rets)
		goto err;

	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = buf_len;
	/* restore return value */
	buf_len = buf->size;

out:
	if (mei_hdr->msg_complete)
		mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
	else
		mei_tx_cb_enqueue(cb, &dev->write_list);

	cb = NULL;
	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE ||
				(!mei_cl_is_connected(cl)));
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			goto err;
		}
		if (cl->writing_state != MEI_WRITE_COMPLETE) {
			rets = -EFAULT;
			goto err;
		}
	}

	rets = buf_len;
err:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
free:
	mei_io_cb_free(cb);

	kfree(mei_hdr);

	return rets;
}

/**
 * mei_cl_complete - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev = cl->dev;

	switch (cb->fop_type) {
	case MEI_FOP_WRITE:
		mei_tx_cb_dequeue(cb);
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait)) {
			wake_up_interruptible(&cl->tx_wait);
		} else {
			pm_runtime_mark_last_busy(dev->dev);
			pm_request_autosuspend(dev->dev);
		}
		break;

	case MEI_FOP_READ:
		list_add_tail(&cb->list, &cl->rd_completed);
		if (!mei_cl_is_fixed_address(cl) &&
		    !WARN_ON(!cl->rx_flow_ctrl_creds))
			cl->rx_flow_ctrl_creds--;
		if (!mei_cl_bus_rx_event(cl))
			wake_up_interruptible(&cl->rx_wait);
		break;

	case MEI_FOP_CONNECT:
	case MEI_FOP_DISCONNECT:
	case MEI_FOP_NOTIFY_STOP:
	case MEI_FOP_NOTIFY_START:
		if (waitqueue_active(&cl->wait))
			wake_up(&cl->wait);

		break;
	case MEI_FOP_DISCONNECT_RSP:
		mei_io_cb_free(cb);
		mei_cl_set_disconnected(cl);
		break;
	default:
		WARN(1, "unexpected fop type %d\n", cb->fop_type);
	}
}

/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl;

	list_for_each_entry(cl, &dev->file_list, link)
		mei_cl_set_disconnected(cl);
}