1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
4  * Intel Management Engine Interface (Intel MEI) Linux driver
5  */
6 
7 #include <linux/sched/signal.h>
8 #include <linux/wait.h>
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/pm_runtime.h>
12 
13 #include <linux/mei.h>
14 
15 #include "mei_dev.h"
16 #include "hbm.h"
17 #include "client.h"
18 
19 /**
20  * mei_me_cl_init - initialize me client
21  *
22  * @me_cl: me client
23  */
24 void mei_me_cl_init(struct mei_me_client *me_cl)
25 {
26 	INIT_LIST_HEAD(&me_cl->list);
27 	kref_init(&me_cl->refcnt);
28 }
29 
30 /**
31  * mei_me_cl_get - increases me client refcount
32  *
33  * @me_cl: me client
34  *
35  * Locking: called under "dev->device_lock" lock
36  *
37  * Return: me client or NULL
38  */
39 struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
40 {
41 	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
42 		return me_cl;
43 
44 	return NULL;
45 }
46 
47 /**
48  * mei_me_cl_release - free me client
49  *
50  * @ref: me_client refcount
51  *
52  * Locking: called under "dev->device_lock" lock
53  */
54 static void mei_me_cl_release(struct kref *ref)
55 {
56 	struct mei_me_client *me_cl =
57 		container_of(ref, struct mei_me_client, refcnt);
58 
59 	kfree(me_cl);
60 }
61 
62 /**
63  * mei_me_cl_put - decrease me client refcount and free client if necessary
64  *
65  * @me_cl: me client
66  *
67  * Locking: called under "dev->device_lock" lock
68  */
69 void mei_me_cl_put(struct mei_me_client *me_cl)
70 {
71 	if (me_cl)
72 		kref_put(&me_cl->refcnt, mei_me_cl_release);
73 }
74 
75 /**
76  * __mei_me_cl_del - delete me client from the list and decrease
77  *     reference counter
78  *
79  * @dev: mei device
80  * @me_cl: me client
81  *
82  * Locking: dev->me_clients_rwsem
83  */
84 static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
85 {
86 	if (!me_cl)
87 		return;
88 
89 	list_del_init(&me_cl->list);
90 	mei_me_cl_put(me_cl);
91 }
92 
93 /**
94  * mei_me_cl_del - delete me client from the list and decrease
95  *     reference counter
96  *
97  * @dev: mei device
98  * @me_cl: me client
99  */
100 void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
101 {
102 	down_write(&dev->me_clients_rwsem);
103 	__mei_me_cl_del(dev, me_cl);
104 	up_write(&dev->me_clients_rwsem);
105 }
106 
107 /**
108  * mei_me_cl_add - add me client to the list
109  *
110  * @dev: mei device
111  * @me_cl: me client
112  */
113 void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
114 {
115 	down_write(&dev->me_clients_rwsem);
116 	list_add(&me_cl->list, &dev->me_clients);
117 	up_write(&dev->me_clients_rwsem);
118 }
119 
120 /**
121  * __mei_me_cl_by_uuid - locate me client by uuid
122  *	increases ref count
123  *
124  * @dev: mei device
125  * @uuid: me client uuid
126  *
127  * Return: me client or NULL if not found
128  *
129  * Locking: dev->me_clients_rwsem
130  */
131 static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
132 					const uuid_le *uuid)
133 {
134 	struct mei_me_client *me_cl;
135 	const uuid_le *pn;
136 
137 	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
138 
139 	list_for_each_entry(me_cl, &dev->me_clients, list) {
140 		pn = &me_cl->props.protocol_name;
141 		if (uuid_le_cmp(*uuid, *pn) == 0)
142 			return mei_me_cl_get(me_cl);
143 	}
144 
145 	return NULL;
146 }
147 
148 /**
149  * mei_me_cl_by_uuid - locate me client by uuid
150  *	increases ref count
151  *
152  * @dev: mei device
153  * @uuid: me client uuid
154  *
155  * Return: me client or NULL if not found
156  *
157  * Locking: dev->me_clients_rwsem
158  */
159 struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
160 					const uuid_le *uuid)
161 {
162 	struct mei_me_client *me_cl;
163 
164 	down_read(&dev->me_clients_rwsem);
165 	me_cl = __mei_me_cl_by_uuid(dev, uuid);
166 	up_read(&dev->me_clients_rwsem);
167 
168 	return me_cl;
169 }
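
/*
 * Usage sketch (illustrative, not part of the driver): the lookup helpers
 * above return a referenced ME client, so each successful lookup has to be
 * balanced with mei_me_cl_put(), e.g. assuming a uuid_le variable "uuid":
 *
 *	struct mei_me_client *me_cl;
 *
 *	me_cl = mei_me_cl_by_uuid(dev, &uuid);
 *	if (!me_cl)
 *		return -ENOTTY;
 *	(use me_cl->props here)
 *	mei_me_cl_put(me_cl);
 */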
170 
171 /**
172  * mei_me_cl_by_id - locate me client by client id
173  *	increases ref count
174  *
175  * @dev: the device structure
176  * @client_id: me client id
177  *
178  * Return: me client or NULL if not found
179  *
180  * Locking: dev->me_clients_rwsem
181  */
182 struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
183 {
184 
185 	struct mei_me_client *__me_cl, *me_cl = NULL;
186 
187 	down_read(&dev->me_clients_rwsem);
188 	list_for_each_entry(__me_cl, &dev->me_clients, list) {
189 		if (__me_cl->client_id == client_id) {
190 			me_cl = mei_me_cl_get(__me_cl);
191 			break;
192 		}
193 	}
194 	up_read(&dev->me_clients_rwsem);
195 
196 	return me_cl;
197 }
198 
199 /**
200  * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
201  *	increases ref count
202  *
203  * @dev: the device structure
204  * @uuid: me client uuid
205  * @client_id: me client id
206  *
207  * Return: me client or NULL if not found
208  *
209  * Locking: dev->me_clients_rwsem
210  */
211 static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
212 					   const uuid_le *uuid, u8 client_id)
213 {
214 	struct mei_me_client *me_cl;
215 	const uuid_le *pn;
216 
217 	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));
218 
219 	list_for_each_entry(me_cl, &dev->me_clients, list) {
220 		pn = &me_cl->props.protocol_name;
221 		if (uuid_le_cmp(*uuid, *pn) == 0 &&
222 		    me_cl->client_id == client_id)
223 			return mei_me_cl_get(me_cl);
224 	}
225 
226 	return NULL;
227 }
228 
229 
230 /**
231  * mei_me_cl_by_uuid_id - locate me client by client id and uuid
232  *	increases ref count
233  *
234  * @dev: the device structure
235  * @uuid: me client uuid
236  * @client_id: me client id
237  *
238  * Return: me client or NULL if not found
239  */
240 struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
241 					   const uuid_le *uuid, u8 client_id)
242 {
243 	struct mei_me_client *me_cl;
244 
245 	down_read(&dev->me_clients_rwsem);
246 	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
247 	up_read(&dev->me_clients_rwsem);
248 
249 	return me_cl;
250 }
251 
252 /**
253  * mei_me_cl_rm_by_uuid - remove me client matching uuid
254  *
255  * @dev: the device structure
256  * @uuid: me client uuid
257  *
258  * Locking: called under "dev->device_lock" lock
259  */
260 void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
261 {
262 	struct mei_me_client *me_cl;
263 
264 	dev_dbg(dev->dev, "remove %pUl\n", uuid);
265 
266 	down_write(&dev->me_clients_rwsem);
267 	me_cl = __mei_me_cl_by_uuid(dev, uuid);
268 	__mei_me_cl_del(dev, me_cl);
269 	mei_me_cl_put(me_cl);
270 	up_write(&dev->me_clients_rwsem);
271 }
272 
273 /**
274  * mei_me_cl_rm_by_uuid_id - remove me client matching uuid and client id
275  *
276  * @dev: the device structure
277  * @uuid: me client uuid
278  * @id: me client id
279  *
280  * Locking: called under "dev->device_lock" lock
281  */
282 void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
283 {
284 	struct mei_me_client *me_cl;
285 
286 	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);
287 
288 	down_write(&dev->me_clients_rwsem);
289 	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
290 	__mei_me_cl_del(dev, me_cl);
291 	mei_me_cl_put(me_cl);
292 	up_write(&dev->me_clients_rwsem);
293 }
294 
295 /**
296  * mei_me_cl_rm_all - remove all me clients
297  *
298  * @dev: the device structure
299  *
300  * Locking: called under "dev->device_lock" lock
301  */
302 void mei_me_cl_rm_all(struct mei_device *dev)
303 {
304 	struct mei_me_client *me_cl, *next;
305 
306 	down_write(&dev->me_clients_rwsem);
307 	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
308 		__mei_me_cl_del(dev, me_cl);
309 	up_write(&dev->me_clients_rwsem);
310 }
311 
312 /**
313  * mei_io_cb_free - free mei_cb_private related memory
314  *
315  * @cb: mei callback struct
316  */
317 void mei_io_cb_free(struct mei_cl_cb *cb)
318 {
319 	if (cb == NULL)
320 		return;
321 
322 	list_del(&cb->list);
323 	kfree(cb->buf.data);
324 	kfree(cb);
325 }
326 
327 /**
328  * mei_tx_cb_enqueue - queue tx callback
329  *
330  * @cb: mei callback struct
331  * @head: an instance of list to queue on
332  *
333  * Locking: called under "dev->device_lock" lock
334  */
335 static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
336 				     struct list_head *head)
337 {
338 	list_add_tail(&cb->list, head);
339 	cb->cl->tx_cb_queued++;
340 }
341 
342 /**
343  * mei_tx_cb_dequeue - dequeue tx callback
344  *
345  * @cb: mei callback struct to dequeue and free
346  *
347  * Locking: called under "dev->device_lock" lock
348  */
349 static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
350 {
351 	if (!WARN_ON(cb->cl->tx_cb_queued == 0))
352 		cb->cl->tx_cb_queued--;
353 
354 	mei_io_cb_free(cb);
355 }
356 
357 /**
358  * mei_cl_set_read_by_fp - set the pending_read flag in the vtag struct matching the given fp
359  *
360  * @cl: mei client
361  * @fp: pointer to file structure
362  *
363  * Locking: called under "dev->device_lock" lock
364  */
365 static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
366 				  const struct file *fp)
367 {
368 	struct mei_cl_vtag *cl_vtag;
369 
370 	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
371 		if (cl_vtag->fp == fp) {
372 			cl_vtag->pending_read = true;
373 			return;
374 		}
375 	}
376 }
377 
378 /**
379  * mei_io_cb_init - allocate and initialize io callback
380  *
381  * @cl: mei client
382  * @type: operation type
383  * @fp: pointer to file structure
384  *
385  * Return: mei_cl_cb pointer or NULL
386  */
387 static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
388 					enum mei_cb_file_ops type,
389 					const struct file *fp)
390 {
391 	struct mei_cl_cb *cb;
392 
393 	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
394 	if (!cb)
395 		return NULL;
396 
397 	INIT_LIST_HEAD(&cb->list);
398 	cb->fp = fp;
399 	cb->cl = cl;
400 	cb->buf_idx = 0;
401 	cb->fop_type = type;
402 	cb->vtag = 0;
403 
404 	return cb;
405 }
406 
407 /**
408  * mei_io_list_flush_cl - removes cbs belonging to the cl.
409  *
410  * @head:  an instance of our list structure
411  * @cl:    host client
412  */
413 static void mei_io_list_flush_cl(struct list_head *head,
414 				 const struct mei_cl *cl)
415 {
416 	struct mei_cl_cb *cb, *next;
417 
418 	list_for_each_entry_safe(cb, next, head, list) {
419 		if (cl == cb->cl) {
420 			list_del_init(&cb->list);
421 			if (cb->fop_type == MEI_FOP_READ)
422 				mei_io_cb_free(cb);
423 		}
424 	}
425 }
426 
427 /**
428  * mei_io_tx_list_free_cl - removes cb belonging to the cl and free them
429  *
430  * @head: An instance of our list structure
431  * @cl: host client
432  */
433 static void mei_io_tx_list_free_cl(struct list_head *head,
434 				   const struct mei_cl *cl)
435 {
436 	struct mei_cl_cb *cb, *next;
437 
438 	list_for_each_entry_safe(cb, next, head, list) {
439 		if (cl == cb->cl)
440 			mei_tx_cb_dequeue(cb);
441 	}
442 }
443 
444 /**
445  * mei_io_list_free_fp - free cb from a list that matches file pointer
446  *
447  * @head: io list
448  * @fp: file pointer (matching cb file object), may be NULL
449  */
450 static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
451 {
452 	struct mei_cl_cb *cb, *next;
453 
454 	list_for_each_entry_safe(cb, next, head, list)
455 		if (!fp || fp == cb->fp)
456 			mei_io_cb_free(cb);
457 }
458 
459 /**
460  * mei_cl_free_pending - free pending cb
461  *
462  * @cl: host client
463  */
464 static void mei_cl_free_pending(struct mei_cl *cl)
465 {
466 	struct mei_cl_cb *cb;
467 
468 	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
469 	mei_io_cb_free(cb);
470 }
471 
472 /**
473  * mei_cl_alloc_cb - a convenience wrapper for allocating an io callback
474  *
475  * @cl: host client
476  * @length: size of the buffer
477  * @fop_type: operation type
478  * @fp: associated file pointer (might be NULL)
479  *
480  * Return: cb on success and NULL on failure
481  */
482 struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
483 				  enum mei_cb_file_ops fop_type,
484 				  const struct file *fp)
485 {
486 	struct mei_cl_cb *cb;
487 
488 	cb = mei_io_cb_init(cl, fop_type, fp);
489 	if (!cb)
490 		return NULL;
491 
492 	if (length == 0)
493 		return cb;
494 
495 	cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
496 	if (!cb->buf.data) {
497 		mei_io_cb_free(cb);
498 		return NULL;
499 	}
500 	cb->buf.size = length;
501 
502 	return cb;
503 }
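
/*
 * Usage sketch (illustrative): allocating a write callback and filling its
 * buffer; the data buffer is rounded up to MEI_SLOT_SIZE internally while
 * cb->buf.size keeps the requested length:
 *
 *	struct mei_cl_cb *cb;
 *
 *	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, fp);
 *	if (!cb)
 *		return -ENOMEM;
 *	memcpy(cb->buf.data, src, length);
 */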
504 
505 /**
506  * mei_cl_enqueue_ctrl_wr_cb - a convenience wrapper for allocating
507  *     and enqueuing a control command cb
508  *
509  * @cl: host client
510  * @length: size of the buffer
511  * @fop_type: operation type
512  * @fp: associated file pointer (might be NULL)
513  *
514  * Return: cb on success and NULL on failure
515  * Locking: called under "dev->device_lock" lock
516  */
517 struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
518 					    enum mei_cb_file_ops fop_type,
519 					    const struct file *fp)
520 {
521 	struct mei_cl_cb *cb;
522 
523 	/* for RX always allocate at least client's mtu */
524 	if (length)
525 		length = max_t(size_t, length, mei_cl_mtu(cl));
526 
527 	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
528 	if (!cb)
529 		return NULL;
530 
531 	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
532 	return cb;
533 }
534 
535 /**
536  * mei_cl_read_cb - find this cl's callback in the read list
537  *     for a specific file
538  *
539  * @cl: host client
540  * @fp: file pointer (matching cb file object), may be NULL
541  *
542  * Return: cb on success, NULL if cb is not found
543  */
544 struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
545 {
546 	struct mei_cl_cb *cb;
547 	struct mei_cl_cb *ret_cb = NULL;
548 
549 	spin_lock(&cl->rd_completed_lock);
550 	list_for_each_entry(cb, &cl->rd_completed, list)
551 		if (!fp || fp == cb->fp) {
552 			ret_cb = cb;
553 			break;
554 		}
555 	spin_unlock(&cl->rd_completed_lock);
556 	return ret_cb;
557 }
558 
559 /**
560  * mei_cl_flush_queues - flushes queue lists belonging to cl.
561  *
562  * @cl: host client
563  * @fp: file pointer (matching cb file object), may be NULL
564  *
565  * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
566  */
567 int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
568 {
569 	struct mei_device *dev;
570 
571 	if (WARN_ON(!cl || !cl->dev))
572 		return -EINVAL;
573 
574 	dev = cl->dev;
575 
576 	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
577 	mei_io_tx_list_free_cl(&cl->dev->write_list, cl);
578 	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl);
579 	mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
580 	mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
581 	/* free pending cb only in final flush */
582 	if (!fp)
583 		mei_cl_free_pending(cl);
584 	spin_lock(&cl->rd_completed_lock);
585 	mei_io_list_free_fp(&cl->rd_completed, fp);
586 	spin_unlock(&cl->rd_completed_lock);
587 
588 	return 0;
589 }
590 
591 /**
592  * mei_cl_init - initializes cl.
593  *
594  * @cl: host client to be initialized
595  * @dev: mei device
596  */
597 static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
598 {
599 	memset(cl, 0, sizeof(*cl));
600 	init_waitqueue_head(&cl->wait);
601 	init_waitqueue_head(&cl->rx_wait);
602 	init_waitqueue_head(&cl->tx_wait);
603 	init_waitqueue_head(&cl->ev_wait);
604 	INIT_LIST_HEAD(&cl->vtag_map);
605 	spin_lock_init(&cl->rd_completed_lock);
606 	INIT_LIST_HEAD(&cl->rd_completed);
607 	INIT_LIST_HEAD(&cl->rd_pending);
608 	INIT_LIST_HEAD(&cl->link);
609 	cl->writing_state = MEI_IDLE;
610 	cl->state = MEI_FILE_UNINITIALIZED;
611 	cl->dev = dev;
612 }
613 
614 /**
615  * mei_cl_allocate - allocates cl structure and sets it up.
616  *
617  * @dev: mei device
618  * Return: the allocated host client structure or NULL on failure
619  */
620 struct mei_cl *mei_cl_allocate(struct mei_device *dev)
621 {
622 	struct mei_cl *cl;
623 
624 	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
625 	if (!cl)
626 		return NULL;
627 
628 	mei_cl_init(cl, dev);
629 
630 	return cl;
631 }
632 
633 /**
634  * mei_cl_link - allocate host id in the host map
635  *
636  * @cl: host client
637  *
638  * Return: 0 on success
639  *	-EINVAL on incorrect values
640  *	-EMFILE if open count exceeded.
641  */
642 int mei_cl_link(struct mei_cl *cl)
643 {
644 	struct mei_device *dev;
645 	int id;
646 
647 	if (WARN_ON(!cl || !cl->dev))
648 		return -EINVAL;
649 
650 	dev = cl->dev;
651 
652 	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
653 	if (id >= MEI_CLIENTS_MAX) {
654 		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
655 		return -EMFILE;
656 	}
657 
658 	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
659 		dev_err(dev->dev, "open_handle_count exceeded %d",
660 			MEI_MAX_OPEN_HANDLE_COUNT);
661 		return -EMFILE;
662 	}
663 
664 	dev->open_handle_count++;
665 
666 	cl->host_client_id = id;
667 	list_add_tail(&cl->link, &dev->file_list);
668 
669 	set_bit(id, dev->host_clients_map);
670 
671 	cl->state = MEI_FILE_INITIALIZING;
672 
673 	cl_dbg(dev, cl, "link cl\n");
674 	return 0;
675 }
676 
677 /**
678  * mei_cl_unlink - remove host client from the list
679  *
680  * @cl: host client
681  *
682  * Return: always 0
683  */
684 int mei_cl_unlink(struct mei_cl *cl)
685 {
686 	struct mei_device *dev;
687 
688 	/* don't shout on error exit path */
689 	if (!cl)
690 		return 0;
691 
692 	if (WARN_ON(!cl->dev))
693 		return 0;
694 
695 	dev = cl->dev;
696 
697 	cl_dbg(dev, cl, "unlink client");
698 
699 	if (dev->open_handle_count > 0)
700 		dev->open_handle_count--;
701 
702 	/* never clear the 0 bit */
703 	if (cl->host_client_id)
704 		clear_bit(cl->host_client_id, dev->host_clients_map);
705 
706 	list_del_init(&cl->link);
707 
708 	cl->state = MEI_FILE_UNINITIALIZED;
709 	cl->writing_state = MEI_IDLE;
710 
711 	WARN_ON(!list_empty(&cl->rd_completed) ||
712 		!list_empty(&cl->rd_pending) ||
713 		!list_empty(&cl->link));
714 
715 	return 0;
716 }
717 
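/**
 * mei_host_client_init - set the device to the enabled state and
 *	schedule a rescan of the clients on the bus
 *
 * @dev: the device structure
 */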
718 void mei_host_client_init(struct mei_device *dev)
719 {
720 	mei_set_devstate(dev, MEI_DEV_ENABLED);
721 	dev->reset_count = 0;
722 
723 	schedule_work(&dev->bus_rescan_work);
724 
725 	pm_runtime_mark_last_busy(dev->dev);
726 	dev_dbg(dev->dev, "rpm: autosuspend\n");
727 	pm_request_autosuspend(dev->dev);
728 }
729 
730 /**
731  * mei_hbuf_acquire - try to acquire host buffer
732  *
733  * @dev: the device structure
734  * Return: true if host buffer was acquired
735  */
736 bool mei_hbuf_acquire(struct mei_device *dev)
737 {
738 	if (mei_pg_state(dev) == MEI_PG_ON ||
739 	    mei_pg_in_transition(dev)) {
740 		dev_dbg(dev->dev, "device is in pg\n");
741 		return false;
742 	}
743 
744 	if (!dev->hbuf_is_ready) {
745 		dev_dbg(dev->dev, "hbuf is not ready\n");
746 		return false;
747 	}
748 
749 	dev->hbuf_is_ready = false;
750 
751 	return true;
752 }
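
/*
 * Acquire pattern sketch (illustrative): callers in this file try the host
 * buffer first and fall back to leaving the request queued for the
 * interrupt thread when the buffer is busy:
 *
 *	if (mei_hbuf_acquire(dev)) {
 *		rets = mei_cl_send_connect(cl, cb);
 *		if (rets)
 *			goto out;
 *	}
 *	(otherwise the cb stays on dev->ctrl_wr_list and is sent later)
 */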
753 
754 /**
755  * mei_cl_wake_all - wake up readers, writers and event waiters so
756  *                 they can be interrupted
757  *
758  * @cl: host client
759  */
760 static void mei_cl_wake_all(struct mei_cl *cl)
761 {
762 	struct mei_device *dev = cl->dev;
763 
764 	/* synchronized under device mutex */
765 	if (waitqueue_active(&cl->rx_wait)) {
766 		cl_dbg(dev, cl, "Waking up reading client!\n");
767 		wake_up_interruptible(&cl->rx_wait);
768 	}
769 	/* synchronized under device mutex */
770 	if (waitqueue_active(&cl->tx_wait)) {
771 		cl_dbg(dev, cl, "Waking up writing client!\n");
772 		wake_up_interruptible(&cl->tx_wait);
773 	}
774 	/* synchronized under device mutex */
775 	if (waitqueue_active(&cl->ev_wait)) {
776 		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
777 		wake_up_interruptible(&cl->ev_wait);
778 	}
779 	/* synchronized under device mutex */
780 	if (waitqueue_active(&cl->wait)) {
781 		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
782 		wake_up(&cl->wait);
783 	}
784 }
785 
786 /**
787  * mei_cl_set_disconnected - set disconnected state and clear
788  *   associated states and resources
789  *
790  * @cl: host client
791  */
792 static void mei_cl_set_disconnected(struct mei_cl *cl)
793 {
794 	struct mei_device *dev = cl->dev;
795 
796 	if (cl->state == MEI_FILE_DISCONNECTED ||
797 	    cl->state <= MEI_FILE_INITIALIZING)
798 		return;
799 
800 	cl->state = MEI_FILE_DISCONNECTED;
801 	mei_io_tx_list_free_cl(&dev->write_list, cl);
802 	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl);
803 	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
804 	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
805 	mei_cl_wake_all(cl);
806 	cl->rx_flow_ctrl_creds = 0;
807 	cl->tx_flow_ctrl_creds = 0;
808 	cl->timer_count = 0;
809 
810 	if (!cl->me_cl)
811 		return;
812 
813 	if (!WARN_ON(cl->me_cl->connect_count == 0))
814 		cl->me_cl->connect_count--;
815 
816 	if (cl->me_cl->connect_count == 0)
817 		cl->me_cl->tx_flow_ctrl_creds = 0;
818 
819 	mei_me_cl_put(cl->me_cl);
820 	cl->me_cl = NULL;
821 }
822 
823 static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
824 {
825 	if (!mei_me_cl_get(me_cl))
826 		return -ENOENT;
827 
828 	/* only one connection is allowed for fixed address clients */
829 	if (me_cl->props.fixed_address) {
830 		if (me_cl->connect_count) {
831 			mei_me_cl_put(me_cl);
832 			return -EBUSY;
833 		}
834 	}
835 
836 	cl->me_cl = me_cl;
837 	cl->state = MEI_FILE_CONNECTING;
838 	cl->me_cl->connect_count++;
839 
840 	return 0;
841 }
842 
843 /**
844  * mei_cl_send_disconnect - send disconnect request
845  *
846  * @cl: host client
847  * @cb: callback block
848  *
849  * Return: 0, OK; otherwise, error.
850  */
851 static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
852 {
853 	struct mei_device *dev;
854 	int ret;
855 
856 	dev = cl->dev;
857 
858 	ret = mei_hbm_cl_disconnect_req(dev, cl);
859 	cl->status = ret;
860 	if (ret) {
861 		cl->state = MEI_FILE_DISCONNECT_REPLY;
862 		return ret;
863 	}
864 
865 	list_move_tail(&cb->list, &dev->ctrl_rd_list);
866 	cl->timer_count = MEI_CONNECT_TIMEOUT;
867 	mei_schedule_stall_timer(dev);
868 
869 	return 0;
870 }
871 
872 /**
873  * mei_cl_irq_disconnect - processes close related operation from
874  *	interrupt thread context - send disconnect request
875  *
876  * @cl: client
877  * @cb: callback block.
878  * @cmpl_list: complete list.
879  *
880  * Return: 0, OK; otherwise, error.
881  */
882 int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
883 			  struct list_head *cmpl_list)
884 {
885 	struct mei_device *dev = cl->dev;
886 	u32 msg_slots;
887 	int slots;
888 	int ret;
889 
890 	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
891 	slots = mei_hbuf_empty_slots(dev);
892 	if (slots < 0)
893 		return -EOVERFLOW;
894 
895 	if ((u32)slots < msg_slots)
896 		return -EMSGSIZE;
897 
898 	ret = mei_cl_send_disconnect(cl, cb);
899 	if (ret)
900 		list_move_tail(&cb->list, cmpl_list);
901 
902 	return ret;
903 }
904 
905 /**
906  * __mei_cl_disconnect - disconnect host client from the ME client;
907  *     internal function, runtime pm has to be already acquired
908  *
909  * @cl: host client
910  *
911  * Return: 0 on success, <0 on failure.
912  */
913 static int __mei_cl_disconnect(struct mei_cl *cl)
914 {
915 	struct mei_device *dev;
916 	struct mei_cl_cb *cb;
917 	int rets;
918 
919 	dev = cl->dev;
920 
921 	cl->state = MEI_FILE_DISCONNECTING;
922 
923 	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
924 	if (!cb) {
925 		rets = -ENOMEM;
926 		goto out;
927 	}
928 
929 	if (mei_hbuf_acquire(dev)) {
930 		rets = mei_cl_send_disconnect(cl, cb);
931 		if (rets) {
932 			cl_err(dev, cl, "failed to disconnect.\n");
933 			goto out;
934 		}
935 	}
936 
937 	mutex_unlock(&dev->device_lock);
938 	wait_event_timeout(cl->wait,
939 			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
940 			   cl->state == MEI_FILE_DISCONNECTED,
941 			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
942 	mutex_lock(&dev->device_lock);
943 
944 	rets = cl->status;
945 	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
946 	    cl->state != MEI_FILE_DISCONNECTED) {
947 		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
948 		rets = -ETIME;
949 	}
950 
951 out:
952 	/* we disconnect also on error */
953 	mei_cl_set_disconnected(cl);
954 	if (!rets)
955 		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
956 
957 	mei_io_cb_free(cb);
958 	return rets;
959 }
960 
961 /**
962  * mei_cl_disconnect - disconnect host client from the ME client
963  *
964  * @cl: host client
965  *
966  * Locking: called under "dev->device_lock" lock
967  *
968  * Return: 0 on success, <0 on failure.
969  */
970 int mei_cl_disconnect(struct mei_cl *cl)
971 {
972 	struct mei_device *dev;
973 	int rets;
974 
975 	if (WARN_ON(!cl || !cl->dev))
976 		return -ENODEV;
977 
978 	dev = cl->dev;
979 
980 	cl_dbg(dev, cl, "disconnecting");
981 
982 	if (!mei_cl_is_connected(cl))
983 		return 0;
984 
985 	if (mei_cl_is_fixed_address(cl)) {
986 		mei_cl_set_disconnected(cl);
987 		return 0;
988 	}
989 
990 	if (dev->dev_state == MEI_DEV_POWER_DOWN) {
991 		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
992 		mei_cl_set_disconnected(cl);
993 		return 0;
994 	}
995 
996 	rets = pm_runtime_get(dev->dev);
997 	if (rets < 0 && rets != -EINPROGRESS) {
998 		pm_runtime_put_noidle(dev->dev);
999 		cl_err(dev, cl, "rpm: get failed %d\n", rets);
1000 		return rets;
1001 	}
1002 
1003 	rets = __mei_cl_disconnect(cl);
1004 
1005 	cl_dbg(dev, cl, "rpm: autosuspend\n");
1006 	pm_runtime_mark_last_busy(dev->dev);
1007 	pm_runtime_put_autosuspend(dev->dev);
1008 
1009 	return rets;
1010 }
1011 
1012 
1013 /**
1014  * mei_cl_is_other_connecting - checks if other
1015  *    client with the same me client id is connecting
1016  *
1017  * @cl: private data of the file object
1018  *
1019  * Return: true if another client is connecting, false otherwise.
1020  */
1021 static bool mei_cl_is_other_connecting(struct mei_cl *cl)
1022 {
1023 	struct mei_device *dev;
1024 	struct mei_cl_cb *cb;
1025 
1026 	dev = cl->dev;
1027 
1028 	list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
1029 		if (cb->fop_type == MEI_FOP_CONNECT &&
1030 		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
1031 			return true;
1032 	}
1033 
1034 	return false;
1035 }
1036 
1037 /**
1038  * mei_cl_send_connect - send connect request
1039  *
1040  * @cl: host client
1041  * @cb: callback block
1042  *
1043  * Return: 0, OK; otherwise, error.
1044  */
1045 static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
1046 {
1047 	struct mei_device *dev;
1048 	int ret;
1049 
1050 	dev = cl->dev;
1051 
1052 	ret = mei_hbm_cl_connect_req(dev, cl);
1053 	cl->status = ret;
1054 	if (ret) {
1055 		cl->state = MEI_FILE_DISCONNECT_REPLY;
1056 		return ret;
1057 	}
1058 
1059 	list_move_tail(&cb->list, &dev->ctrl_rd_list);
1060 	cl->timer_count = MEI_CONNECT_TIMEOUT;
1061 	mei_schedule_stall_timer(dev);
1062 	return 0;
1063 }
1064 
1065 /**
1066  * mei_cl_irq_connect - send connect request in irq_thread context
1067  *
1068  * @cl: host client
1069  * @cb: callback block
1070  * @cmpl_list: complete list
1071  *
1072  * Return: 0, OK; otherwise, error.
1073  */
1074 int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
1075 		       struct list_head *cmpl_list)
1076 {
1077 	struct mei_device *dev = cl->dev;
1078 	u32 msg_slots;
1079 	int slots;
1080 	int rets;
1081 
1082 	if (mei_cl_is_other_connecting(cl))
1083 		return 0;
1084 
1085 	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
1086 	slots = mei_hbuf_empty_slots(dev);
1087 	if (slots < 0)
1088 		return -EOVERFLOW;
1089 
1090 	if ((u32)slots < msg_slots)
1091 		return -EMSGSIZE;
1092 
1093 	rets = mei_cl_send_connect(cl, cb);
1094 	if (rets)
1095 		list_move_tail(&cb->list, cmpl_list);
1096 
1097 	return rets;
1098 }
1099 
1100 /**
1101  * mei_cl_connect - connect host client to the ME client
1102  *
1103  * @cl: host client
1104  * @me_cl: me client
1105  * @fp: pointer to file structure
1106  *
1107  * Locking: called under "dev->device_lock" lock
1108  *
1109  * Return: 0 on success, <0 on failure.
1110  */
1111 int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
1112 		   const struct file *fp)
1113 {
1114 	struct mei_device *dev;
1115 	struct mei_cl_cb *cb;
1116 	int rets;
1117 
1118 	if (WARN_ON(!cl || !cl->dev || !me_cl))
1119 		return -ENODEV;
1120 
1121 	dev = cl->dev;
1122 
1123 	rets = mei_cl_set_connecting(cl, me_cl);
1124 	if (rets)
1125 		goto nortpm;
1126 
1127 	if (mei_cl_is_fixed_address(cl)) {
1128 		cl->state = MEI_FILE_CONNECTED;
1129 		rets = 0;
1130 		goto nortpm;
1131 	}
1132 
1133 	rets = pm_runtime_get(dev->dev);
1134 	if (rets < 0 && rets != -EINPROGRESS) {
1135 		pm_runtime_put_noidle(dev->dev);
1136 		cl_err(dev, cl, "rpm: get failed %d\n", rets);
1137 		goto nortpm;
1138 	}
1139 
1140 	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
1141 	if (!cb) {
1142 		rets = -ENOMEM;
1143 		goto out;
1144 	}
1145 
1146 	/* run hbuf acquire last so we don't have to undo */
1147 	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
1148 		rets = mei_cl_send_connect(cl, cb);
1149 		if (rets)
1150 			goto out;
1151 	}
1152 
1153 	mutex_unlock(&dev->device_lock);
1154 	wait_event_timeout(cl->wait,
1155 			(cl->state == MEI_FILE_CONNECTED ||
1156 			 cl->state == MEI_FILE_DISCONNECTED ||
1157 			 cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
1158 			 cl->state == MEI_FILE_DISCONNECT_REPLY),
1159 			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
1160 	mutex_lock(&dev->device_lock);
1161 
1162 	if (!mei_cl_is_connected(cl)) {
1163 		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
1164 			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
1165 			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
1166 			/* ignore disconnect return value;
1167 			 * in case of failure reset will be invoked
1168 			 */
1169 			__mei_cl_disconnect(cl);
1170 			rets = -EFAULT;
1171 			goto out;
1172 		}
1173 
1174 		/* timeout or something went really wrong */
1175 		if (!cl->status)
1176 			cl->status = -EFAULT;
1177 	}
1178 
1179 	rets = cl->status;
1180 out:
1181 	cl_dbg(dev, cl, "rpm: autosuspend\n");
1182 	pm_runtime_mark_last_busy(dev->dev);
1183 	pm_runtime_put_autosuspend(dev->dev);
1184 
1185 	mei_io_cb_free(cb);
1186 
1187 nortpm:
1188 	if (!mei_cl_is_connected(cl))
1189 		mei_cl_set_disconnected(cl);
1190 
1191 	return rets;
1192 }
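
/*
 * Connect flow sketch (illustrative, error handling abbreviated):
 * mei_cl_set_connecting() takes its own reference on the ME client, so
 * the caller drops the lookup reference when done:
 *
 *	me_cl = mei_me_cl_by_uuid(dev, &uuid);
 *	cl = mei_cl_alloc_linked(dev);
 *	rets = mei_cl_connect(cl, me_cl, fp);
 *	mei_me_cl_put(me_cl);
 */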
1193 
1194 /**
1195  * mei_cl_alloc_linked - allocate and link host client
1196  *
1197  * @dev: the device structure
1198  *
1199  * Return: cl on success, ERR_PTR on failure
1200  */
1201 struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
1202 {
1203 	struct mei_cl *cl;
1204 	int ret;
1205 
1206 	cl = mei_cl_allocate(dev);
1207 	if (!cl) {
1208 		ret = -ENOMEM;
1209 		goto err;
1210 	}
1211 
1212 	ret = mei_cl_link(cl);
1213 	if (ret)
1214 		goto err;
1215 
1216 	return cl;
1217 err:
1218 	kfree(cl);
1219 	return ERR_PTR(ret);
1220 }
1221 
1222 /**
1223  * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
1224  *
1225  * @cl: host client
1226  *
1227  * Return: 1 if the client has tx flow control credits, 0 otherwise; -EINVAL on invalid input.
1228  */
1229 static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
1230 {
1231 	if (WARN_ON(!cl || !cl->me_cl))
1232 		return -EINVAL;
1233 
1234 	if (cl->tx_flow_ctrl_creds > 0)
1235 		return 1;
1236 
1237 	if (mei_cl_is_fixed_address(cl))
1238 		return 1;
1239 
1240 	if (mei_cl_is_single_recv_buf(cl)) {
1241 		if (cl->me_cl->tx_flow_ctrl_creds > 0)
1242 			return 1;
1243 	}
1244 	return 0;
1245 }
1246 
1247 /**
1248  * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
1249  *   for a client
1250  *
1251  * @cl: host client
1252  *
1253  * Return:
1254  *	0 on success
1255  *	-EINVAL when ctrl credits are <= 0
1256  */
1257 static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
1258 {
1259 	if (WARN_ON(!cl || !cl->me_cl))
1260 		return -EINVAL;
1261 
1262 	if (mei_cl_is_fixed_address(cl))
1263 		return 0;
1264 
1265 	if (mei_cl_is_single_recv_buf(cl)) {
1266 		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
1267 			return -EINVAL;
1268 		cl->me_cl->tx_flow_ctrl_creds--;
1269 	} else {
1270 		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
1271 			return -EINVAL;
1272 		cl->tx_flow_ctrl_creds--;
1273 	}
1274 	return 0;
1275 }
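
/*
 * Credit accounting sketch: a regular client is granted one credit per
 * HBM flow control message and consumes one credit per transmitted
 * message. Assuming cl->tx_flow_ctrl_creds == 1:
 *
 *	mei_cl_tx_flow_ctrl_creds(cl)         returns 1
 *	mei_cl_tx_flow_ctrl_creds_reduce(cl)  drops the credits to 0
 *	mei_cl_tx_flow_ctrl_creds(cl)         returns 0, tx has to wait
 */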
1276 
1277 /**
1278  * mei_cl_vtag_alloc - allocate and fill the vtag structure
1279  *
1280  * @fp: pointer to file structure
1281  * @vtag: vm tag
1282  *
1283  * Return:
1284  * * Pointer to allocated struct - on success
1285  * * ERR_PTR(-ENOMEM) on memory allocation failure
1286  */
1287 struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
1288 {
1289 	struct mei_cl_vtag *cl_vtag;
1290 
1291 	cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
1292 	if (!cl_vtag)
1293 		return ERR_PTR(-ENOMEM);
1294 
1295 	INIT_LIST_HEAD(&cl_vtag->list);
1296 	cl_vtag->vtag = vtag;
1297 	cl_vtag->fp = fp;
1298 
1299 	return cl_vtag;
1300 }
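
/*
 * Usage sketch (illustrative): the return value has to be checked with
 * IS_ERR(); attaching the entry to the client's vtag map is up to the
 * caller:
 *
 *	struct mei_cl_vtag *cl_vtag;
 *
 *	cl_vtag = mei_cl_vtag_alloc(fp, vtag);
 *	if (IS_ERR(cl_vtag))
 *		return PTR_ERR(cl_vtag);
 *	list_add_tail(&cl_vtag->list, &cl->vtag_map);
 */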
1301 
1302 /**
1303  * mei_cl_fp_by_vtag - obtain the file pointer by vtag
1304  *
1305  * @cl: host client
1306  * @vtag: vm tag
1307  *
1308  * Return:
1309  * * A file pointer - on success
1310  * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
1311  */
1312 const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
1313 {
1314 	struct mei_cl_vtag *vtag_l;
1315 
1316 	list_for_each_entry(vtag_l, &cl->vtag_map, list)
1317 		if (vtag_l->vtag == vtag)
1318 			return vtag_l->fp;
1319 
1320 	return ERR_PTR(-ENOENT);
1321 }
1322 
1323 /**
1324  * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
1325  *
1326  * @cl: host client
1327  * @vtag: vm tag
1328  */
1329 static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
1330 {
1331 	struct mei_cl_vtag *vtag_l;
1332 
1333 	list_for_each_entry(vtag_l, &cl->vtag_map, list) {
1334 		if (vtag_l->vtag == vtag) {
1335 			vtag_l->pending_read = false;
1336 			break;
1337 		}
1338 	}
1339 }
1340 
1341 /**
1342  * mei_cl_read_vtag_add_fc - add flow control for next pending reader
1343  *                           in the vtag list
1344  *
1345  * @cl: host client
1346  */
1347 static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
1348 {
1349 	struct mei_cl_vtag *cl_vtag;
1350 
1351 	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
1352 		if (cl_vtag->pending_read) {
1353 			if (mei_cl_enqueue_ctrl_wr_cb(cl,
1354 						      mei_cl_mtu(cl),
1355 						      MEI_FOP_READ,
1356 						      cl_vtag->fp))
1357 				cl->rx_flow_ctrl_creds++;
1358 			break;
1359 		}
1360 	}
1361 }
1362 
1363 /**
1364  * mei_cl_vt_support_check - check if client supports vtags
1365  *
1366  * @cl: host client
1367  *
1368  * Return:
1369  * * 0 - supported, or not connected at all
1370  * * -EOPNOTSUPP - vtags are not supported by client
1371  */
1372 int mei_cl_vt_support_check(const struct mei_cl *cl)
1373 {
1374 	struct mei_device *dev = cl->dev;
1375 
1376 	if (!dev->hbm_f_vt_supported)
1377 		return -EOPNOTSUPP;
1378 
1379 	if (!cl->me_cl)
1380 		return 0;
1381 
1382 	return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
1383 }
1384 
1385 /**
1386  * mei_cl_add_rd_completed - add read completed callback to list with lock
1387  *                           and vtag check
1388  *
1389  * @cl: host client
1390  * @cb: callback block
1391  *
1392  */
1393 void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
1394 {
1395 	const struct file *fp;
1396 
1397 	if (!mei_cl_vt_support_check(cl)) {
1398 		fp = mei_cl_fp_by_vtag(cl, cb->vtag);
1399 		if (IS_ERR(fp)) {
1400 			/* client already disconnected, discarding */
1401 			mei_io_cb_free(cb);
1402 			return;
1403 		}
1404 		cb->fp = fp;
1405 		mei_cl_reset_read_by_vtag(cl, cb->vtag);
1406 		mei_cl_read_vtag_add_fc(cl);
1407 	}
1408 
1409 	spin_lock(&cl->rd_completed_lock);
1410 	list_add_tail(&cb->list, &cl->rd_completed);
1411 	spin_unlock(&cl->rd_completed_lock);
1412 }
1413 
1414 /**
1415  * mei_cl_del_rd_completed - free read completed callback with lock
1416  *
1417  * @cl: host client
1418  * @cb: callback block
1419  *
1420  */
1421 void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
1422 {
1423 	spin_lock(&cl->rd_completed_lock);
1424 	mei_io_cb_free(cb);
1425 	spin_unlock(&cl->rd_completed_lock);
1426 }
1427 
1428 /**
1429  * mei_cl_notify_fop2req - convert fop to proper request
1430  *
1431  * @fop: client notification start response command
1432  *
1433  * Return:  MEI_HBM_NOTIFICATION_START/STOP
1434  */
1435 u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
1436 {
1437 	if (fop == MEI_FOP_NOTIFY_START)
1438 		return MEI_HBM_NOTIFICATION_START;
1439 	else
1440 		return MEI_HBM_NOTIFICATION_STOP;
1441 }
1442 
1443 /**
1444  * mei_cl_notify_req2fop - convert notification request to file operation type
1445  *
1446  * @req: hbm notification request type
1447  *
1448  * Return:  MEI_FOP_NOTIFY_START/STOP
1449  */
1450 enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
1451 {
1452 	if (req == MEI_HBM_NOTIFICATION_START)
1453 		return MEI_FOP_NOTIFY_START;
1454 	else
1455 		return MEI_FOP_NOTIFY_STOP;
1456 }
1457 
1458 /**
1459  * mei_cl_irq_notify - send notification request in irq_thread context
1460  *
1461  * @cl: client
1462  * @cb: callback block.
1463  * @cmpl_list: complete list.
1464  *
1465  * Return: 0 on success and error otherwise.
1466  */
1467 int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
1468 		      struct list_head *cmpl_list)
1469 {
1470 	struct mei_device *dev = cl->dev;
1471 	u32 msg_slots;
1472 	int slots;
1473 	int ret;
1474 	bool request;
1475 
1476 	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
1477 	slots = mei_hbuf_empty_slots(dev);
1478 	if (slots < 0)
1479 		return -EOVERFLOW;
1480 
1481 	if ((u32)slots < msg_slots)
1482 		return -EMSGSIZE;
1483 
1484 	request = mei_cl_notify_fop2req(cb->fop_type);
1485 	ret = mei_hbm_cl_notify_req(dev, cl, request);
1486 	if (ret) {
1487 		cl->status = ret;
1488 		list_move_tail(&cb->list, cmpl_list);
1489 		return ret;
1490 	}
1491 
1492 	list_move_tail(&cb->list, &dev->ctrl_rd_list);
1493 	return 0;
1494 }
1495 
1496 /**
1497  * mei_cl_notify_request - send notification stop/start request
1498  *
1499  * @cl: host client
1500  * @fp: associate request with file
1501  * @request: 1 for start or 0 for stop
1502  *
1503  * Locking: called under "dev->device_lock" lock
1504  *
1505  * Return: 0 on success and error otherwise.
1506  */
1507 int mei_cl_notify_request(struct mei_cl *cl,
1508 			  const struct file *fp, u8 request)
1509 {
1510 	struct mei_device *dev;
1511 	struct mei_cl_cb *cb;
1512 	enum mei_cb_file_ops fop_type;
1513 	int rets;
1514 
1515 	if (WARN_ON(!cl || !cl->dev))
1516 		return -ENODEV;
1517 
1518 	dev = cl->dev;
1519 
1520 	if (!dev->hbm_f_ev_supported) {
1521 		cl_dbg(dev, cl, "notifications not supported\n");
1522 		return -EOPNOTSUPP;
1523 	}
1524 
1525 	if (!mei_cl_is_connected(cl))
1526 		return -ENODEV;
1527 
1528 	rets = pm_runtime_get(dev->dev);
1529 	if (rets < 0 && rets != -EINPROGRESS) {
1530 		pm_runtime_put_noidle(dev->dev);
1531 		cl_err(dev, cl, "rpm: get failed %d\n", rets);
1532 		return rets;
1533 	}
1534 
1535 	fop_type = mei_cl_notify_req2fop(request);
1536 	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
1537 	if (!cb) {
1538 		rets = -ENOMEM;
1539 		goto out;
1540 	}
1541 
1542 	if (mei_hbuf_acquire(dev)) {
1543 		if (mei_hbm_cl_notify_req(dev, cl, request)) {
1544 			rets = -ENODEV;
1545 			goto out;
1546 		}
1547 		list_move_tail(&cb->list, &dev->ctrl_rd_list);
1548 	}
1549 
1550 	mutex_unlock(&dev->device_lock);
1551 	wait_event_timeout(cl->wait,
1552 			   cl->notify_en == request ||
1553 			   cl->status ||
1554 			   !mei_cl_is_connected(cl),
1555 			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
1556 	mutex_lock(&dev->device_lock);
1557 
1558 	if (cl->notify_en != request && !cl->status)
1559 		cl->status = -EFAULT;
1560 
1561 	rets = cl->status;
1562 
1563 out:
1564 	cl_dbg(dev, cl, "rpm: autosuspend\n");
1565 	pm_runtime_mark_last_busy(dev->dev);
1566 	pm_runtime_put_autosuspend(dev->dev);
1567 
1568 	mei_io_cb_free(cb);
1569 	return rets;
1570 }
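
/*
 * Usage sketch (illustrative): per the @request documentation above,
 * 1 starts and 0 stops notifications:
 *
 *	rets = mei_cl_notify_request(cl, fp, MEI_HBM_NOTIFICATION_START);
 *	if (rets)
 *		return rets;
 */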
1571 
1572 /**
1573  * mei_cl_notify - raise notification
1574  *
1575  * @cl: host client
1576  *
1577  * Locking: called under "dev->device_lock" lock
1578  */
1579 void mei_cl_notify(struct mei_cl *cl)
1580 {
1581 	struct mei_device *dev;
1582 
1583 	if (!cl || !cl->dev)
1584 		return;
1585 
1586 	dev = cl->dev;
1587 
1588 	if (!cl->notify_en)
1589 		return;
1590 
1591 	cl_dbg(dev, cl, "notify event");
1592 	cl->notify_ev = true;
1593 	if (!mei_cl_bus_notify_event(cl))
1594 		wake_up_interruptible(&cl->ev_wait);
1595 
1596 	if (cl->ev_async)
1597 		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
1598 
1599 }
1600 
1601 /**
1602  * mei_cl_notify_get - get or wait for notification event
1603  *
1604  * @cl: host client
1605  * @block: this request is blocking
1606  * @notify_ev: true if notification event was received
1607  *
1608  * Locking: called under "dev->device_lock" lock
1609  *
1610  * Return: 0 on success and error otherwise.
1611  */
1612 int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
1613 {
1614 	struct mei_device *dev;
1615 	int rets;
1616 
1617 	*notify_ev = false;
1618 
1619 	if (WARN_ON(!cl || !cl->dev))
1620 		return -ENODEV;
1621 
1622 	dev = cl->dev;
1623 
1624 	if (!dev->hbm_f_ev_supported) {
1625 		cl_dbg(dev, cl, "notifications not supported\n");
1626 		return -EOPNOTSUPP;
1627 	}
1628 
1629 	if (!mei_cl_is_connected(cl))
1630 		return -ENODEV;
1631 
1632 	if (cl->notify_ev)
1633 		goto out;
1634 
1635 	if (!block)
1636 		return -EAGAIN;
1637 
1638 	mutex_unlock(&dev->device_lock);
1639 	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
1640 	mutex_lock(&dev->device_lock);
1641 
1642 	if (rets < 0)
1643 		return rets;
1644 
1645 out:
1646 	*notify_ev = cl->notify_ev;
1647 	cl->notify_ev = false;
1648 	return 0;
1649 }
1650 
1651 /**
1652  * mei_cl_read_start - the start read client message function.
1653  *
1654  * @cl: host client
1655  * @length: number of bytes to read
1656  * @fp: pointer to file structure
1657  *
1658  * Return: 0 on success, <0 on failure.
1659  */
1660 int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
1661 {
1662 	struct mei_device *dev;
1663 	struct mei_cl_cb *cb;
1664 	int rets;
1665 
1666 	if (WARN_ON(!cl || !cl->dev))
1667 		return -ENODEV;
1668 
1669 	dev = cl->dev;
1670 
1671 	if (!mei_cl_is_connected(cl))
1672 		return -ENODEV;
1673 
1674 	if (!mei_me_cl_is_active(cl->me_cl)) {
1675 		cl_err(dev, cl, "no such me client\n");
1676 		return  -ENOTTY;
1677 	}
1678 
1679 	if (mei_cl_is_fixed_address(cl))
1680 		return 0;
1681 
1682 	/* HW currently supports only one pending read */
1683 	if (cl->rx_flow_ctrl_creds) {
1684 		mei_cl_set_read_by_fp(cl, fp);
1685 		return -EBUSY;
1686 	}
1687 
1688 	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
1689 	if (!cb)
1690 		return -ENOMEM;
1691 
1692 	mei_cl_set_read_by_fp(cl, fp);
1693 
1694 	rets = pm_runtime_get(dev->dev);
1695 	if (rets < 0 && rets != -EINPROGRESS) {
1696 		pm_runtime_put_noidle(dev->dev);
1697 		cl_err(dev, cl, "rpm: get failed %d\n", rets);
1698 		goto nortpm;
1699 	}
1700 
1701 	rets = 0;
1702 	if (mei_hbuf_acquire(dev)) {
1703 		rets = mei_hbm_cl_flow_control_req(dev, cl);
1704 		if (rets < 0)
1705 			goto out;
1706 
1707 		list_move_tail(&cb->list, &cl->rd_pending);
1708 	}
1709 	cl->rx_flow_ctrl_creds++;
1710 
1711 out:
1712 	cl_dbg(dev, cl, "rpm: autosuspend\n");
1713 	pm_runtime_mark_last_busy(dev->dev);
1714 	pm_runtime_put_autosuspend(dev->dev);
1715 nortpm:
1716 	if (rets)
1717 		mei_io_cb_free(cb);
1718 
1719 	return rets;
1720 }
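
/*
 * Blocking read sketch (illustrative, locking and error handling
 * abbreviated): a read is started, the caller waits for a completed cb
 * and then releases it:
 *
 *	rets = mei_cl_read_start(cl, mei_cl_mtu(cl), fp);
 *	wait_event_interruptible(cl->rx_wait,
 *				 mei_cl_read_cb(cl, fp) ||
 *				 !mei_cl_is_connected(cl));
 *	cb = mei_cl_read_cb(cl, fp);
 *	(copy cb->buf_idx bytes from cb->buf.data to the caller)
 *	mei_cl_del_rd_completed(cl, cb);
 */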
1721 
1722 static inline u8 mei_ext_hdr_set_vtag(struct mei_ext_hdr *ext, u8 vtag)
1723 {
1724 	ext->type = MEI_EXT_HDR_VTAG;
1725 	ext->ext_payload[0] = vtag;
1726 	ext->length = mei_data2slots(sizeof(*ext));
1727 	return ext->length;
1728 }
1729 
1730 /**
1731  * mei_msg_hdr_init - allocate and initialize mei message header
1732  *
1733  * @cb: message callback structure
1734  *
1735  * Return: a pointer to initialized header
1736  */
1737 static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
1738 {
1739 	size_t hdr_len;
1740 	struct mei_ext_meta_hdr *meta;
1741 	struct mei_ext_hdr *ext;
1742 	struct mei_msg_hdr *mei_hdr;
1743 	bool is_ext, is_vtag;
1744 
1745 	if (!cb)
1746 		return ERR_PTR(-EINVAL);
1747 
1748 	/* Extended header for vtag is attached only on the first fragment */
1749 	is_vtag = (cb->vtag && cb->buf_idx == 0);
1750 	is_ext = is_vtag;
1751 
1752 	/* Compute extended header size */
1753 	hdr_len = sizeof(*mei_hdr);
1754 
1755 	if (!is_ext)
1756 		goto setup_hdr;
1757 
1758 	hdr_len += sizeof(*meta);
1759 	if (is_vtag)
1760 		hdr_len += sizeof(*ext);
1761 
1762 setup_hdr:
1763 	mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
1764 	if (!mei_hdr)
1765 		return ERR_PTR(-ENOMEM);
1766 
1767 	mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
1768 	mei_hdr->me_addr = mei_cl_me_id(cb->cl);
1769 	mei_hdr->internal = cb->internal;
1770 	mei_hdr->extended = is_ext;
1771 
1772 	if (!is_ext)
1773 		goto out;
1774 
1775 	meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
1776 	if (is_vtag) {
1777 		meta->count++;
1778 		meta->size += mei_ext_hdr_set_vtag(meta->hdrs, cb->vtag);
1779 	}
1780 out:
1781 	mei_hdr->length = hdr_len - sizeof(*mei_hdr);
1782 	return mei_hdr;
1783 }
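
/*
 * Worked size example (a sketch; the struct definitions live in hw.h):
 * for the first fragment of a message with a nonzero vtag the header is
 * followed by an extension block, so
 *
 *	hdr_len = sizeof(struct mei_msg_hdr)
 *		+ sizeof(struct mei_ext_meta_hdr)
 *		+ sizeof(struct mei_ext_hdr);
 *
 * and mei_hdr->length initially counts only the extension bytes; the
 * payload length is added by the caller afterwards.
 */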
1784 
1785 /**
1786  * mei_cl_irq_write - write a message to device
1787  *	from the interrupt thread context
1788  *
1789  * @cl: client
1790  * @cb: callback block.
1791  * @cmpl_list: complete list.
1792  *
1793  * Return: 0, OK; otherwise error.
1794  */
1795 int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
1796 		     struct list_head *cmpl_list)
1797 {
1798 	struct mei_device *dev;
1799 	struct mei_msg_data *buf;
1800 	struct mei_msg_hdr *mei_hdr = NULL;
1801 	size_t hdr_len;
1802 	size_t hbuf_len, dr_len;
1803 	size_t buf_len;
1804 	size_t data_len;
1805 	int hbuf_slots;
1806 	u32 dr_slots;
1807 	u32 dma_len;
1808 	int rets;
1809 	bool first_chunk;
1810 	const void *data;
1811 
1812 	if (WARN_ON(!cl || !cl->dev))
1813 		return -ENODEV;
1814 
1815 	dev = cl->dev;
1816 
1817 	buf = &cb->buf;
1818 
1819 	first_chunk = cb->buf_idx == 0;
1820 
1821 	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
1822 	if (rets < 0)
1823 		goto err;
1824 
1825 	if (rets == 0) {
1826 		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
1827 		return 0;
1828 	}
1829 
1830 	buf_len = buf->size - cb->buf_idx;
1831 	data = buf->data + cb->buf_idx;
1832 	hbuf_slots = mei_hbuf_empty_slots(dev);
1833 	if (hbuf_slots < 0) {
1834 		rets = -EOVERFLOW;
1835 		goto err;
1836 	}
1837 
1838 	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
1839 	dr_slots = mei_dma_ring_empty_slots(dev);
1840 	dr_len = mei_slots2data(dr_slots);
1841 
1842 	mei_hdr = mei_msg_hdr_init(cb);
1843 	if (IS_ERR(mei_hdr)) {
1844 		rets = PTR_ERR(mei_hdr);
1845 		mei_hdr = NULL;
1846 		goto err;
1847 	}
1848 
1849 	cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
1850 	       mei_hdr->extended, cb->vtag);
1851 
1852 	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
1853 
1854 	/*
1855 	 * Split the message only if we can write the whole host buffer;
1856 	 * otherwise wait for the next time the host buffer is empty.
1857 	 */
1858 	if (hdr_len + buf_len <= hbuf_len) {
1859 		data_len = buf_len;
1860 		mei_hdr->msg_complete = 1;
1861 	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
1862 		mei_hdr->dma_ring = 1;
1863 		if (buf_len > dr_len)
1864 			buf_len = dr_len;
1865 		else
1866 			mei_hdr->msg_complete = 1;
1867 
1868 		data_len = sizeof(dma_len);
1869 		dma_len = buf_len;
1870 		data = &dma_len;
1871 	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
1872 		buf_len = hbuf_len - hdr_len;
1873 		data_len = buf_len;
1874 	} else {
1875 		kfree(mei_hdr);
1876 		return 0;
1877 	}
1878 	mei_hdr->length += data_len;
1879 
1880 	if (mei_hdr->dma_ring)
1881 		mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
1882 	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
1883 
1884 	if (rets)
1885 		goto err;
1886 
1887 	cl->status = 0;
1888 	cl->writing_state = MEI_WRITING;
1889 	cb->buf_idx += buf_len;
1890 
1891 	if (first_chunk) {
1892 		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
1893 			rets = -EIO;
1894 			goto err;
1895 		}
1896 	}
1897 
1898 	if (mei_hdr->msg_complete)
1899 		list_move_tail(&cb->list, &dev->write_waiting_list);
1900 
1901 	kfree(mei_hdr);
1902 	return 0;
1903 
1904 err:
1905 	kfree(mei_hdr);
1906 	cl->status = rets;
1907 	list_move_tail(&cb->list, cmpl_list);
1908 	return rets;
1909 }
1910 
1911 /**
1912  * mei_cl_write - submit a write cb to mei device
1913  *	assumes device_lock is locked
1914  *
1915  * @cl: host client
1916  * @cb: write callback with filled data
1917  *
1918  * Return: number of bytes sent on success, <0 on failure.
1919  */
1920 ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
1921 {
1922 	struct mei_device *dev;
1923 	struct mei_msg_data *buf;
1924 	struct mei_msg_hdr *mei_hdr = NULL;
1925 	size_t hdr_len;
1926 	size_t hbuf_len, dr_len;
1927 	size_t buf_len;
1928 	size_t data_len;
1929 	int hbuf_slots;
1930 	u32 dr_slots;
1931 	u32 dma_len;
1932 	ssize_t rets;
1933 	bool blocking;
1934 	const void *data;
1935 
1936 	if (WARN_ON(!cl || !cl->dev))
1937 		return -ENODEV;
1938 
1939 	if (WARN_ON(!cb))
1940 		return -EINVAL;
1941 
1942 	dev = cl->dev;
1943 
1944 	buf = &cb->buf;
1945 	buf_len = buf->size;
1946 
1947 	cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);
1948 
1949 	blocking = cb->blocking;
1950 	data = buf->data;
1951 
1952 	rets = pm_runtime_get(dev->dev);
1953 	if (rets < 0 && rets != -EINPROGRESS) {
1954 		pm_runtime_put_noidle(dev->dev);
1955 		cl_err(dev, cl, "rpm: get failed %zd\n", rets);
1956 		goto free;
1957 	}
1958 
1959 	cb->buf_idx = 0;
1960 	cl->writing_state = MEI_IDLE;
1961 
1962 
1963 	rets = mei_cl_tx_flow_ctrl_creds(cl);
1964 	if (rets < 0)
1965 		goto err;
1966 
1967 	mei_hdr = mei_msg_hdr_init(cb);
1968 	if (IS_ERR(mei_hdr)) {
1969 		rets = PTR_ERR(mei_hdr);
1970 		mei_hdr = NULL;
1971 		goto err;
1972 	}
1973 
1974 	cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
1975 	       mei_hdr->extended, cb->vtag);
1976 
1977 	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
1978 
1979 	if (rets == 0) {
1980 		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
1981 		rets = buf_len;
1982 		goto out;
1983 	}
1984 
1985 	if (!mei_hbuf_acquire(dev)) {
1986 		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
1987 		rets = buf_len;
1988 		goto out;
1989 	}
1990 
1991 	hbuf_slots = mei_hbuf_empty_slots(dev);
1992 	if (hbuf_slots < 0) {
1993 		rets = -EOVERFLOW;
1994 		goto out;
1995 	}
1996 
1997 	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
1998 	dr_slots = mei_dma_ring_empty_slots(dev);
1999 	dr_len =  mei_slots2data(dr_slots);
2000 
2001 	if (hdr_len + buf_len <= hbuf_len) {
2002 		data_len = buf_len;
2003 		mei_hdr->msg_complete = 1;
2004 	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
2005 		mei_hdr->dma_ring = 1;
2006 		if (buf_len > dr_len)
2007 			buf_len = dr_len;
2008 		else
2009 			mei_hdr->msg_complete = 1;
2010 
2011 		data_len = sizeof(dma_len);
2012 		dma_len = buf_len;
2013 		data = &dma_len;
2014 	} else {
2015 		buf_len = hbuf_len - hdr_len;
2016 		data_len = buf_len;
2017 	}
2018 
2019 	mei_hdr->length += data_len;
2020 
2021 	if (mei_hdr->dma_ring)
2022 		mei_dma_ring_write(dev, buf->data, buf_len);
2023 	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
2024 
2025 	if (rets)
2026 		goto err;
2027 
2028 	rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
2029 	if (rets)
2030 		goto err;
2031 
2032 	cl->writing_state = MEI_WRITING;
2033 	cb->buf_idx = buf_len;
2034 	/* restore return value */
2035 	buf_len = buf->size;
2036 
2037 out:
2038 	if (mei_hdr->msg_complete)
2039 		mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
2040 	else
2041 		mei_tx_cb_enqueue(cb, &dev->write_list);
2042 
2043 	cb = NULL;
2044 	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
2045 
2046 		mutex_unlock(&dev->device_lock);
2047 		rets = wait_event_interruptible(cl->tx_wait,
2048 				cl->writing_state == MEI_WRITE_COMPLETE ||
2049 				(!mei_cl_is_connected(cl)));
2050 		mutex_lock(&dev->device_lock);
2051 		/* wait_event_interruptible returns -ERESTARTSYS */
2052 		if (rets) {
2053 			if (signal_pending(current))
2054 				rets = -EINTR;
2055 			goto err;
2056 		}
2057 		if (cl->writing_state != MEI_WRITE_COMPLETE) {
2058 			rets = -EFAULT;
2059 			goto err;
2060 		}
2061 	}
2062 
2063 	rets = buf_len;
2064 err:
2065 	cl_dbg(dev, cl, "rpm: autosuspend\n");
2066 	pm_runtime_mark_last_busy(dev->dev);
2067 	pm_runtime_put_autosuspend(dev->dev);
2068 free:
2069 	mei_io_cb_free(cb);
2070 
2071 	kfree(mei_hdr);
2072 
2073 	return rets;
2074 }
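
/*
 * Blocking write sketch (illustrative): mei_cl_write() takes ownership
 * of the cb and returns the full buffer size on success, even when the
 * message is fragmented or routed through the DMA ring:
 *
 *	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, fp);
 *	if (!cb)
 *		return -ENOMEM;
 *	memcpy(cb->buf.data, src, length);
 *	cb->blocking = 1;
 *	rets = mei_cl_write(cl, cb);
 */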
2075 
2076 /**
2077  * mei_cl_complete - processes completed operation for a client
2078  *
2079  * @cl: private data of the file object.
2080  * @cb: callback block.
2081  */
2082 void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
2083 {
2084 	struct mei_device *dev = cl->dev;
2085 
2086 	switch (cb->fop_type) {
2087 	case MEI_FOP_WRITE:
2088 		mei_tx_cb_dequeue(cb);
2089 		cl->writing_state = MEI_WRITE_COMPLETE;
2090 		if (waitqueue_active(&cl->tx_wait)) {
2091 			wake_up_interruptible(&cl->tx_wait);
2092 		} else {
2093 			pm_runtime_mark_last_busy(dev->dev);
2094 			pm_request_autosuspend(dev->dev);
2095 		}
2096 		break;
2097 
2098 	case MEI_FOP_READ:
2099 		mei_cl_add_rd_completed(cl, cb);
2100 		if (!mei_cl_is_fixed_address(cl) &&
2101 		    !WARN_ON(!cl->rx_flow_ctrl_creds))
2102 			cl->rx_flow_ctrl_creds--;
2103 		if (!mei_cl_bus_rx_event(cl))
2104 			wake_up_interruptible(&cl->rx_wait);
2105 		break;
2106 
2107 	case MEI_FOP_CONNECT:
2108 	case MEI_FOP_DISCONNECT:
2109 	case MEI_FOP_NOTIFY_STOP:
2110 	case MEI_FOP_NOTIFY_START:
2111 		if (waitqueue_active(&cl->wait))
2112 			wake_up(&cl->wait);
2113 
2114 		break;
2115 	case MEI_FOP_DISCONNECT_RSP:
2116 		mei_io_cb_free(cb);
2117 		mei_cl_set_disconnected(cl);
2118 		break;
2119 	default:
2120 		BUG();
2121 	}
2122 }
2123 
2124 
2125 /**
2126  * mei_cl_all_disconnect - disconnect forcefully all connected clients
2127  *
2128  * @dev: mei device
2129  */
2130 void mei_cl_all_disconnect(struct mei_device *dev)
2131 {
2132 	struct mei_cl *cl;
2133 
2134 	list_for_each_entry(cl, &dev->file_list, link)
2135 		mei_cl_set_disconnected(cl);
2136 }
2137