xref: /openbmc/linux/drivers/misc/mei/client.c (revision 36bccb11)
1 /*
2  *
3  * Intel Management Engine Interface (Intel MEI) Linux driver
4  * Copyright (c) 2003-2012, Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  */
16 
17 #include <linux/pci.h>
18 #include <linux/sched.h>
19 #include <linux/wait.h>
20 #include <linux/delay.h>
21 
22 #include <linux/mei.h>
23 
24 #include "mei_dev.h"
25 #include "hbm.h"
26 #include "client.h"
27 
/**
 * mei_me_cl_by_uuid - locate index of me client
 *
 * @dev: mei device
 * @uuid: me client uuid to look up (compared against props.protocol_name)
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns me client index or -ENOENT if not found
 */
int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
{
	int i;

	for (i = 0; i < dev->me_clients_num; ++i)
		if (uuid_le_cmp(*uuid,
				dev->me_clients[i].props.protocol_name) == 0)
			return i;

	return -ENOENT;
}
48 
49 
50 /**
51  * mei_me_cl_by_id return index to me_clients for client_id
52  *
53  * @dev: the device structure
54  * @client_id: me client id
55  *
56  * Locking: called under "dev->device_lock" lock
57  *
58  * returns index on success, -ENOENT on failure.
59  */
60 
61 int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
62 {
63 	int i;
64 
65 	for (i = 0; i < dev->me_clients_num; i++)
66 		if (dev->me_clients[i].client_id == client_id)
67 			return i;
68 
69 	return -ENOENT;
70 }
71 
72 
73 /**
74  * mei_cl_cmp_id - tells if the clients are the same
75  *
76  * @cl1: host client 1
77  * @cl2: host client 2
78  *
79  * returns true  - if the clients has same host and me ids
80  *         false - otherwise
81  */
82 static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
83 				const struct mei_cl *cl2)
84 {
85 	return cl1 && cl2 &&
86 		(cl1->host_client_id == cl2->host_client_id) &&
87 		(cl1->me_client_id == cl2->me_client_id);
88 }
89 
90 /**
91  * mei_io_list_flush - removes cbs belonging to cl.
92  *
93  * @list:  an instance of our list structure
94  * @cl:    host client, can be NULL for flushing the whole list
95  * @free:  whether to free the cbs
96  */
97 static void __mei_io_list_flush(struct mei_cl_cb *list,
98 				struct mei_cl *cl, bool free)
99 {
100 	struct mei_cl_cb *cb;
101 	struct mei_cl_cb *next;
102 
103 	/* enable removing everything if no cl is specified */
104 	list_for_each_entry_safe(cb, next, &list->list, list) {
105 		if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) {
106 			list_del(&cb->list);
107 			if (free)
108 				mei_io_cb_free(cb);
109 		}
110 	}
111 }
112 
113 /**
114  * mei_io_list_flush - removes list entry belonging to cl.
115  *
116  * @list:  An instance of our list structure
117  * @cl: host client
118  */
119 static inline void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
120 {
121 	__mei_io_list_flush(list, cl, false);
122 }
123 
124 
125 /**
126  * mei_io_list_free - removes cb belonging to cl and free them
127  *
128  * @list:  An instance of our list structure
129  * @cl: host client
130  */
131 static inline void mei_io_list_free(struct mei_cl_cb *list, struct mei_cl *cl)
132 {
133 	__mei_io_list_flush(list, cl, true);
134 }
135 
136 /**
137  * mei_io_cb_free - free mei_cb_private related memory
138  *
139  * @cb: mei callback struct
140  */
141 void mei_io_cb_free(struct mei_cl_cb *cb)
142 {
143 	if (cb == NULL)
144 		return;
145 
146 	kfree(cb->request_buffer.data);
147 	kfree(cb->response_buffer.data);
148 	kfree(cb);
149 }
150 
151 /**
152  * mei_io_cb_init - allocate and initialize io callback
153  *
154  * @cl - mei client
155  * @fp: pointer to file structure
156  *
157  * returns mei_cl_cb pointer or NULL;
158  */
159 struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
160 {
161 	struct mei_cl_cb *cb;
162 
163 	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
164 	if (!cb)
165 		return NULL;
166 
167 	mei_io_list_init(cb);
168 
169 	cb->file_object = fp;
170 	cb->cl = cl;
171 	cb->buf_idx = 0;
172 	return cb;
173 }
174 
175 /**
176  * mei_io_cb_alloc_req_buf - allocate request buffer
177  *
178  * @cb: io callback structure
179  * @length: size of the buffer
180  *
181  * returns 0 on success
182  *         -EINVAL if cb is NULL
183  *         -ENOMEM if allocation failed
184  */
185 int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
186 {
187 	if (!cb)
188 		return -EINVAL;
189 
190 	if (length == 0)
191 		return 0;
192 
193 	cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
194 	if (!cb->request_buffer.data)
195 		return -ENOMEM;
196 	cb->request_buffer.size = length;
197 	return 0;
198 }
199 /**
200  * mei_io_cb_alloc_resp_buf - allocate response buffer
201  *
202  * @cb: io callback structure
203  * @length: size of the buffer
204  *
205  * returns 0 on success
206  *         -EINVAL if cb is NULL
207  *         -ENOMEM if allocation failed
208  */
209 int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
210 {
211 	if (!cb)
212 		return -EINVAL;
213 
214 	if (length == 0)
215 		return 0;
216 
217 	cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
218 	if (!cb->response_buffer.data)
219 		return -ENOMEM;
220 	cb->response_buffer.size = length;
221 	return 0;
222 }
223 
224 
225 
/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 *
 * returns 0 on success, -EINVAL if cl or cl->dev is NULL
 */
int mei_cl_flush_queues(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	/* read/ctrl/amthif entries are only unlinked ... */
	mei_io_list_flush(&cl->dev->read_list, cl);
	/* ... while write entries are unlinked and freed */
	mei_io_list_free(&cl->dev->write_list, cl);
	mei_io_list_free(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
	return 0;
}
250 
251 
252 /**
253  * mei_cl_init - initializes cl.
254  *
255  * @cl: host client to be initialized
256  * @dev: mei device
257  */
258 void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
259 {
260 	memset(cl, 0, sizeof(struct mei_cl));
261 	init_waitqueue_head(&cl->wait);
262 	init_waitqueue_head(&cl->rx_wait);
263 	init_waitqueue_head(&cl->tx_wait);
264 	INIT_LIST_HEAD(&cl->link);
265 	INIT_LIST_HEAD(&cl->device_link);
266 	cl->reading_state = MEI_IDLE;
267 	cl->writing_state = MEI_IDLE;
268 	cl->dev = dev;
269 }
270 
271 /**
272  * mei_cl_allocate - allocates cl  structure and sets it up.
273  *
274  * @dev: mei device
275  * returns  The allocated file or NULL on failure
276  */
277 struct mei_cl *mei_cl_allocate(struct mei_device *dev)
278 {
279 	struct mei_cl *cl;
280 
281 	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
282 	if (!cl)
283 		return NULL;
284 
285 	mei_cl_init(cl, dev);
286 
287 	return cl;
288 }
289 
290 /**
291  * mei_cl_find_read_cb - find this cl's callback in the read list
292  *
293  * @cl: host client
294  *
295  * returns cb on success, NULL on error
296  */
297 struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
298 {
299 	struct mei_device *dev = cl->dev;
300 	struct mei_cl_cb *cb;
301 
302 	list_for_each_entry(cb, &dev->read_list.list, list)
303 		if (mei_cl_cmp_id(cl, cb->cl))
304 			return cb;
305 	return NULL;
306 }
307 
/** mei_cl_link: allocate host id in the host map
 *
 * @cl - host client
 * @id - fixed host id or MEI_HOST_CLIENT_ID_ANY for a dynamically found one
 *
 * returns 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if no free host id or open handle count exceeded
 */
int mei_cl_link(struct mei_cl *cl, int id)
{
	struct mei_device *dev;
	long open_handle_count;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* If Id is not assigned get one*/
	if (id == MEI_HOST_CLIENT_ID_ANY)
		id = find_first_zero_bit(dev->host_clients_map,
					MEI_CLIENTS_MAX);

	if (id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	/* amthif handles are counted separately from regular ones */
	open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
	if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}
356 
/**
 * mei_cl_unlink - remove me_cl from the list
 *
 * @cl: host client, may be NULL or without a device on error exit paths
 *
 * returns 0 always
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	/* wd and amthif might not be initialized */
	if (!cl->dev)
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_INITIALIZING;

	return 0;
}
391 
392 
/**
 * mei_host_client_init - initialize host clients for fixed me clients
 *
 * @work: the init_work item embedded in struct mei_device
 *
 * Walks the enumerated me clients and runs the matching host side init
 * for the known fixed UUIDs (amthif, watchdog, nfc), then marks the
 * device enabled and resets the reset counter.
 *
 * Locking: takes and releases dev->device_lock.
 */
void mei_host_client_init(struct work_struct *work)
{
	struct mei_device *dev = container_of(work,
					      struct mei_device, init_work);
	struct mei_client_properties *client_props;
	int i;

	mutex_lock(&dev->device_lock);

	for (i = 0; i < dev->me_clients_num; i++) {
		client_props = &dev->me_clients[i].props;

		if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
			mei_amthif_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
			mei_wd_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid))
			mei_nfc_host_init(dev);

	}

	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;

	mutex_unlock(&dev->device_lock);
}
419 
420 /**
421  * mei_hbuf_acquire: try to acquire host buffer
422  *
423  * @dev: the device structure
424  * returns true if host buffer was acquired
425  */
426 bool mei_hbuf_acquire(struct mei_device *dev)
427 {
428 	if (!dev->hbuf_is_ready) {
429 		dev_dbg(&dev->pdev->dev, "hbuf is not ready\n");
430 		return false;
431 	}
432 
433 	dev->hbuf_is_ready = false;
434 
435 	return true;
436 }
437 
438 /**
439  * mei_cl_disconnect - disconnect host client from the me one
440  *
441  * @cl: host client
442  *
443  * Locking: called under "dev->device_lock" lock
444  *
445  * returns 0 on success, <0 on failure.
446  */
447 int mei_cl_disconnect(struct mei_cl *cl)
448 {
449 	struct mei_device *dev;
450 	struct mei_cl_cb *cb;
451 	int rets, err;
452 
453 	if (WARN_ON(!cl || !cl->dev))
454 		return -ENODEV;
455 
456 	dev = cl->dev;
457 
458 	cl_dbg(dev, cl, "disconnecting");
459 
460 	if (cl->state != MEI_FILE_DISCONNECTING)
461 		return 0;
462 
463 	cb = mei_io_cb_init(cl, NULL);
464 	if (!cb)
465 		return -ENOMEM;
466 
467 	cb->fop_type = MEI_FOP_CLOSE;
468 	if (mei_hbuf_acquire(dev)) {
469 		if (mei_hbm_cl_disconnect_req(dev, cl)) {
470 			rets = -ENODEV;
471 			cl_err(dev, cl, "failed to disconnect.\n");
472 			goto free;
473 		}
474 		mdelay(10); /* Wait for hardware disconnection ready */
475 		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
476 	} else {
477 		cl_dbg(dev, cl, "add disconnect cb to control write list\n");
478 		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
479 
480 	}
481 	mutex_unlock(&dev->device_lock);
482 
483 	err = wait_event_timeout(dev->wait_recvd_msg,
484 			MEI_FILE_DISCONNECTED == cl->state,
485 			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
486 
487 	mutex_lock(&dev->device_lock);
488 	if (MEI_FILE_DISCONNECTED == cl->state) {
489 		rets = 0;
490 		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");
491 	} else {
492 		rets = -ENODEV;
493 		if (MEI_FILE_DISCONNECTED != cl->state)
494 			cl_err(dev, cl, "wrong status client disconnect.\n");
495 
496 		if (err)
497 			cl_dbg(dev, cl, "wait failed disconnect err=%08x\n",
498 					err);
499 
500 		cl_err(dev, cl, "failed to disconnect from FW client.\n");
501 	}
502 
503 	mei_io_list_flush(&dev->ctrl_rd_list, cl);
504 	mei_io_list_flush(&dev->ctrl_wr_list, cl);
505 free:
506 	mei_io_cb_free(cb);
507 	return rets;
508 }
509 
510 
511 /**
512  * mei_cl_is_other_connecting - checks if other
513  *    client with the same me client id is connecting
514  *
515  * @cl: private data of the file object
516  *
517  * returns true if other client is connected, false - otherwise.
518  */
519 bool mei_cl_is_other_connecting(struct mei_cl *cl)
520 {
521 	struct mei_device *dev;
522 	struct mei_cl *ocl; /* the other client */
523 
524 	if (WARN_ON(!cl || !cl->dev))
525 		return false;
526 
527 	dev = cl->dev;
528 
529 	list_for_each_entry(ocl, &dev->file_list, link) {
530 		if (ocl->state == MEI_FILE_CONNECTING &&
531 		    ocl != cl &&
532 		    cl->me_client_id == ocl->me_client_id)
533 			return true;
534 
535 	}
536 
537 	return false;
538 }
539 
/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: pointer to file structure recorded in the connect request cb
 *
 * Locking: called under "dev->device_lock" lock; the lock is dropped
 * and reacquired around the connect-completion wait.
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cb = mei_io_cb_init(cl, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	cb->fop_type = MEI_FOP_CONNECT;

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_connect_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		/* cannot send now: queue for the irq write path */
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_recvd_msg,
			(cl->state == MEI_FILE_CONNECTED ||
			 cl->state == MEI_FILE_DISCONNECTED),
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->state != MEI_FILE_CONNECTED) {
		/* something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;

		/* remove our request from the control queues */
		mei_io_list_flush(&dev->ctrl_rd_list, cl);
		mei_io_list_flush(&dev->ctrl_wr_list, cl);
	}

	rets = cl->status;

out:
	mei_io_cb_free(cb);
	return rets;
}
602 
/**
 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: private data of the file object
 *
 * returns 1 if flow control credits are available, 0 - otherwise.
 *	-ENOENT if me client is not present
 *	-EINVAL if cl is invalid or single_recv_buf == 0
 */
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_me_client *me_cl;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* no me clients enumerated yet: treat as no credits */
	if (!dev->me_clients_num)
		return 0;

	/* per-host-client credits */
	if (cl->mei_flow_ctrl_creds > 0)
		return 1;

	id = mei_me_cl_by_id(dev, cl->me_client_id);
	if (id < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return id;
	}

	/* shared (single receive buffer) credits kept on the me client */
	me_cl = &dev->me_clients[id];
	if (me_cl->mei_flow_ctrl_creds) {
		if (WARN_ON(me_cl->props.single_recv_buf == 0))
			return -EINVAL;
		return 1;
	}
	return 0;
}
643 
644 /**
645  * mei_cl_flow_ctrl_reduce - reduces flow_control.
646  *
647  * @cl: private data of the file object
648  *
649  * @returns
650  *	0 on success
651  *	-ENOENT when me client is not found
652  *	-EINVAL when ctrl credits are <= 0
653  */
654 int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
655 {
656 	struct mei_device *dev;
657 	struct mei_me_client *me_cl;
658 	int id;
659 
660 	if (WARN_ON(!cl || !cl->dev))
661 		return -EINVAL;
662 
663 	dev = cl->dev;
664 
665 	id = mei_me_cl_by_id(dev, cl->me_client_id);
666 	if (id < 0) {
667 		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
668 		return id;
669 	}
670 
671 	me_cl = &dev->me_clients[id];
672 	if (me_cl->props.single_recv_buf != 0) {
673 		if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
674 			return -EINVAL;
675 		me_cl->mei_flow_ctrl_creds--;
676 	} else {
677 		if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
678 			return -EINVAL;
679 		cl->mei_flow_ctrl_creds--;
680 	}
681 	return 0;
682 }
683 
/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: requested buffer length; at least the me client's maximum
 *	message size is always allocated
 *
 * returns 0 on success
 *	-ENODEV when client or device is not connected
 *	-EBUSY  when a read is already pending
 *	-ENOTTY when the me client does not exist
 *	-ENOMEM on allocation failure
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	/* only one outstanding read per client */
	if (cl->read_cb) {
		cl_dbg(dev, cl, "read is pending.\n");
		return -EBUSY;
	}
	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		cl_err(dev, cl, "no such me client %d\n", cl->me_client_id);
		return  -ENOTTY;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb)
		return -ENOMEM;

	/* always allocate at least client max message */
	length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length);
	rets = mei_io_cb_alloc_resp_buf(cb, length);
	if (rets)
		goto err;

	cb->fop_type = MEI_FOP_READ;
	if (mei_hbuf_acquire(dev)) {
		/* send flow control now; cb waits on the read list */
		if (mei_hbm_cl_flow_control_req(dev, cl)) {
			cl_err(dev, cl, "flow control send failed\n");
			rets = -ENODEV;
			goto err;
		}
		list_add_tail(&cb->list, &dev->read_list.list);
	} else {
		/* host buffer busy: let the irq path send flow control */
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	cl->read_cb = cb;

	return rets;
err:
	mei_io_cb_free(cb);
	return rets;
}
745 
/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block holding the request buffer and progress index.
 * @cmpl_list: complete list; cb is moved here on write error.
 *
 * returns 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct mei_cl_cb *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	size_t len;
	u32 msg_slots;
	int slots;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->request_buffer;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		return rets;

	if (rets == 0) {
		cl_dbg(dev, cl,	"No flow control credentials: not sending.\n");
		return 0;
	}

	/* remaining payload starts at buf_idx from a previous partial send */
	slots = mei_hbuf_empty_slots(dev);
	len = buf->size - cb->buf_idx;
	msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.internal = cb->internal;

	/* NOTE(review): slots (int) is compared against msg_slots (u32);
	 * a negative slots value would wrap to a huge unsigned number and
	 * take the "fits" branch — confirm mei_hbuf_empty_slots() cannot
	 * return a negative value here */
	if (slots >= msg_slots) {
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (slots == dev->hbuf_depth) {
		msg_slots = slots;
		len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
	} else {
		/* wait for next time the host buffer is empty */
		return 0;
	}

	cl_dbg(dev, cl, "buf: size = %d idx = %lu\n",
			cb->request_buffer.size, cb->buf_idx);

	rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
	if (rets) {
		cl->status = rets;
		list_move_tail(&cb->list, &cmpl_list->list);
		return rets;
	}

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += mei_hdr.length;

	if (mei_hdr.msg_complete) {
		/* whole message sent: consume a credit and park the cb
		 * until the write-complete event */
		if (mei_cl_flow_ctrl_reduce(cl))
			return -EIO;
		list_move_tail(&cb->list, &dev->write_waiting_list.list);
	}

	return 0;
}
828 
/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data; ownership passes to the queues
 *	on success
 * @blocking: when true, wait (interruptibly) for write completion
 *
 * returns number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;


	buf = &cb->request_buffer;

	cl_dbg(dev, cl, "mei_cl_write %d\n", buf->size);


	cb->fop_type = MEI_FOP_WRITE;
	cb->buf_idx = 0;
	cl->writing_state = MEI_IDLE;

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;
	mei_hdr.msg_complete = 0;
	mei_hdr.internal = cb->internal;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	/* no credits or no host buffer: queue the cb for the irq path */
	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		rets = buf->size;
		goto out;
	}
	if (!mei_hbuf_acquire(dev)) {
		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
		rets = buf->size;
		goto out;
	}

	/* Check for a maximum length */
	if (buf->size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	rets = mei_write_message(dev, &mei_hdr, buf->data);
	if (rets)
		goto err;

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;

out:
	/* completed messages wait for the write-complete event,
	 * partial ones are continued by the irq write path */
	if (mei_hdr.msg_complete) {
		rets = mei_cl_flow_ctrl_reduce(cl);
		if (rets < 0)
			goto err;

		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&cb->list, &dev->write_list.list);
	}


	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		rets = wait_event_interruptible(cl->tx_wait,
				cl->writing_state == MEI_WRITE_COMPLETE);
		mutex_lock(&dev->device_lock);
		/* wait_event_interruptible returns -ERESTARTSYS */
		if (rets) {
			if (signal_pending(current))
				rets = -EINTR;
			/* NOTE(review): cb stays queued on the write lists
			 * on this interrupted-wait exit — verify the caller
			 * does not free it while it is still linked */
			goto err;
		}
	}

	rets = buf->size;
err:
	return rets;
}
931 
932 
933 /**
934  * mei_cl_complete - processes completed operation for a client
935  *
936  * @cl: private data of the file object.
937  * @cb: callback block.
938  */
939 void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
940 {
941 	if (cb->fop_type == MEI_FOP_WRITE) {
942 		mei_io_cb_free(cb);
943 		cb = NULL;
944 		cl->writing_state = MEI_WRITE_COMPLETE;
945 		if (waitqueue_active(&cl->tx_wait))
946 			wake_up_interruptible(&cl->tx_wait);
947 
948 	} else if (cb->fop_type == MEI_FOP_READ &&
949 			MEI_READING == cl->reading_state) {
950 		cl->reading_state = MEI_READ_COMPLETE;
951 		if (waitqueue_active(&cl->rx_wait))
952 			wake_up_interruptible(&cl->rx_wait);
953 		else
954 			mei_cl_bus_rx_event(cl);
955 
956 	}
957 }
958 
959 
960 /**
961  * mei_cl_all_disconnect - disconnect forcefully all connected clients
962  *
963  * @dev - mei device
964  */
965 
966 void mei_cl_all_disconnect(struct mei_device *dev)
967 {
968 	struct mei_cl *cl;
969 
970 	list_for_each_entry(cl, &dev->file_list, link) {
971 		cl->state = MEI_FILE_DISCONNECTED;
972 		cl->mei_flow_ctrl_creds = 0;
973 		cl->timer_count = 0;
974 	}
975 }
976 
977 
978 /**
979  * mei_cl_all_wakeup  - wake up all readers and writers they can be interrupted
980  *
981  * @dev  - mei device
982  */
983 void mei_cl_all_wakeup(struct mei_device *dev)
984 {
985 	struct mei_cl *cl;
986 	list_for_each_entry(cl, &dev->file_list, link) {
987 		if (waitqueue_active(&cl->rx_wait)) {
988 			cl_dbg(dev, cl, "Waking up reading client!\n");
989 			wake_up_interruptible(&cl->rx_wait);
990 		}
991 		if (waitqueue_active(&cl->tx_wait)) {
992 			cl_dbg(dev, cl, "Waking up writing client!\n");
993 			wake_up_interruptible(&cl->tx_wait);
994 		}
995 	}
996 }
997 
998 /**
999  * mei_cl_all_write_clear - clear all pending writes
1000 
1001  * @dev - mei device
1002  */
1003 void mei_cl_all_write_clear(struct mei_device *dev)
1004 {
1005 	mei_io_list_free(&dev->write_list, NULL);
1006 	mei_io_list_free(&dev->write_waiting_list, NULL);
1007 }
1008 
1009 
1010