xref: /openbmc/linux/drivers/misc/mei/client.c (revision 5bd8e16d)
1 /*
2  *
3  * Intel Management Engine Interface (Intel MEI) Linux driver
4  * Copyright (c) 2003-2012, Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  */
16 
17 #include <linux/pci.h>
18 #include <linux/sched.h>
19 #include <linux/wait.h>
20 #include <linux/delay.h>
21 
22 #include <linux/mei.h>
23 
24 #include "mei_dev.h"
25 #include "hbm.h"
26 #include "client.h"
27 
28 /**
29  * mei_me_cl_by_uuid - locate index of me client
30  * @dev: mei device
31  * @uuid: me client uuid
32  * returns me client index or -ENOENT if not found
33  */
34 int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
35 {
36 	int i, res = -ENOENT;
37 
38 	for (i = 0; i < dev->me_clients_num; ++i)
39 		if (uuid_le_cmp(*uuid,
40 				dev->me_clients[i].props.protocol_name) == 0) {
41 			res = i;
42 			break;
43 		}
44 
45 	return res;
46 }
47 
48 
49 /**
50  * mei_me_cl_by_id - return index to me_clients for client_id
51  *
52  * @dev: the device structure
53  * @client_id: me client id
54  *
55  * Locking: called under "dev->device_lock" lock
56  *
57  * returns index on success, -ENOENT on failure.
58  */
59 
60 int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
61 {
62 	int i;
63 	for (i = 0; i < dev->me_clients_num; i++)
64 		if (dev->me_clients[i].client_id == client_id)
65 			break;
66 	if (i == dev->me_clients_num)
67 		return -ENOENT;
68 
69 	if (WARN_ON(dev->me_clients[i].client_id != client_id))
70 		return -ENOENT;
71 
72 	return i;
73 }
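/*
 * Example: resolving a client UUID to its me client index and id, as a
 * connect-style caller might do with the two lookup helpers above.
 * Illustrative sketch only: "my_uuid" is a placeholder and dev->device_lock
 * is assumed to be held.
 *
 *	i = mei_me_cl_by_uuid(dev, &my_uuid);
 *	if (i < 0)
 *		return i;
 *	cl->me_client_id = dev->me_clients[i].client_id;
 */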
74 
75 
76 /**
77  * mei_io_list_flush - removes list entries belonging to cl.
78  *
79  * @list:  An instance of our list structure
80  * @cl: host client
81  */
82 void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
83 {
84 	struct mei_cl_cb *cb;
85 	struct mei_cl_cb *next;
86 
87 	list_for_each_entry_safe(cb, next, &list->list, list) {
88 		if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
89 			list_del(&cb->list);
90 	}
91 }
92 
93 /**
94  * mei_io_cb_free - free mei_cb_private related memory
95  *
96  * @cb: mei callback struct
97  */
98 void mei_io_cb_free(struct mei_cl_cb *cb)
99 {
100 	if (cb == NULL)
101 		return;
102 
103 	kfree(cb->request_buffer.data);
104 	kfree(cb->response_buffer.data);
105 	kfree(cb);
106 }
107 
108 /**
109  * mei_io_cb_init - allocate and initialize io callback
110  *
111  * @cl: mei client
112  * @fp: pointer to file structure
113  *
114  * returns mei_cl_cb pointer or NULL on allocation failure
115  */
116 struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
117 {
118 	struct mei_cl_cb *cb;
119 
120 	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
121 	if (!cb)
122 		return NULL;
123 
124 	mei_io_list_init(cb);
125 
126 	cb->file_object = fp;
127 	cb->cl = cl;
128 	cb->buf_idx = 0;
129 	return cb;
130 }
131 
132 /**
133  * mei_io_cb_alloc_req_buf - allocate request buffer
134  *
135  * @cb: io callback structure
136  * @length: size of the buffer
137  *
138  * returns 0 on success
139  *         -EINVAL if cb is NULL
140  *         -ENOMEM if allocation failed
141  */
142 int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
143 {
144 	if (!cb)
145 		return -EINVAL;
146 
147 	if (length == 0)
148 		return 0;
149 
150 	cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
151 	if (!cb->request_buffer.data)
152 		return -ENOMEM;
153 	cb->request_buffer.size = length;
154 	return 0;
155 }
156 /**
157  * mei_io_cb_alloc_resp_buf - allocate response buffer
158  *
159  * @cb: io callback structure
160  * @length: size of the buffer
161  *
162  * returns 0 on success
163  *         -EINVAL if cb is NULL
164  *         -ENOMEM if allocation failed
165  */
166 int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
167 {
168 	if (!cb)
169 		return -EINVAL;
170 
171 	if (length == 0)
172 		return 0;
173 
174 	cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
175 	if (!cb->response_buffer.data)
176 		return -ENOMEM;
177 	cb->response_buffer.size = length;
178 	return 0;
179 }
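/*
 * Example: typical io callback lifecycle built from the helpers above.
 * Illustrative sketch only; "file" and "length" are placeholders, and
 * mei_io_cb_free() releases the cb together with any buffer it holds.
 *
 *	struct mei_cl_cb *cb = mei_io_cb_init(cl, file);
 *	if (!cb)
 *		return -ENOMEM;
 *	if (mei_io_cb_alloc_req_buf(cb, length)) {
 *		mei_io_cb_free(cb);
 *		return -ENOMEM;
 *	}
 */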
180 
181 
182 
183 /**
184  * mei_cl_flush_queues - flushes queue lists belonging to cl.
185  *
186  * @cl: host client
187  */
188 int mei_cl_flush_queues(struct mei_cl *cl)
189 {
190 	if (WARN_ON(!cl || !cl->dev))
191 		return -EINVAL;
192 
193 	dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
194 	mei_io_list_flush(&cl->dev->read_list, cl);
195 	mei_io_list_flush(&cl->dev->write_list, cl);
196 	mei_io_list_flush(&cl->dev->write_waiting_list, cl);
197 	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
198 	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
199 	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
200 	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
201 	return 0;
202 }
203 
204 
205 /**
206  * mei_cl_init - initializes cl.
207  *
208  * @cl: host client to be initialized
209  * @dev: mei device
210  */
211 void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
212 {
213 	memset(cl, 0, sizeof(struct mei_cl));
214 	init_waitqueue_head(&cl->wait);
215 	init_waitqueue_head(&cl->rx_wait);
216 	init_waitqueue_head(&cl->tx_wait);
217 	INIT_LIST_HEAD(&cl->link);
218 	INIT_LIST_HEAD(&cl->device_link);
219 	cl->reading_state = MEI_IDLE;
220 	cl->writing_state = MEI_IDLE;
221 	cl->dev = dev;
222 }
223 
224 /**
225  * mei_cl_allocate - allocates cl structure and sets it up.
226  *
227  * @dev: mei device
228  * returns the allocated host client structure or NULL on failure
229  */
230 struct mei_cl *mei_cl_allocate(struct mei_device *dev)
231 {
232 	struct mei_cl *cl;
233 
234 	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
235 	if (!cl)
236 		return NULL;
237 
238 	mei_cl_init(cl, dev);
239 
240 	return cl;
241 }
242 
243 /**
244  * mei_cl_find_read_cb - find this cl's callback in the read list
245  *
246  * @cl: host client
247  *
248  * returns cb on success, NULL on error
249  */
250 struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
251 {
252 	struct mei_device *dev = cl->dev;
253 	struct mei_cl_cb *cb = NULL;
254 	struct mei_cl_cb *next = NULL;
255 
256 	list_for_each_entry_safe(cb, next, &dev->read_list.list, list)
257 		if (mei_cl_cmp_id(cl, cb->cl))
258 			return cb;
259 	return NULL;
260 }
261 
262 /**
263  * mei_cl_link - allocate host id in the host map
264  * @cl: host client
265  * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) to generate one
266  *
267  * returns 0 on success
268  *	-EINVAL on incorrect values
269  *	-ENOENT if no free host id is available
270  */
271 int mei_cl_link(struct mei_cl *cl, int id)
272 {
273 	struct mei_device *dev;
274 
275 	if (WARN_ON(!cl || !cl->dev))
276 		return -EINVAL;
277 
278 	dev = cl->dev;
279 
280 	/* If Id is not assigned get one */
281 	if (id == MEI_HOST_CLIENT_ID_ANY)
282 		id = find_first_zero_bit(dev->host_clients_map,
283 					MEI_CLIENTS_MAX);
284 
285 	if (id >= MEI_CLIENTS_MAX) {
286 		dev_err(&dev->pdev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
287 		return -ENOENT;
288 	}
289 
290 	dev->open_handle_count++;
291 
292 	cl->host_client_id = id;
293 	list_add_tail(&cl->link, &dev->file_list);
294 
295 	set_bit(id, dev->host_clients_map);
296 
297 	cl->state = MEI_FILE_INITIALIZING;
298 
299 	dev_dbg(&dev->pdev->dev, "link cl host id = %d\n", cl->host_client_id);
300 	return 0;
301 }
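/*
 * Example: creating a host client and binding it to a free host id.
 * Illustrative sketch only; assumes dev->device_lock is held by the caller.
 *
 *	struct mei_cl *cl = mei_cl_allocate(dev);
 *	if (!cl)
 *		return -ENOMEM;
 *	if (mei_cl_link(cl, MEI_HOST_CLIENT_ID_ANY) < 0) {
 *		kfree(cl);
 *		return -ENOENT;
 *	}
 */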
302 
303 /**
304  * mei_cl_unlink - remove host client from the device's client list
305  *
306  * @cl: host client
307  */
308 int mei_cl_unlink(struct mei_cl *cl)
309 {
310 	struct mei_device *dev;
311 	struct mei_cl *pos, *next;
312 
313 	/* don't shout on error exit path */
314 	if (!cl)
315 		return 0;
316 
317 	/* wd and amthif might not be initialized */
318 	if (!cl->dev)
319 		return 0;
320 
321 	dev = cl->dev;
322 
323 	list_for_each_entry_safe(pos, next, &dev->file_list, link) {
324 		if (cl->host_client_id == pos->host_client_id) {
325 			dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
326 				pos->host_client_id, pos->me_client_id);
327 			list_del_init(&pos->link);
328 			break;
329 		}
330 	}
331 	return 0;
332 }
333 
334 
335 void mei_host_client_init(struct work_struct *work)
336 {
337 	struct mei_device *dev = container_of(work,
338 					      struct mei_device, init_work);
339 	struct mei_client_properties *client_props;
340 	int i;
341 
342 	mutex_lock(&dev->device_lock);
343 
344 	bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
345 	dev->open_handle_count = 0;
346 
347 	/*
348 	 * Reserving the first three client IDs
349 	 * 0: Reserved for MEI Bus Message communications
350 	 * 1: Reserved for Watchdog
351 	 * 2: Reserved for AMTHI
352 	 */
353 	bitmap_set(dev->host_clients_map, 0, 3);
354 
355 	for (i = 0; i < dev->me_clients_num; i++) {
356 		client_props = &dev->me_clients[i].props;
357 
358 		if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
359 			mei_amthif_host_init(dev);
360 		else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
361 			mei_wd_host_init(dev);
362 		else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid))
363 			mei_nfc_host_init(dev);
364 
365 	}
366 
367 	dev->dev_state = MEI_DEV_ENABLED;
368 
369 	mutex_unlock(&dev->device_lock);
370 }
371 
372 
373 /**
374  * mei_cl_disconnect - disconnect host client from the me one
375  *
376  * @cl: host client
377  *
378  * Locking: called under "dev->device_lock" lock
379  *
380  * returns 0 on success, <0 on failure.
381  */
382 int mei_cl_disconnect(struct mei_cl *cl)
383 {
384 	struct mei_device *dev;
385 	struct mei_cl_cb *cb;
386 	int rets, err;
387 
388 	if (WARN_ON(!cl || !cl->dev))
389 		return -ENODEV;
390 
391 	dev = cl->dev;
392 
393 	if (cl->state != MEI_FILE_DISCONNECTING)
394 		return 0;
395 
396 	cb = mei_io_cb_init(cl, NULL);
397 	if (!cb)
398 		return -ENOMEM;
399 
400 	cb->fop_type = MEI_FOP_CLOSE;
401 	if (dev->hbuf_is_ready) {
402 		dev->hbuf_is_ready = false;
403 		if (mei_hbm_cl_disconnect_req(dev, cl)) {
404 			rets = -ENODEV;
405 			dev_err(&dev->pdev->dev, "failed to disconnect.\n");
406 			goto free;
407 		}
408 		mdelay(10); /* Wait for hardware disconnection ready */
409 		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
410 	} else {
411 		dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
412 		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
413 
414 	}
415 	mutex_unlock(&dev->device_lock);
416 
417 	err = wait_event_timeout(dev->wait_recvd_msg,
418 			MEI_FILE_DISCONNECTED == cl->state,
419 			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
420 
421 	mutex_lock(&dev->device_lock);
422 	if (MEI_FILE_DISCONNECTED == cl->state) {
423 		rets = 0;
424 		dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
425 	} else {
426 		rets = -ENODEV;
427 		if (MEI_FILE_DISCONNECTED != cl->state)
428 			dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");
429 
430 		if (err)
431 			dev_dbg(&dev->pdev->dev,
432 					"wait failed disconnect err=%08x\n",
433 					err);
434 
435 		dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
436 	}
437 
438 	mei_io_list_flush(&dev->ctrl_rd_list, cl);
439 	mei_io_list_flush(&dev->ctrl_wr_list, cl);
440 free:
441 	mei_io_cb_free(cb);
442 	return rets;
443 }
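/*
 * Example: tearing down a connection. Illustrative sketch only; assumes
 * dev->device_lock is held (mei_cl_disconnect drops and retakes it while
 * waiting for the firmware reply) and ignores the return value.
 *
 *	cl->state = MEI_FILE_DISCONNECTING;
 *	mei_cl_disconnect(cl);
 *	mei_cl_flush_queues(cl);
 *	mei_cl_unlink(cl);
 */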
444 
445 
446 /**
447  * mei_cl_is_other_connecting - checks if other
448  *    client with the same me client id is connecting
449  *
450  * @cl: private data of the file object
451  *
452  * returns ture if other client is connected, 0 - otherwise.
453  */
454 bool mei_cl_is_other_connecting(struct mei_cl *cl)
455 {
456 	struct mei_device *dev;
457 	struct mei_cl *pos;
458 	struct mei_cl *next;
459 
460 	if (WARN_ON(!cl || !cl->dev))
461 		return false;
462 
463 	dev = cl->dev;
464 
465 	list_for_each_entry_safe(pos, next, &dev->file_list, link) {
466 		if ((pos->state == MEI_FILE_CONNECTING) &&
467 		    (pos != cl) && cl->me_client_id == pos->me_client_id)
468 			return true;
469 
470 	}
471 
472 	return false;
473 }
474 
475 /**
476  * mei_cl_connect - connect host client to the me one
477  *
478  * @cl: host client
479  *
480  * Locking: called under "dev->device_lock" lock
481  *
482  * returns 0 on success, <0 on failure.
483  */
484 int mei_cl_connect(struct mei_cl *cl, struct file *file)
485 {
486 	struct mei_device *dev;
487 	struct mei_cl_cb *cb;
488 	int rets;
489 
490 	if (WARN_ON(!cl || !cl->dev))
491 		return -ENODEV;
492 
493 	dev = cl->dev;
494 
495 	cb = mei_io_cb_init(cl, file);
496 	if (!cb) {
497 		rets = -ENOMEM;
498 		goto out;
499 	}
500 
501 	cb->fop_type = MEI_FOP_IOCTL;
502 
503 	if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) {
504 		dev->hbuf_is_ready = false;
505 
506 		if (mei_hbm_cl_connect_req(dev, cl)) {
507 			rets = -ENODEV;
508 			goto out;
509 		}
510 		cl->timer_count = MEI_CONNECT_TIMEOUT;
511 		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
512 	} else {
513 		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
514 	}
515 
516 	mutex_unlock(&dev->device_lock);
517 	rets = wait_event_timeout(dev->wait_recvd_msg,
518 				 (cl->state == MEI_FILE_CONNECTED ||
519 				  cl->state == MEI_FILE_DISCONNECTED),
520 				 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
521 	mutex_lock(&dev->device_lock);
522 
523 	if (cl->state != MEI_FILE_CONNECTED) {
524 		rets = -EFAULT;
525 
526 		mei_io_list_flush(&dev->ctrl_rd_list, cl);
527 		mei_io_list_flush(&dev->ctrl_wr_list, cl);
528 		goto out;
529 	}
530 
531 	rets = cl->status;
532 
533 out:
534 	mei_io_cb_free(cb);
535 	return rets;
536 }
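/*
 * Example: a complete connect sequence as an ioctl-style caller might issue
 * it. Illustrative sketch only; uuid lookup failures and cleanup on error
 * are omitted, and dev->device_lock is assumed to be held (mei_cl_connect
 * releases it while waiting for the firmware reply).
 *
 *	i = mei_me_cl_by_uuid(dev, &uuid);
 *	cl->me_client_id = dev->me_clients[i].client_id;
 *	cl->state = MEI_FILE_CONNECTING;
 *	rets = mei_cl_connect(cl, file);
 */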
537 
538 /**
539  * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
540  *
541  * @cl: private data of the file object
542  *
543  * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
544  *	-ENOENT if mei_cl is not present
545  *	-EINVAL if single_recv_buf == 0
546  */
547 int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
548 {
549 	struct mei_device *dev;
550 	int i;
551 
552 	if (WARN_ON(!cl || !cl->dev))
553 		return -EINVAL;
554 
555 	dev = cl->dev;
556 
557 	if (!dev->me_clients_num)
558 		return 0;
559 
560 	if (cl->mei_flow_ctrl_creds > 0)
561 		return 1;
562 
563 	for (i = 0; i < dev->me_clients_num; i++) {
564 		struct mei_me_client  *me_cl = &dev->me_clients[i];
565 		if (me_cl->client_id == cl->me_client_id) {
566 			if (me_cl->mei_flow_ctrl_creds) {
567 				if (WARN_ON(me_cl->props.single_recv_buf == 0))
568 					return -EINVAL;
569 				return 1;
570 			} else {
571 				return 0;
572 			}
573 		}
574 	}
575 	return -ENOENT;
576 }
577 
578 /**
579  * mei_cl_flow_ctrl_reduce - reduces flow_control.
580  *
581  * @cl: private data of the file object
582  *
583  * returns
584  *	0 on success
585  *	-ENOENT when me client is not found
586  *	-EINVAL when ctrl credits are <= 0
587  */
588 int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
589 {
590 	struct mei_device *dev;
591 	int i;
592 
593 	if (WARN_ON(!cl || !cl->dev))
594 		return -EINVAL;
595 
596 	dev = cl->dev;
597 
598 	if (!dev->me_clients_num)
599 		return -ENOENT;
600 
601 	for (i = 0; i < dev->me_clients_num; i++) {
602 		struct mei_me_client  *me_cl = &dev->me_clients[i];
603 		if (me_cl->client_id == cl->me_client_id) {
604 			if (me_cl->props.single_recv_buf != 0) {
605 				if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
606 					return -EINVAL;
607 				dev->me_clients[i].mei_flow_ctrl_creds--;
608 			} else {
609 				if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
610 					return -EINVAL;
611 				cl->mei_flow_ctrl_creds--;
612 			}
613 			return 0;
614 		}
615 	}
616 	return -ENOENT;
617 }
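/*
 * Note on the two flow control helpers above: they are used as a pair
 * around each transmitted message. mei_cl_flow_ctrl_creds() must return 1
 * before a message is pushed to the hardware, and mei_cl_flow_ctrl_reduce()
 * consumes the credit once the last fragment of that message has been
 * written (see mei_cl_write() and mei_cl_irq_write_complete() below).
 * Clients marked single_recv_buf share the credit stored in the me client
 * entry instead of the per host client counter.
 */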
618 
619 /**
620  * mei_cl_read_start - start a read request for the client
621  * @cl: host client
622  * @length: requested read length in bytes
623  *
624  * returns 0 on success, <0 on failure.
625  */
626 int mei_cl_read_start(struct mei_cl *cl, size_t length)
627 {
628 	struct mei_device *dev;
629 	struct mei_cl_cb *cb;
630 	int rets;
631 	int i;
632 
633 	if (WARN_ON(!cl || !cl->dev))
634 		return -ENODEV;
635 
636 	dev = cl->dev;
637 
638 	if (!mei_cl_is_connected(cl))
639 		return -ENODEV;
640 
641 	if (cl->read_cb) {
642 		dev_dbg(&dev->pdev->dev, "read is pending.\n");
643 		return -EBUSY;
644 	}
645 	i = mei_me_cl_by_id(dev, cl->me_client_id);
646 	if (i < 0) {
647 		dev_err(&dev->pdev->dev, "no such me client %d\n",
648 			cl->me_client_id);
649 		return  -ENODEV;
650 	}
651 
652 	cb = mei_io_cb_init(cl, NULL);
653 	if (!cb)
654 		return -ENOMEM;
655 
656 	/* always allocate at least client max message */
657 	length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length);
658 	rets = mei_io_cb_alloc_resp_buf(cb, length);
659 	if (rets)
660 		goto err;
661 
662 	cb->fop_type = MEI_FOP_READ;
663 	cl->read_cb = cb;
664 	if (dev->hbuf_is_ready) {
665 		dev->hbuf_is_ready = false;
666 		if (mei_hbm_cl_flow_control_req(dev, cl)) {
667 			rets = -ENODEV;
668 			goto err;
669 		}
670 		list_add_tail(&cb->list, &dev->read_list.list);
671 	} else {
672 		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
673 	}
674 	return rets;
675 err:
676 	mei_io_cb_free(cb);
677 	return rets;
678 }
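/*
 * Example: starting a read and waiting for it to complete, loosely
 * following the character device read path. Illustrative sketch only;
 * signal handling and buffer copy-out are trimmed.
 *
 *	rets = mei_cl_read_start(cl, length);
 *	if (rets && rets != -EBUSY)
 *		return rets;
 *	mutex_unlock(&dev->device_lock);
 *	wait_event_interruptible(cl->rx_wait,
 *			cl->reading_state == MEI_READ_COMPLETE);
 *	mutex_lock(&dev->device_lock);
 *	cb = cl->read_cb;
 */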
679 
680 /**
681  * mei_cl_irq_write_complete - write a message to device
682  *	from the interrupt thread context
683  *
684  * @cl: client
685  * @cb: callback block.
686  * @slots: free slots.
687  * @cmpl_list: complete list.
688  *
689  * returns 0, OK; otherwise error.
690  */
691 int mei_cl_irq_write_complete(struct mei_cl *cl, struct mei_cl_cb *cb,
692 				     s32 *slots, struct mei_cl_cb *cmpl_list)
693 {
694 	struct mei_device *dev = cl->dev;
695 	struct mei_msg_hdr mei_hdr;
696 	size_t len = cb->request_buffer.size - cb->buf_idx;
697 	u32 msg_slots = mei_data2slots(len);
698 
699 	mei_hdr.host_addr = cl->host_client_id;
700 	mei_hdr.me_addr = cl->me_client_id;
701 	mei_hdr.reserved = 0;
702 
703 	if (*slots >= msg_slots) {
704 		mei_hdr.length = len;
705 		mei_hdr.msg_complete = 1;
706 	/* Split the message only if we can write the whole host buffer */
707 	} else if (*slots == dev->hbuf_depth) {
708 		msg_slots = *slots;
709 		len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
710 		mei_hdr.length = len;
711 		mei_hdr.msg_complete = 0;
712 	} else {
713 		/* wait for next time the host buffer is empty */
714 		return 0;
715 	}
716 
717 	dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
718 			cb->request_buffer.size, cb->buf_idx);
719 	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));
720 
721 	*slots -=  msg_slots;
722 	if (mei_write_message(dev, &mei_hdr,
723 			cb->request_buffer.data + cb->buf_idx)) {
724 		cl->status = -ENODEV;
725 		list_move_tail(&cb->list, &cmpl_list->list);
726 		return -ENODEV;
727 	}
728 
729 	cl->status = 0;
730 	cl->writing_state = MEI_WRITING;
731 	cb->buf_idx += mei_hdr.length;
732 
733 	if (mei_hdr.msg_complete) {
734 		if (mei_cl_flow_ctrl_reduce(cl))
735 			return -ENODEV;
736 		list_move_tail(&cb->list, &dev->write_waiting_list.list);
737 	}
738 
739 	return 0;
740 }
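/*
 * Worked example of the slot arithmetic above, assuming one slot is one
 * 32-bit word and struct mei_msg_hdr occupies a single slot: a 100 byte
 * remainder needs (4 + 100) / 4 = 26 slots, so with *slots >= 26 the
 * message goes out complete; if instead the whole host buffer is free but
 * only hbuf_depth == 16 slots deep, 16 * 4 - 4 = 60 payload bytes are
 * written and msg_complete stays 0 until a later pass.
 */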
741 
742 /**
743  * mei_cl_write - submit a write cb to mei device
744  *	assumes device_lock is locked
745  *
746  * @cl: host client
747  * @cb: write callback with filled data
748  *
749  * returns number of bytes sent on success, <0 on failure.
750  */
751 int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
752 {
753 	struct mei_device *dev;
754 	struct mei_msg_data *buf;
755 	struct mei_msg_hdr mei_hdr;
756 	int rets;
757 
758 
759 	if (WARN_ON(!cl || !cl->dev))
760 		return -ENODEV;
761 
762 	if (WARN_ON(!cb))
763 		return -EINVAL;
764 
765 	dev = cl->dev;
766 
767 
768 	buf = &cb->request_buffer;
769 
770 	dev_dbg(&dev->pdev->dev, "mei_cl_write %d\n", buf->size);
771 
772 
773 	cb->fop_type = MEI_FOP_WRITE;
774 
775 	rets = mei_cl_flow_ctrl_creds(cl);
776 	if (rets < 0)
777 		goto err;
778 
779 	/* Host buffer is not ready, we queue the request */
780 	if (rets == 0 || !dev->hbuf_is_ready) {
781 		cb->buf_idx = 0;
782 		/* unsetting complete will enqueue the cb for write */
783 		mei_hdr.msg_complete = 0;
784 		rets = buf->size;
785 		goto out;
786 	}
787 
788 	dev->hbuf_is_ready = false;
789 
790 	/* Check for a maximum length */
791 	if (buf->size > mei_hbuf_max_len(dev)) {
792 		mei_hdr.length = mei_hbuf_max_len(dev);
793 		mei_hdr.msg_complete = 0;
794 	} else {
795 		mei_hdr.length = buf->size;
796 		mei_hdr.msg_complete = 1;
797 	}
798 
799 	mei_hdr.host_addr = cl->host_client_id;
800 	mei_hdr.me_addr = cl->me_client_id;
801 	mei_hdr.reserved = 0;
802 
803 	dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n",
804 		MEI_HDR_PRM(&mei_hdr));
805 
806 
807 	if (mei_write_message(dev, &mei_hdr, buf->data)) {
808 		rets = -EIO;
809 		goto err;
810 	}
811 
812 	cl->writing_state = MEI_WRITING;
813 	cb->buf_idx = mei_hdr.length;
814 
815 	rets = buf->size;
816 out:
817 	if (mei_hdr.msg_complete) {
818 		if (mei_cl_flow_ctrl_reduce(cl)) {
819 			rets = -ENODEV;
820 			goto err;
821 		}
822 		list_add_tail(&cb->list, &dev->write_waiting_list.list);
823 	} else {
824 		list_add_tail(&cb->list, &dev->write_list.list);
825 	}
826 
827 
828 	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
829 
830 		mutex_unlock(&dev->device_lock);
831 		if (wait_event_interruptible(cl->tx_wait,
832 			cl->writing_state == MEI_WRITE_COMPLETE)) {
833 				if (signal_pending(current))
834 					rets = -EINTR;
835 				else
836 					rets = -ERESTARTSYS;
837 		}
838 		mutex_lock(&dev->device_lock);
839 	}
840 err:
841 	return rets;
842 }
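/*
 * Example: submitting a blocking write. Illustrative sketch only; "data",
 * "length" and "file" are placeholders, cl is assumed to be connected and
 * dev->device_lock to be held, error handling is trimmed, and a cb that was
 * queued successfully is freed later by the completion path rather than
 * by the caller.
 *
 *	cb = mei_io_cb_init(cl, file);
 *	rets = mei_io_cb_alloc_req_buf(cb, length);
 *	memcpy(cb->request_buffer.data, data, length);
 *	rets = mei_cl_write(cl, cb, true);
 */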
843 
844 
845 /**
846  * mei_cl_complete - processes completed operation for a client
847  *
848  * @cl: private data of the file object.
849  * @cb: callback block.
850  */
851 void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
852 {
853 	if (cb->fop_type == MEI_FOP_WRITE) {
854 		mei_io_cb_free(cb);
855 		cb = NULL;
856 		cl->writing_state = MEI_WRITE_COMPLETE;
857 		if (waitqueue_active(&cl->tx_wait))
858 			wake_up_interruptible(&cl->tx_wait);
859 
860 	} else if (cb->fop_type == MEI_FOP_READ &&
861 			MEI_READING == cl->reading_state) {
862 		cl->reading_state = MEI_READ_COMPLETE;
863 		if (waitqueue_active(&cl->rx_wait))
864 			wake_up_interruptible(&cl->rx_wait);
865 		else
866 			mei_cl_bus_rx_event(cl);
867 
868 	}
869 }
870 
871 
872 /**
873  * mei_cl_all_disconnect - forcefully disconnect all connected clients
874  *
875  * @dev - mei device
876  */
877 
878 void mei_cl_all_disconnect(struct mei_device *dev)
879 {
880 	struct mei_cl *cl, *next;
881 
882 	list_for_each_entry_safe(cl, next, &dev->file_list, link) {
883 		cl->state = MEI_FILE_DISCONNECTED;
884 		cl->mei_flow_ctrl_creds = 0;
885 		cl->read_cb = NULL;
886 		cl->timer_count = 0;
887 	}
888 }
889 
890 
891 /**
892  * mei_cl_all_wakeup - wake up all readers and writers so they can be interrupted
893  *
894  * @dev  - mei device
895  */
896 void mei_cl_all_wakeup(struct mei_device *dev)
897 {
898 	struct mei_cl *cl, *next;
899 	list_for_each_entry_safe(cl, next, &dev->file_list, link) {
900 		if (waitqueue_active(&cl->rx_wait)) {
901 			dev_dbg(&dev->pdev->dev, "Waking up reading client!\n");
902 			wake_up_interruptible(&cl->rx_wait);
903 		}
904 		if (waitqueue_active(&cl->tx_wait)) {
905 			dev_dbg(&dev->pdev->dev, "Waking up writing client!\n");
906 			wake_up_interruptible(&cl->tx_wait);
907 		}
908 	}
909 }
910 
911 /**
912  * mei_cl_all_write_clear - clear all pending writes
913  *
914  * @dev - mei device
915  */
916 void mei_cl_all_write_clear(struct mei_device *dev)
917 {
918 	struct mei_cl_cb *cb, *next;
919 
920 	list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
921 		list_del(&cb->list);
922 		mei_io_cb_free(cb);
923 	}
924 }
925 
926 
927