1 /*
2  * ISHTP client logic
3  *
4  * Copyright (c) 2003-2016, Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13  * more details.
14  *
15  */
16 
17 #include <linux/slab.h>
18 #include <linux/sched.h>
19 #include <linux/wait.h>
20 #include <linux/delay.h>
21 #include <linux/dma-mapping.h>
22 #include "hbm.h"
23 #include "client.h"
24 
/**
 * ishtp_cl_get_tx_free_buffer_size() - Get free buffer size in TX ring
 * @cl: ishtp client instance
 *
 * Return: the total payload size, in bytes, currently available in the
 * client's TX ring (free ring entries times the maximum message length)
 */
int ishtp_cl_get_tx_free_buffer_size(struct ishtp_cl *cl)
26 {
27 	unsigned long tx_free_flags;
28 	int size;
29 
30 	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	size = cl->tx_ring_free_size *
		cl->device->fw_client->props.max_msg_length;
32 	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
33 
34 	return size;
35 }
36 EXPORT_SYMBOL(ishtp_cl_get_tx_free_buffer_size);
37 
/**
 * ishtp_cl_get_tx_free_rings() - Get the number of free TX ring entries
 * @cl: ishtp client instance
 *
 * Return: the current number of free TX ring buffers
 */
int ishtp_cl_get_tx_free_rings(struct ishtp_cl *cl)
39 {
40 	return cl->tx_ring_free_size;
41 }
42 EXPORT_SYMBOL(ishtp_cl_get_tx_free_rings);
43 
44 /**
45  * ishtp_read_list_flush() - Flush read queue
46  * @cl: ishtp client instance
47  *
48  * Used to remove all entries from read queue for a client
49  */
50 static void ishtp_read_list_flush(struct ishtp_cl *cl)
51 {
52 	struct ishtp_cl_rb *rb;
53 	struct ishtp_cl_rb *next;
54 	unsigned long	flags;
55 
56 	spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
57 	list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
58 		if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
59 			list_del(&rb->list);
60 			ishtp_io_rb_free(rb);
61 		}
62 	spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
63 }
64 
65 /**
66  * ishtp_cl_flush_queues() - Flush all queues for a client
67  * @cl: ishtp client instance
68  *
 * Used to remove all queues for a client. This is called when a client device
 * needs to be reset due to an error, an S3 resume or module removal
71  *
72  * Return: 0 on success else -EINVAL if device is NULL
73  */
74 int ishtp_cl_flush_queues(struct ishtp_cl *cl)
75 {
76 	if (WARN_ON(!cl || !cl->dev))
77 		return -EINVAL;
78 
79 	ishtp_read_list_flush(cl);
80 
81 	return 0;
82 }
83 EXPORT_SYMBOL(ishtp_cl_flush_queues);
84 
85 /**
86  * ishtp_cl_init() - Initialize all fields of a client device
87  * @cl: ishtp client instance
88  * @dev: ishtp device
89  *
 * Initialize the client device fields: spinlocks, queues etc.
91  * This function is called during new client creation
92  */
93 static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
94 {
95 	memset(cl, 0, sizeof(struct ishtp_cl));
96 	init_waitqueue_head(&cl->wait_ctrl_res);
97 	spin_lock_init(&cl->free_list_spinlock);
98 	spin_lock_init(&cl->in_process_spinlock);
99 	spin_lock_init(&cl->tx_list_spinlock);
100 	spin_lock_init(&cl->tx_free_list_spinlock);
101 	spin_lock_init(&cl->fc_spinlock);
102 	INIT_LIST_HEAD(&cl->link);
103 	cl->dev = dev;
104 
105 	INIT_LIST_HEAD(&cl->free_rb_list.list);
106 	INIT_LIST_HEAD(&cl->tx_list.list);
107 	INIT_LIST_HEAD(&cl->tx_free_list.list);
108 	INIT_LIST_HEAD(&cl->in_process_list.list);
109 
110 	cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
111 	cl->tx_ring_size = CL_DEF_TX_RING_SIZE;
112 	cl->tx_ring_free_size = cl->tx_ring_size;
113 
114 	/* dma */
115 	cl->last_tx_path = CL_TX_PATH_IPC;
116 	cl->last_dma_acked = 1;
117 	cl->last_dma_addr = NULL;
118 	cl->last_ipc_acked = 1;
119 }
120 
121 /**
122  * ishtp_cl_allocate() - allocates client structure and sets it up.
123  * @dev: ishtp device
124  *
 * Allocate memory for a new client device and initialize its fields.
126  *
127  * Return: The allocated client instance or NULL on failure
128  */
129 struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev)
130 {
131 	struct ishtp_cl *cl;
132 
133 	cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
134 	if (!cl)
135 		return NULL;
136 
137 	ishtp_cl_init(cl, dev);
138 	return cl;
139 }
140 EXPORT_SYMBOL(ishtp_cl_allocate);
141 
142 /**
143  * ishtp_cl_free() - Frees a client device
144  * @cl: client device instance
145  *
146  * Frees a client device
147  */
148 void	ishtp_cl_free(struct ishtp_cl *cl)
149 {
150 	struct ishtp_device *dev;
151 	unsigned long flags;
152 
153 	if (!cl)
154 		return;
155 
156 	dev = cl->dev;
157 	if (!dev)
158 		return;
159 
160 	spin_lock_irqsave(&dev->cl_list_lock, flags);
161 	ishtp_cl_free_rx_ring(cl);
162 	ishtp_cl_free_tx_ring(cl);
163 	kfree(cl);
164 	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
165 }
166 EXPORT_SYMBOL(ishtp_cl_free);
167 
168 /**
169  * ishtp_cl_link() - Reserve a host id and link the client instance
170  * @cl: client device instance
 * @id: host client id to use. It can be ISHTP_HOST_CLIENT_ID_ANY if any
 *	available id can be used
 *
 * This allocates a single bit in the hostmap and makes sure that not too
 * many client sessions are opened at the same time. Once allocated, the
 * client device instance is added to the ishtp device's current client
 * list
179  *
180  * Return: 0 or error code on failure
181  */
182 int ishtp_cl_link(struct ishtp_cl *cl, int id)
183 {
184 	struct ishtp_device *dev;
185 	unsigned long	flags, flags_cl;
186 	int	ret = 0;
187 
188 	if (WARN_ON(!cl || !cl->dev))
189 		return -EINVAL;
190 
191 	dev = cl->dev;
192 
193 	spin_lock_irqsave(&dev->device_lock, flags);
194 
195 	if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
196 		ret = -EMFILE;
197 		goto unlock_dev;
198 	}
199 
	/* If id is not assigned, get one */
201 	if (id == ISHTP_HOST_CLIENT_ID_ANY)
202 		id = find_first_zero_bit(dev->host_clients_map,
203 			ISHTP_CLIENTS_MAX);
204 
205 	if (id >= ISHTP_CLIENTS_MAX) {
206 		spin_unlock_irqrestore(&dev->device_lock, flags);
		dev_err(&cl->device->dev, "id exceeded %d\n", ISHTP_CLIENTS_MAX);
208 		return -ENOENT;
209 	}
210 
211 	dev->open_handle_count++;
212 	cl->host_client_id = id;
213 	spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
214 	if (dev->dev_state != ISHTP_DEV_ENABLED) {
215 		ret = -ENODEV;
216 		goto unlock_cl;
217 	}
218 	list_add_tail(&cl->link, &dev->cl_list);
219 	set_bit(id, dev->host_clients_map);
220 	cl->state = ISHTP_CL_INITIALIZING;
221 
222 unlock_cl:
223 	spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
224 unlock_dev:
225 	spin_unlock_irqrestore(&dev->device_lock, flags);
226 	return ret;
227 }
228 EXPORT_SYMBOL(ishtp_cl_link);
229 
230 /**
231  * ishtp_cl_unlink() - remove fw_cl from the client device list
232  * @cl: client device instance
233  *
 * Remove a client previously linked to an ishtp device
235  */
236 void ishtp_cl_unlink(struct ishtp_cl *cl)
237 {
238 	struct ishtp_device *dev;
239 	struct ishtp_cl *pos;
240 	unsigned long	flags;
241 
242 	/* don't shout on error exit path */
243 	if (!cl || !cl->dev)
244 		return;
245 
246 	dev = cl->dev;
247 
248 	spin_lock_irqsave(&dev->device_lock, flags);
249 	if (dev->open_handle_count > 0) {
250 		clear_bit(cl->host_client_id, dev->host_clients_map);
251 		dev->open_handle_count--;
252 	}
253 	spin_unlock_irqrestore(&dev->device_lock, flags);
254 
255 	/*
256 	 * This checks that 'cl' is actually linked into device's structure,
257 	 * before attempting 'list_del'
258 	 */
259 	spin_lock_irqsave(&dev->cl_list_lock, flags);
260 	list_for_each_entry(pos, &dev->cl_list, link)
261 		if (cl->host_client_id == pos->host_client_id) {
262 			list_del_init(&pos->link);
263 			break;
264 		}
265 	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
266 }
267 EXPORT_SYMBOL(ishtp_cl_unlink);
268 
269 /**
270  * ishtp_cl_disconnect() - Send disconnect request to firmware
271  * @cl: client device instance
272  *
 * Send a disconnect request for a client to firmware. The caller is
 * expected to have set cl->state to ISHTP_CL_DISCONNECTING beforehand.
274  *
275  * Return: 0 if successful disconnect response from the firmware or error
276  * code on failure
277  */
278 int ishtp_cl_disconnect(struct ishtp_cl *cl)
279 {
280 	struct ishtp_device *dev;
281 	int err;
282 
283 	if (WARN_ON(!cl || !cl->dev))
284 		return -ENODEV;
285 
286 	dev = cl->dev;
287 
288 	dev->print_log(dev, "%s() state %d\n", __func__, cl->state);
289 
290 	if (cl->state != ISHTP_CL_DISCONNECTING) {
		dev->print_log(dev, "%s() Not in disconnecting state\n",
			       __func__);
292 		return 0;
293 	}
294 
295 	if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
296 		dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
297 		dev_err(&cl->device->dev, "failed to disconnect.\n");
298 		return -ENODEV;
299 	}
300 
301 	err = wait_event_interruptible_timeout(cl->wait_ctrl_res,
302 			(dev->dev_state != ISHTP_DEV_ENABLED ||
303 			cl->state == ISHTP_CL_DISCONNECTED),
304 			ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));
305 
306 	/*
307 	 * If FW reset arrived, this will happen. Don't check cl->,
308 	 * as 'cl' may be freed already
309 	 */
310 	if (dev->dev_state != ISHTP_DEV_ENABLED) {
311 		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
312 			       __func__);
313 		return -ENODEV;
314 	}
315 
316 	if (cl->state == ISHTP_CL_DISCONNECTED) {
317 		dev->print_log(dev, "%s() successful\n", __func__);
318 		return 0;
319 	}
320 
321 	return -ENODEV;
322 }
323 EXPORT_SYMBOL(ishtp_cl_disconnect);
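
/*
 * Illustrative teardown sequence as seen from a client driver; a minimal
 * sketch of the expected call order, not verbatim from any in-tree driver:
 *
 *	cl->state = ISHTP_CL_DISCONNECTING;
 *	ishtp_cl_disconnect(cl);
 *	ishtp_cl_unlink(cl);
 *	ishtp_cl_flush_queues(cl);
 *	ishtp_cl_free(cl);
 */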
324 
325 /**
 * ishtp_cl_is_other_connecting() - Check if another client is connecting
 * @cl: client device instance
 *
 * Checks if another client with the same fw client id is currently connecting
 *
 * Return: true if another such client is connecting, else false
332  */
333 static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
334 {
335 	struct ishtp_device *dev;
336 	struct ishtp_cl *pos;
337 	unsigned long	flags;
338 
339 	if (WARN_ON(!cl || !cl->dev))
340 		return false;
341 
342 	dev = cl->dev;
343 	spin_lock_irqsave(&dev->cl_list_lock, flags);
344 	list_for_each_entry(pos, &dev->cl_list, link) {
345 		if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
346 				cl->fw_client_id == pos->fw_client_id) {
347 			spin_unlock_irqrestore(&dev->cl_list_lock, flags);
348 			return true;
349 		}
350 	}
351 	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
352 
353 	return false;
354 }
355 
356 /**
357  * ishtp_cl_connect() - Send connect request to firmware
358  * @cl: client device instance
359  *
 * Send a connect request for a client to firmware. If successful, it will
 * bind the client and allocate RX and TX ring buffers
362  *
363  * Return: 0 if successful connect response from the firmware and able
364  * to bind and allocate ring buffers or error code on failure
365  */
366 int ishtp_cl_connect(struct ishtp_cl *cl)
367 {
368 	struct ishtp_device *dev;
369 	int rets;
370 
371 	if (WARN_ON(!cl || !cl->dev))
372 		return -ENODEV;
373 
374 	dev = cl->dev;
375 
376 	dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);
377 
378 	if (ishtp_cl_is_other_connecting(cl)) {
379 		dev->print_log(dev, "%s() Busy\n", __func__);
380 		return	-EBUSY;
381 	}
382 
383 	if (ishtp_hbm_cl_connect_req(dev, cl)) {
384 		dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
385 		return -ENODEV;
386 	}
387 
388 	rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
389 				(dev->dev_state == ISHTP_DEV_ENABLED &&
390 				(cl->state == ISHTP_CL_CONNECTED ||
391 				 cl->state == ISHTP_CL_DISCONNECTED)),
392 				ishtp_secs_to_jiffies(
393 					ISHTP_CL_CONNECT_TIMEOUT));
394 	/*
395 	 * If FW reset arrived, this will happen. Don't check cl->,
396 	 * as 'cl' may be freed already
397 	 */
398 	if (dev->dev_state != ISHTP_DEV_ENABLED) {
399 		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
400 			       __func__);
401 		return -EFAULT;
402 	}
403 
404 	if (cl->state != ISHTP_CL_CONNECTED) {
405 		dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
406 			       __func__);
407 		return -EFAULT;
408 	}
409 
410 	rets = cl->status;
411 	if (rets) {
412 		dev->print_log(dev, "%s() Invalid status\n", __func__);
413 		return rets;
414 	}
415 
416 	rets = ishtp_cl_device_bind(cl);
417 	if (rets) {
418 		dev->print_log(dev, "%s() Bind error\n", __func__);
419 		ishtp_cl_disconnect(cl);
420 		return rets;
421 	}
422 
423 	rets = ishtp_cl_alloc_rx_ring(cl);
424 	if (rets) {
425 		dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
426 		/* if failed allocation, disconnect */
427 		ishtp_cl_disconnect(cl);
428 		return rets;
429 	}
430 
431 	rets = ishtp_cl_alloc_tx_ring(cl);
432 	if (rets) {
433 		dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
434 		/* if failed allocation, disconnect */
435 		ishtp_cl_free_rx_ring(cl);
436 		ishtp_cl_disconnect(cl);
437 		return rets;
438 	}
439 
440 	/* Upon successful connection and allocation, emit flow-control */
441 	rets = ishtp_cl_read_start(cl);
442 
443 	dev->print_log(dev, "%s() successful\n", __func__);
444 
445 	return rets;
446 }
447 EXPORT_SYMBOL(ishtp_cl_connect);
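
/*
 * Illustrative connect flow as seen from a client driver; a minimal sketch,
 * not verbatim from any in-tree driver. 'guid' stands for the fw client
 * uuid the caller looks up:
 *
 *	cl = ishtp_cl_allocate(dev);
 *	if (!cl)
 *		return -ENOMEM;
 *	rv = ishtp_cl_link(cl, ISHTP_HOST_CLIENT_ID_ANY);
 *	i = ishtp_fw_cl_by_uuid(dev, &guid);
 *	if (i < 0)
 *		goto err;
 *	cl->fw_client_id = dev->fw_clients[i].client_id;
 *	cl->state = ISHTP_CL_CONNECTING;
 *	rv = ishtp_cl_connect(cl);
 */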
448 
449 /**
450  * ishtp_cl_read_start() - Prepare to read client message
451  * @cl: client device instance
452  *
 * Get a buffer from the pool of free read buffers and add it to the device
 * read list, ready to receive contents. Then send a flow control request to
 * the firmware so it may send the next message.
456  *
457  * Return: 0 if successful or error code on failure
458  */
459 int ishtp_cl_read_start(struct ishtp_cl *cl)
460 {
461 	struct ishtp_device *dev;
462 	struct ishtp_cl_rb *rb;
463 	int rets;
464 	int i;
465 	unsigned long	flags;
466 	unsigned long	dev_flags;
467 
468 	if (WARN_ON(!cl || !cl->dev))
469 		return -ENODEV;
470 
471 	dev = cl->dev;
472 
473 	if (cl->state != ISHTP_CL_CONNECTED)
474 		return -ENODEV;
475 
476 	if (dev->dev_state != ISHTP_DEV_ENABLED)
477 		return -ENODEV;
478 
479 	i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
480 	if (i < 0) {
481 		dev_err(&cl->device->dev, "no such fw client %d\n",
482 			cl->fw_client_id);
483 		return -ENODEV;
484 	}
485 
486 	/* The current rb is the head of the free rb list */
487 	spin_lock_irqsave(&cl->free_list_spinlock, flags);
488 	if (list_empty(&cl->free_rb_list.list)) {
489 		dev_warn(&cl->device->dev,
490 			 "[ishtp-ish] Rx buffers pool is empty\n");
491 		rets = -ENOMEM;
492 		rb = NULL;
493 		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
494 		goto out;
495 	}
496 	rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
497 	list_del_init(&rb->list);
498 	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
499 
500 	rb->cl = cl;
501 	rb->buf_idx = 0;
502 
503 	INIT_LIST_HEAD(&rb->list);
504 	rets = 0;
505 
506 	/*
507 	 * This must be BEFORE sending flow control -
508 	 * response in ISR may come too fast...
509 	 */
510 	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
511 	list_add_tail(&rb->list, &dev->read_list.list);
512 	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
513 	if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
514 		rets = -ENODEV;
515 		goto out;
516 	}
517 out:
518 	/* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
519 	if (rets && rb) {
520 		spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
521 		list_del(&rb->list);
522 		spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
523 
524 		spin_lock_irqsave(&cl->free_list_spinlock, flags);
525 		list_add_tail(&rb->list, &cl->free_rb_list.list);
526 		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
527 	}
528 	return rets;
529 }
530 
531 /**
532  * ishtp_cl_send() - Send a message to firmware
533  * @cl: client device instance
534  * @buf: message buffer
535  * @length: length of message
536  *
 * If the client is in the correct state to send a message, this function
 * gets a buffer from the tx ring, copies the message data into it and
 * queues it for transmission via ishtp_cl_send_msg()
540  *
541  * Return: 0 if successful or error code on failure
542  */
543 int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
544 {
545 	struct ishtp_device	*dev;
546 	int	id;
547 	struct ishtp_cl_tx_ring	*cl_msg;
548 	int	have_msg_to_send = 0;
549 	unsigned long	tx_flags, tx_free_flags;
550 
551 	if (WARN_ON(!cl || !cl->dev))
552 		return -ENODEV;
553 
554 	dev = cl->dev;
555 
556 	if (cl->state != ISHTP_CL_CONNECTED) {
557 		++cl->err_send_msg;
558 		return -EPIPE;
559 	}
560 
561 	if (dev->dev_state != ISHTP_DEV_ENABLED) {
562 		++cl->err_send_msg;
563 		return -ENODEV;
564 	}
565 
566 	/* Check if we have fw client device */
567 	id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
568 	if (id < 0) {
569 		++cl->err_send_msg;
570 		return -ENOENT;
571 	}
572 
573 	if (length > dev->fw_clients[id].props.max_msg_length) {
574 		++cl->err_send_msg;
575 		return -EMSGSIZE;
576 	}
577 
578 	/* No free bufs */
579 	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
580 	if (list_empty(&cl->tx_free_list.list)) {
581 		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
582 			tx_free_flags);
583 		++cl->err_send_msg;
584 		return	-ENOMEM;
585 	}
586 
587 	cl_msg = list_first_entry(&cl->tx_free_list.list,
588 		struct ishtp_cl_tx_ring, list);
589 	if (!cl_msg->send_buf.data) {
590 		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
591 			tx_free_flags);
		/* Should not happen, as the free list is pre-allocated */
		return	-EIO;
594 	}
595 	/*
596 	 * This is safe, as 'length' is already checked for not exceeding
597 	 * max ISHTP message size per client
598 	 */
599 	list_del_init(&cl_msg->list);
600 	--cl->tx_ring_free_size;
601 
602 	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
603 	memcpy(cl_msg->send_buf.data, buf, length);
604 	cl_msg->send_buf.size = length;
605 	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
606 	have_msg_to_send = !list_empty(&cl->tx_list.list);
607 	list_add_tail(&cl_msg->list, &cl->tx_list.list);
608 	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
609 
610 	if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
611 		ishtp_cl_send_msg(dev, cl);
612 
613 	return	0;
614 }
615 EXPORT_SYMBOL(ishtp_cl_send);
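
/*
 * Illustrative send from a client driver; a minimal sketch with a
 * hypothetical 'struct my_msg' payload:
 *
 *	struct my_msg msg = { ... };
 *	int rv;
 *
 *	rv = ishtp_cl_send(cl, (uint8_t *)&msg, sizeof(msg));
 *	if (rv)
 *		dev_err(&cl->device->dev, "send failed: %d\n", rv);
 */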
616 
617 /**
 * ishtp_cl_read_complete() - Handle a completely received message
 * @rb: Pointer to the completed read buffer
 *
 * Queue the buffer on the client's in-process list and, if the list was
 * empty, call ishtp_cl_bus_rx_event() to schedule processing
623  */
624 static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
625 {
626 	unsigned long	flags;
627 	int	schedule_work_flag = 0;
628 	struct ishtp_cl	*cl = rb->cl;
629 
630 	spin_lock_irqsave(&cl->in_process_spinlock, flags);
631 	/*
632 	 * if in-process list is empty, then need to schedule
633 	 * the processing thread
634 	 */
635 	schedule_work_flag = list_empty(&cl->in_process_list.list);
636 	list_add_tail(&rb->list, &cl->in_process_list.list);
637 	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);
638 
639 	if (schedule_work_flag)
640 		ishtp_cl_bus_rx_event(cl->device);
641 }
642 
643 /**
644  * ipc_tx_callback() - IPC tx callback function
645  * @prm: Pointer to client device instance
646  *
 * Send a message over IPC, either for the first time or as the completion
 * callback of the previous fragment
649  */
650 static void ipc_tx_callback(void *prm)
651 {
652 	struct ishtp_cl	*cl = prm;
653 	struct ishtp_cl_tx_ring	*cl_msg;
654 	size_t	rem;
655 	struct ishtp_device	*dev = (cl ? cl->dev : NULL);
656 	struct ishtp_msg_hdr	ishtp_hdr;
657 	unsigned long	tx_flags, tx_free_flags;
658 	unsigned char	*pmsg;
659 
660 	if (!dev)
661 		return;
662 
663 	/*
664 	 * Other conditions if some critical error has
665 	 * occurred before this callback is called
666 	 */
667 	if (dev->dev_state != ISHTP_DEV_ENABLED)
668 		return;
669 
670 	if (cl->state != ISHTP_CL_CONNECTED)
671 		return;
672 
673 	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
674 	if (list_empty(&cl->tx_list.list)) {
675 		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
676 		return;
677 	}
678 
679 	if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
680 		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
681 		return;
682 	}
683 
684 	if (!cl->sending) {
685 		--cl->ishtp_flow_ctrl_creds;
686 		cl->last_ipc_acked = 0;
687 		cl->last_tx_path = CL_TX_PATH_IPC;
688 		cl->sending = 1;
689 	}
690 
691 	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
692 			    list);
693 	rem = cl_msg->send_buf.size - cl->tx_offs;
694 
695 	ishtp_hdr.host_addr = cl->host_client_id;
696 	ishtp_hdr.fw_addr = cl->fw_client_id;
697 	ishtp_hdr.reserved = 0;
698 	pmsg = cl_msg->send_buf.data + cl->tx_offs;
699 
	/*
	 * If the remainder fits into a single IPC fragment, send it as the
	 * last fragment and recycle the ring buffer; otherwise send one
	 * MTU-sized fragment and re-arm this callback for the rest.
	 */
	if (rem <= dev->mtu) {
701 		ishtp_hdr.length = rem;
702 		ishtp_hdr.msg_complete = 1;
703 		cl->sending = 0;
704 		list_del_init(&cl_msg->list);	/* Must be before write */
705 		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
706 		/* Submit to IPC queue with no callback */
707 		ishtp_write_message(dev, &ishtp_hdr, pmsg);
708 		spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
709 		list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
710 		++cl->tx_ring_free_size;
711 		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
712 			tx_free_flags);
713 	} else {
714 		/* Send IPC fragment */
715 		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
716 		cl->tx_offs += dev->mtu;
717 		ishtp_hdr.length = dev->mtu;
718 		ishtp_hdr.msg_complete = 0;
719 		ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
720 	}
721 }
722 
723 /**
 * ishtp_cl_send_msg_ipc() - Send message using IPC
725  * @dev: ISHTP device instance
726  * @cl: Pointer to client device instance
727  *
728  * Send message over IPC not using DMA
729  */
730 static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
731 				  struct ishtp_cl *cl)
732 {
733 	/* If last DMA message wasn't acked yet, leave this one in Tx queue */
734 	if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
735 		return;
736 
737 	cl->tx_offs = 0;
738 	ipc_tx_callback(cl);
739 	++cl->send_msg_cnt_ipc;
740 }
741 
742 /**
 * ishtp_cl_send_msg_dma() - Send message using DMA
744  * @dev: ISHTP device instance
745  * @cl: Pointer to client device instance
746  *
747  * Send message using DMA
748  */
749 static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
750 	struct ishtp_cl *cl)
751 {
752 	struct ishtp_msg_hdr	hdr;
753 	struct dma_xfer_hbm	dma_xfer;
754 	unsigned char	*msg_addr;
755 	int off;
756 	struct ishtp_cl_tx_ring	*cl_msg;
757 	unsigned long tx_flags, tx_free_flags;
758 
759 	/* If last IPC message wasn't acked yet, leave this one in Tx queue */
760 	if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
761 		return;
762 
763 	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
764 	if (list_empty(&cl->tx_list.list)) {
765 		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
766 		return;
767 	}
768 
769 	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
770 		list);
771 
772 	msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
773 	if (!msg_addr) {
774 		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
775 		if (dev->transfer_path == CL_TX_PATH_DEFAULT)
776 			ishtp_cl_send_msg_ipc(dev, cl);
777 		return;
778 	}
779 
780 	list_del_init(&cl_msg->list);	/* Must be before write */
781 	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
782 
783 	--cl->ishtp_flow_ctrl_creds;
784 	cl->last_dma_acked = 0;
785 	cl->last_dma_addr = msg_addr;
786 	cl->last_tx_path = CL_TX_PATH_DMA;
787 
788 	/* write msg to dma buf */
789 	memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);
790 
791 	/* send dma_xfer hbm msg */
792 	off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
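	/*
	 * 'off' is the offset of the carved-out region within the host DMA
	 * tx buffer; the firmware is handed the matching physical address.
	 */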
793 	ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
794 	dma_xfer.hbm = DMA_XFER;
795 	dma_xfer.fw_client_id = cl->fw_client_id;
796 	dma_xfer.host_client_id = cl->host_client_id;
797 	dma_xfer.reserved = 0;
798 	dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
799 	dma_xfer.msg_length = cl_msg->send_buf.size;
800 	dma_xfer.reserved2 = 0;
801 	ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
802 	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
803 	list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
804 	++cl->tx_ring_free_size;
805 	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
806 	++cl->send_msg_cnt_dma;
807 }
808 
809 /**
 * ishtp_cl_send_msg() - Send message using DMA or IPC
811  * @dev: ISHTP device instance
812  * @cl: Pointer to client device instance
813  *
814  * Send message using DMA or IPC based on transfer_path
815  */
816 void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
817 {
818 	if (dev->transfer_path == CL_TX_PATH_DMA)
819 		ishtp_cl_send_msg_dma(dev, cl);
820 	else
821 		ishtp_cl_send_msg_ipc(dev, cl);
822 }
823 
824 /**
 * recv_ishtp_cl_msg() - Receive client message
826  * @dev: ISHTP device instance
827  * @ishtp_hdr: Pointer to message header
828  *
829  * Receive and dispatch ISHTP client messages. This function executes in ISR
830  * or work queue context
831  */
832 void recv_ishtp_cl_msg(struct ishtp_device *dev,
833 		       struct ishtp_msg_hdr *ishtp_hdr)
834 {
835 	struct ishtp_cl *cl;
836 	struct ishtp_cl_rb *rb;
837 	struct ishtp_cl_rb *new_rb;
838 	unsigned char *buffer = NULL;
839 	struct ishtp_cl_rb *complete_rb = NULL;
840 	unsigned long	flags;
841 	int	rb_count;
842 
843 	if (ishtp_hdr->reserved) {
844 		dev_err(dev->devc, "corrupted message header.\n");
845 		goto	eoi;
846 	}
847 
848 	if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
849 		dev_err(dev->devc,
850 			"ISHTP message length in hdr exceeds IPC MTU\n");
851 		goto	eoi;
852 	}
853 
854 	spin_lock_irqsave(&dev->read_list_spinlock, flags);
855 	rb_count = -1;
856 	list_for_each_entry(rb, &dev->read_list.list, list) {
857 		++rb_count;
858 		cl = rb->cl;
859 		if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
860 				cl->fw_client_id == ishtp_hdr->fw_addr) ||
861 				!(cl->state == ISHTP_CL_CONNECTED))
862 			continue;
863 
		/* If no Rx buffer is allocated, disband the rb */
865 		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
866 			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
867 			dev_err(&cl->device->dev,
868 				"Rx buffer is not allocated.\n");
869 			list_del(&rb->list);
870 			ishtp_io_rb_free(rb);
871 			cl->status = -ENOMEM;
872 			goto	eoi;
873 		}
874 
875 		/*
876 		 * If message buffer overflown (exceeds max. client msg
877 		 * size, drop message and return to free buffer.
878 		 * Do we need to disconnect such a client? (We don't send
879 		 * back FC, so communication will be stuck anyway)
880 		 */
881 		if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
882 			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
883 			dev_err(&cl->device->dev,
884 				"message overflow. size %d len %d idx %ld\n",
885 				rb->buffer.size, ishtp_hdr->length,
886 				rb->buf_idx);
887 			list_del(&rb->list);
888 			ishtp_cl_io_rb_recycle(rb);
889 			cl->status = -EIO;
890 			goto	eoi;
891 		}
892 
		/* Append this fragment at the buffer's current fill offset */
		buffer = rb->buffer.data + rb->buf_idx;
894 		dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);
895 
896 		rb->buf_idx += ishtp_hdr->length;
897 		if (ishtp_hdr->msg_complete) {
898 			/* Last fragment in message - it's complete */
899 			cl->status = 0;
900 			list_del(&rb->list);
901 			complete_rb = rb;
902 
903 			--cl->out_flow_ctrl_creds;
904 			/*
905 			 * the whole msg arrived, send a new FC, and add a new
906 			 * rb buffer for the next coming msg
907 			 */
908 			spin_lock(&cl->free_list_spinlock);
909 
910 			if (!list_empty(&cl->free_rb_list.list)) {
911 				new_rb = list_entry(cl->free_rb_list.list.next,
912 					struct ishtp_cl_rb, list);
913 				list_del_init(&new_rb->list);
914 				spin_unlock(&cl->free_list_spinlock);
915 				new_rb->cl = cl;
916 				new_rb->buf_idx = 0;
917 				INIT_LIST_HEAD(&new_rb->list);
918 				list_add_tail(&new_rb->list,
919 					&dev->read_list.list);
920 
921 				ishtp_hbm_cl_flow_control_req(dev, cl);
922 			} else {
923 				spin_unlock(&cl->free_list_spinlock);
924 			}
925 		}
926 		/* One more fragment in message (even if this was last) */
927 		++cl->recv_msg_num_frags;
928 
929 		/*
930 		 * We can safely break here (and in BH too),
931 		 * a single input message can go only to a single request!
932 		 */
933 		break;
934 	}
935 
936 	spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
937 	/* If it's nobody's message, just read and discard it */
938 	if (!buffer) {
939 		uint8_t	rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];
940 
941 		dev_err(dev->devc, "Dropped Rx msg - no request\n");
942 		dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
943 		goto	eoi;
944 	}
945 
946 	if (complete_rb) {
947 		cl = complete_rb->cl;
948 		cl->ts_rx = ktime_get();
949 		++cl->recv_msg_cnt_ipc;
950 		ishtp_cl_read_complete(complete_rb);
951 	}
952 eoi:
953 	return;
954 }
955 
956 /**
 * recv_ishtp_cl_msg_dma() - Receive client message via DMA
958  * @dev: ISHTP device instance
959  * @msg: message pointer
960  * @hbm: hbm buffer
961  *
962  * Receive and dispatch ISHTP client messages using DMA. This function executes
963  * in ISR or work queue context
964  */
965 void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
966 			   struct dma_xfer_hbm *hbm)
967 {
968 	struct ishtp_cl *cl;
969 	struct ishtp_cl_rb *rb;
970 	struct ishtp_cl_rb *new_rb;
971 	unsigned char *buffer = NULL;
972 	struct ishtp_cl_rb *complete_rb = NULL;
973 	unsigned long	flags;
974 
975 	spin_lock_irqsave(&dev->read_list_spinlock, flags);
976 
977 	list_for_each_entry(rb, &dev->read_list.list, list) {
978 		cl = rb->cl;
979 		if (!cl || !(cl->host_client_id == hbm->host_client_id &&
980 				cl->fw_client_id == hbm->fw_client_id) ||
981 				!(cl->state == ISHTP_CL_CONNECTED))
982 			continue;
983 
984 		/*
985 		 * If no Rx buffer is allocated, disband the rb
986 		 */
987 		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
988 			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
989 			dev_err(&cl->device->dev,
990 				"response buffer is not allocated.\n");
991 			list_del(&rb->list);
992 			ishtp_io_rb_free(rb);
993 			cl->status = -ENOMEM;
994 			goto	eoi;
995 		}
996 
997 		/*
998 		 * If message buffer overflown (exceeds max. client msg
999 		 * size, drop message and return to free buffer.
1000 		 * Do we need to disconnect such a client? (We don't send
1001 		 * back FC, so communication will be stuck anyway)
1002 		 */
1003 		if (rb->buffer.size < hbm->msg_length) {
1004 			spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
1005 			dev_err(&cl->device->dev,
1006 				"message overflow. size %d len %d idx %ld\n",
1007 				rb->buffer.size, hbm->msg_length, rb->buf_idx);
1008 			list_del(&rb->list);
1009 			ishtp_cl_io_rb_recycle(rb);
1010 			cl->status = -EIO;
1011 			goto	eoi;
1012 		}
1013 
1014 		buffer = rb->buffer.data;
1015 		memcpy(buffer, msg, hbm->msg_length);
1016 		rb->buf_idx = hbm->msg_length;
1017 
1018 		/* Last fragment in message - it's complete */
1019 		cl->status = 0;
1020 		list_del(&rb->list);
1021 		complete_rb = rb;
1022 
1023 		--cl->out_flow_ctrl_creds;
1024 		/*
1025 		 * the whole msg arrived, send a new FC, and add a new
1026 		 * rb buffer for the next coming msg
1027 		 */
1028 		spin_lock(&cl->free_list_spinlock);
1029 
1030 		if (!list_empty(&cl->free_rb_list.list)) {
1031 			new_rb = list_entry(cl->free_rb_list.list.next,
1032 				struct ishtp_cl_rb, list);
1033 			list_del_init(&new_rb->list);
1034 			spin_unlock(&cl->free_list_spinlock);
1035 			new_rb->cl = cl;
1036 			new_rb->buf_idx = 0;
1037 			INIT_LIST_HEAD(&new_rb->list);
1038 			list_add_tail(&new_rb->list,
1039 				&dev->read_list.list);
1040 
1041 			ishtp_hbm_cl_flow_control_req(dev, cl);
1042 		} else {
1043 			spin_unlock(&cl->free_list_spinlock);
1044 		}
1045 
1046 		/* One more fragment in message (this is always last) */
1047 		++cl->recv_msg_num_frags;
1048 
1049 		/*
1050 		 * We can safely break here (and in BH too),
1051 		 * a single input message can go only to a single request!
1052 		 */
1053 		break;
1054 	}
1055 
1056 	spin_unlock_irqrestore(&dev->read_list_spinlock, flags);
1057 	/* If it's nobody's message, just read and discard it */
1058 	if (!buffer) {
1059 		dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
1060 		goto	eoi;
1061 	}
1062 
1063 	if (complete_rb) {
1064 		cl = complete_rb->cl;
1065 		cl->ts_rx = ktime_get();
1066 		++cl->recv_msg_cnt_dma;
1067 		ishtp_cl_read_complete(complete_rb);
1068 	}
1069 eoi:
1070 	return;
1071 }
1072