xref: /openbmc/linux/drivers/hsi/clients/cmt_speech.c (revision 1c71222e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * cmt_speech.c - HSI CMT speech driver
4  *
5  * Copyright (C) 2008,2009,2010 Nokia Corporation. All rights reserved.
6  *
7  * Contact: Kai Vehmanen <kai.vehmanen@nokia.com>
8  * Original author: Peter Ujfalusi <peter.ujfalusi@nokia.com>
9  */
10 
11 #include <linux/errno.h>
12 #include <linux/module.h>
13 #include <linux/types.h>
14 #include <linux/init.h>
15 #include <linux/device.h>
16 #include <linux/miscdevice.h>
17 #include <linux/mm.h>
18 #include <linux/slab.h>
19 #include <linux/fs.h>
20 #include <linux/poll.h>
21 #include <linux/sched/signal.h>
22 #include <linux/ioctl.h>
23 #include <linux/uaccess.h>
24 #include <linux/pm_qos.h>
25 #include <linux/hsi/hsi.h>
26 #include <linux/hsi/ssi_protocol.h>
27 #include <linux/hsi/cs-protocol.h>
28 
29 #define CS_MMAP_SIZE	PAGE_SIZE
30 
31 struct char_queue {
32 	struct list_head	list;
33 	u32			msg;
34 };
35 
36 struct cs_char {
37 	unsigned int		opened;
38 	struct hsi_client	*cl;
39 	struct cs_hsi_iface	*hi;
40 	struct list_head	chardev_queue;
41 	struct list_head	dataind_queue;
42 	int			dataind_pending;
43 	/* mmap things */
44 	unsigned long		mmap_base;
45 	unsigned long		mmap_size;
46 	spinlock_t		lock;
47 	struct fasync_struct	*async_queue;
48 	wait_queue_head_t	wait;
49 	/* hsi channel ids */
50 	int                     channel_id_cmd;
51 	int                     channel_id_data;
52 };
53 
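/*
 * Per-channel state bits, tracked separately for the control and data
 * channels (control_state/data_state in struct cs_hsi_iface):
 * READING/WRITING mark a pending transfer, POLL a zero-length peek read,
 * and ERROR a failed transfer that blocks further activity on the channel.
 */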
54 #define SSI_CHANNEL_STATE_READING	1
55 #define SSI_CHANNEL_STATE_WRITING	(1 << 1)
56 #define SSI_CHANNEL_STATE_POLL		(1 << 2)
57 #define SSI_CHANNEL_STATE_ERROR		(1 << 3)
58 
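/*
 * Command words written by user space carry a domain field: TARGET_REMOTE
 * means the command is forwarded to the modem on the HSI control channel,
 * TARGET_LOCAL means it is handled by this driver (see cs_hsi_command()).
 */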
59 #define TARGET_MASK			0xf000000
60 #define TARGET_REMOTE			(1 << CS_DOMAIN_SHIFT)
61 #define TARGET_LOCAL			0
62 
63 /* Number of pre-allocated command buffers */
64 #define CS_MAX_CMDS		        4
65 
66 /*
67  * During data transfers, transactions must be handled
68  * within 20ms (fixed value in cmtspeech HSI protocol)
69  */
70 #define CS_QOS_LATENCY_FOR_DATA_USEC	20000
71 
72 /* Timeout to wait for pending HSI transfers to complete */
73 #define CS_HSI_TRANSFER_TIMEOUT_MS      500
74 
75 
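/*
 * With CS_FEAT_ROLLING_RX_COUNTER, the rx pointer exposed to user space is
 * not a plain buffer index but a rolling counter in the range
 * 0..(rx_bufs << RX_PTR_BOUNDARY_SHIFT)-1; the buffer in use is
 * (rx_ptr % rx_bufs).  RX_PTR_MAX_SHIFT bounds the counter so that it
 * always fits in an unsigned int (see rx_ptr_shift_too_big()).
 */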
76 #define RX_PTR_BOUNDARY_SHIFT		8
77 #define RX_PTR_MAX_SHIFT		(RX_PTR_BOUNDARY_SHIFT + \
78 						CS_MAX_BUFFERS_SHIFT)
79 struct cs_hsi_iface {
80 	struct hsi_client		*cl;
81 	struct hsi_client		*master;
82 
83 	unsigned int			iface_state;
84 	unsigned int			wakeline_state;
85 	unsigned int			control_state;
86 	unsigned int			data_state;
87 
88 	/* state exposed to application */
89 	struct cs_mmap_config_block	*mmap_cfg;
90 
91 	unsigned long			mmap_base;
92 	unsigned long			mmap_size;
93 
94 	unsigned int			rx_slot;
95 	unsigned int			tx_slot;
96 
97 	/* note: for security reasons, we do not trust the contents of
98 	 * mmap_cfg, but instead duplicate the variables here */
99 	unsigned int			buf_size;
100 	unsigned int			rx_bufs;
101 	unsigned int			tx_bufs;
102 	unsigned int			rx_ptr_boundary;
103 	unsigned int			rx_offsets[CS_MAX_BUFFERS];
104 	unsigned int			tx_offsets[CS_MAX_BUFFERS];
105 
106 	/* size of aligned memory blocks */
107 	unsigned int			slot_size;
108 	unsigned int			flags;
109 
110 	struct list_head		cmdqueue;
111 
112 	struct hsi_msg			*data_rx_msg;
113 	struct hsi_msg			*data_tx_msg;
114 	wait_queue_head_t		datawait;
115 
116 	struct pm_qos_request           pm_qos_req;
117 
118 	spinlock_t			lock;
119 };
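/*
 * Layout of the single page mmapped to user space (set up in
 * cs_hsi_data_enable()): struct cs_mmap_config_block at offset 0, followed
 * by the RX slots and then the TX slots, each slot occupying
 * L1_CACHE_ALIGN(buf_size) bytes.
 */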
120 
121 static struct cs_char cs_char_data;
122 
123 static void cs_hsi_read_on_control(struct cs_hsi_iface *hi);
124 static void cs_hsi_read_on_data(struct cs_hsi_iface *hi);
125 
126 static inline void rx_ptr_shift_too_big(void)
127 {
128 	BUILD_BUG_ON((1LLU << RX_PTR_MAX_SHIFT) > UINT_MAX);
129 }
130 
131 static void cs_notify(u32 message, struct list_head *head)
132 {
133 	struct char_queue *entry;
134 
135 	spin_lock(&cs_char_data.lock);
136 
137 	if (!cs_char_data.opened) {
138 		spin_unlock(&cs_char_data.lock);
139 		goto out;
140 	}
141 
142 	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
143 	if (!entry) {
144 		dev_err(&cs_char_data.cl->device,
145 			"Can't allocate new entry for the queue.\n");
146 		spin_unlock(&cs_char_data.lock);
147 		goto out;
148 	}
149 
150 	entry->msg = message;
151 	list_add_tail(&entry->list, head);
152 
153 	spin_unlock(&cs_char_data.lock);
154 
155 	wake_up_interruptible(&cs_char_data.wait);
156 	kill_fasync(&cs_char_data.async_queue, SIGIO, POLL_IN);
157 
158 out:
159 	return;
160 }
161 
162 static u32 cs_pop_entry(struct list_head *head)
163 {
164 	struct char_queue *entry;
165 	u32 data;
166 
167 	entry = list_entry(head->next, struct char_queue, list);
168 	data = entry->msg;
169 	list_del(&entry->list);
170 	kfree(entry);
171 
172 	return data;
173 }
174 
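/*
 * Control notifications are queued on chardev_queue, data notifications on
 * dataind_queue.  Data notifications are capped at the number of RX buffers:
 * if user space falls behind, the oldest entries are dropped instead of
 * letting the queue grow without bound.
 */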
175 static void cs_notify_control(u32 message)
176 {
177 	cs_notify(message, &cs_char_data.chardev_queue);
178 }
179 
180 static void cs_notify_data(u32 message, int maxlength)
181 {
182 	cs_notify(message, &cs_char_data.dataind_queue);
183 
184 	spin_lock(&cs_char_data.lock);
185 	cs_char_data.dataind_pending++;
186 	while (cs_char_data.dataind_pending > maxlength &&
187 				!list_empty(&cs_char_data.dataind_queue)) {
188 		dev_dbg(&cs_char_data.cl->device, "data notification "
189 		"queue overrun (%u entries)\n", cs_char_data.dataind_pending);
190 
191 		cs_pop_entry(&cs_char_data.dataind_queue);
192 		cs_char_data.dataind_pending--;
193 	}
194 	spin_unlock(&cs_char_data.lock);
195 }
196 
197 static inline void cs_set_cmd(struct hsi_msg *msg, u32 cmd)
198 {
199 	u32 *data = sg_virt(msg->sgt.sgl);
200 	*data = cmd;
201 }
202 
203 static inline u32 cs_get_cmd(struct hsi_msg *msg)
204 {
205 	u32 *data = sg_virt(msg->sgt.sgl);
206 	return *data;
207 }
208 
209 static void cs_release_cmd(struct hsi_msg *msg)
210 {
211 	struct cs_hsi_iface *hi = msg->context;
212 
213 	list_add_tail(&msg->link, &hi->cmdqueue);
214 }
215 
216 static void cs_cmd_destructor(struct hsi_msg *msg)
217 {
218 	struct cs_hsi_iface *hi = msg->context;
219 
220 	spin_lock(&hi->lock);
221 
222 	dev_dbg(&cs_char_data.cl->device, "control cmd destructor\n");
223 
224 	if (hi->iface_state != CS_STATE_CLOSED)
225 		dev_err(&hi->cl->device, "Cmd flushed while driver active\n");
226 
227 	if (msg->ttype == HSI_MSG_READ)
228 		hi->control_state &=
229 			~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
230 	else if (msg->ttype == HSI_MSG_WRITE &&
231 			hi->control_state & SSI_CHANNEL_STATE_WRITING)
232 		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;
233 
234 	cs_release_cmd(msg);
235 
236 	spin_unlock(&hi->lock);
237 }
238 
239 static struct hsi_msg *cs_claim_cmd(struct cs_hsi_iface* ssi)
240 {
241 	struct hsi_msg *msg;
242 
243 	BUG_ON(list_empty(&ssi->cmdqueue));
244 
245 	msg = list_first_entry(&ssi->cmdqueue, struct hsi_msg, link);
246 	list_del(&msg->link);
247 	msg->destructor = cs_cmd_destructor;
248 
249 	return msg;
250 }
251 
252 static void cs_free_cmds(struct cs_hsi_iface *ssi)
253 {
254 	struct hsi_msg *msg, *tmp;
255 
256 	list_for_each_entry_safe(msg, tmp, &ssi->cmdqueue, link) {
257 		list_del(&msg->link);
258 		msg->destructor = NULL;
259 		kfree(sg_virt(msg->sgt.sgl));
260 		hsi_free_msg(msg);
261 	}
262 }
263 
264 static int cs_alloc_cmds(struct cs_hsi_iface *hi)
265 {
266 	struct hsi_msg *msg;
267 	u32 *buf;
268 	unsigned int i;
269 
270 	INIT_LIST_HEAD(&hi->cmdqueue);
271 
272 	for (i = 0; i < CS_MAX_CMDS; i++) {
273 		msg = hsi_alloc_msg(1, GFP_KERNEL);
274 		if (!msg)
275 			goto out;
276 		buf = kmalloc(sizeof(*buf), GFP_KERNEL);
277 		if (!buf) {
278 			hsi_free_msg(msg);
279 			goto out;
280 		}
281 		sg_init_one(msg->sgt.sgl, buf, sizeof(*buf));
282 		msg->channel = cs_char_data.channel_id_cmd;
283 		msg->context = hi;
284 		list_add_tail(&msg->link, &hi->cmdqueue);
285 	}
286 
287 	return 0;
288 
289 out:
290 	cs_free_cmds(hi);
291 	return -ENOMEM;
292 }
293 
294 static void cs_hsi_data_destructor(struct hsi_msg *msg)
295 {
296 	struct cs_hsi_iface *hi = msg->context;
297 	const char *dir = (msg->ttype == HSI_MSG_READ) ? "RX" : "TX";
298 
299 	dev_dbg(&cs_char_data.cl->device, "Freeing data %s message\n", dir);
300 
301 	spin_lock(&hi->lock);
302 	if (hi->iface_state != CS_STATE_CLOSED)
303 		dev_err(&cs_char_data.cl->device,
304 				"Data %s flush while device active\n", dir);
305 	if (msg->ttype == HSI_MSG_READ)
306 		hi->data_state &=
307 			~(SSI_CHANNEL_STATE_POLL | SSI_CHANNEL_STATE_READING);
308 	else
309 		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;
310 
311 	msg->status = HSI_STATUS_COMPLETED;
312 	if (unlikely(waitqueue_active(&hi->datawait)))
313 		wake_up_interruptible(&hi->datawait);
314 
315 	spin_unlock(&hi->lock);
316 }
317 
318 static int cs_hsi_alloc_data(struct cs_hsi_iface *hi)
319 {
320 	struct hsi_msg *txmsg, *rxmsg;
321 	int res = 0;
322 
323 	rxmsg = hsi_alloc_msg(1, GFP_KERNEL);
324 	if (!rxmsg) {
325 		res = -ENOMEM;
326 		goto out1;
327 	}
328 	rxmsg->channel = cs_char_data.channel_id_data;
329 	rxmsg->destructor = cs_hsi_data_destructor;
330 	rxmsg->context = hi;
331 
332 	txmsg = hsi_alloc_msg(1, GFP_KERNEL);
333 	if (!txmsg) {
334 		res = -ENOMEM;
335 		goto out2;
336 	}
337 	txmsg->channel = cs_char_data.channel_id_data;
338 	txmsg->destructor = cs_hsi_data_destructor;
339 	txmsg->context = hi;
340 
341 	hi->data_rx_msg = rxmsg;
342 	hi->data_tx_msg = txmsg;
343 
344 	return 0;
345 
346 out2:
347 	hsi_free_msg(rxmsg);
348 out1:
349 	return res;
350 }
351 
352 static void cs_hsi_free_data_msg(struct hsi_msg *msg)
353 {
354 	WARN_ON(msg->status != HSI_STATUS_COMPLETED &&
355 					msg->status != HSI_STATUS_ERROR);
356 	hsi_free_msg(msg);
357 }
358 
359 static void cs_hsi_free_data(struct cs_hsi_iface *hi)
360 {
361 	cs_hsi_free_data_msg(hi->data_rx_msg);
362 	cs_hsi_free_data_msg(hi->data_tx_msg);
363 }
364 
365 static inline void __cs_hsi_error_pre(struct cs_hsi_iface *hi,
366 					struct hsi_msg *msg, const char *info,
367 					unsigned int *state)
368 {
369 	spin_lock(&hi->lock);
370 	dev_err(&hi->cl->device, "HSI %s error, msg %d, state %u\n",
371 		info, msg->status, *state);
372 }
373 
374 static inline void __cs_hsi_error_post(struct cs_hsi_iface *hi)
375 {
376 	spin_unlock(&hi->lock);
377 }
378 
379 static inline void __cs_hsi_error_read_bits(unsigned int *state)
380 {
381 	*state |= SSI_CHANNEL_STATE_ERROR;
382 	*state &= ~(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL);
383 }
384 
385 static inline void __cs_hsi_error_write_bits(unsigned int *state)
386 {
387 	*state |= SSI_CHANNEL_STATE_ERROR;
388 	*state &= ~SSI_CHANNEL_STATE_WRITING;
389 }
390 
391 static void cs_hsi_control_read_error(struct cs_hsi_iface *hi,
392 							struct hsi_msg *msg)
393 {
394 	__cs_hsi_error_pre(hi, msg, "control read", &hi->control_state);
395 	cs_release_cmd(msg);
396 	__cs_hsi_error_read_bits(&hi->control_state);
397 	__cs_hsi_error_post(hi);
398 }
399 
400 static void cs_hsi_control_write_error(struct cs_hsi_iface *hi,
401 							struct hsi_msg *msg)
402 {
403 	__cs_hsi_error_pre(hi, msg, "control write", &hi->control_state);
404 	cs_release_cmd(msg);
405 	__cs_hsi_error_write_bits(&hi->control_state);
406 	__cs_hsi_error_post(hi);
407 
408 }
409 
410 static void cs_hsi_data_read_error(struct cs_hsi_iface *hi, struct hsi_msg *msg)
411 {
412 	__cs_hsi_error_pre(hi, msg, "data read", &hi->data_state);
413 	__cs_hsi_error_read_bits(&hi->data_state);
414 	__cs_hsi_error_post(hi);
415 }
416 
417 static void cs_hsi_data_write_error(struct cs_hsi_iface *hi,
418 							struct hsi_msg *msg)
419 {
420 	__cs_hsi_error_pre(hi, msg, "data write", &hi->data_state);
421 	__cs_hsi_error_write_bits(&hi->data_state);
422 	__cs_hsi_error_post(hi);
423 }
424 
425 static void cs_hsi_read_on_control_complete(struct hsi_msg *msg)
426 {
427 	u32 cmd = cs_get_cmd(msg);
428 	struct cs_hsi_iface *hi = msg->context;
429 
430 	spin_lock(&hi->lock);
431 	hi->control_state &= ~SSI_CHANNEL_STATE_READING;
432 	if (msg->status == HSI_STATUS_ERROR) {
433 		dev_err(&hi->cl->device, "Control RX error detected\n");
434 		spin_unlock(&hi->lock);
435 		cs_hsi_control_read_error(hi, msg);
436 		goto out;
437 	}
438 	dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd);
439 	cs_release_cmd(msg);
440 	if (hi->flags & CS_FEAT_TSTAMP_RX_CTRL) {
441 		struct timespec64 tspec;
442 		struct cs_timestamp *tstamp =
443 			&hi->mmap_cfg->tstamp_rx_ctrl;
444 
445 		ktime_get_ts64(&tspec);
446 
447 		tstamp->tv_sec = (__u32) tspec.tv_sec;
448 		tstamp->tv_nsec = (__u32) tspec.tv_nsec;
449 	}
450 	spin_unlock(&hi->lock);
451 
452 	cs_notify_control(cmd);
453 
454 out:
455 	cs_hsi_read_on_control(hi);
456 }
457 
458 static void cs_hsi_peek_on_control_complete(struct hsi_msg *msg)
459 {
460 	struct cs_hsi_iface *hi = msg->context;
461 	int ret;
462 
463 	if (msg->status == HSI_STATUS_ERROR) {
464 		dev_err(&hi->cl->device, "Control peek RX error detected\n");
465 		cs_hsi_control_read_error(hi, msg);
466 		return;
467 	}
468 
469 	WARN_ON(!(hi->control_state & SSI_CHANNEL_STATE_READING));
470 
471 	dev_dbg(&hi->cl->device, "Peek on control complete, reading\n");
472 	msg->sgt.nents = 1;
473 	msg->complete = cs_hsi_read_on_control_complete;
474 	ret = hsi_async_read(hi->cl, msg);
475 	if (ret)
476 		cs_hsi_control_read_error(hi, msg);
477 }
478 
479 static void cs_hsi_read_on_control(struct cs_hsi_iface *hi)
480 {
481 	struct hsi_msg *msg;
482 	int ret;
483 
484 	spin_lock(&hi->lock);
485 	if (hi->control_state & SSI_CHANNEL_STATE_READING) {
486 		dev_err(&hi->cl->device, "Control read already pending (%d)\n",
487 			hi->control_state);
488 		spin_unlock(&hi->lock);
489 		return;
490 	}
491 	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
492 		dev_err(&hi->cl->device, "Control read error (%d)\n",
493 			hi->control_state);
494 		spin_unlock(&hi->lock);
495 		return;
496 	}
497 	hi->control_state |= SSI_CHANNEL_STATE_READING;
498 	dev_dbg(&hi->cl->device, "Issuing RX on control\n");
499 	msg = cs_claim_cmd(hi);
500 	spin_unlock(&hi->lock);
501 
502 	msg->sgt.nents = 0;
503 	msg->complete = cs_hsi_peek_on_control_complete;
504 	ret = hsi_async_read(hi->cl, msg);
505 	if (ret)
506 		cs_hsi_control_read_error(hi, msg);
507 }
508 
509 static void cs_hsi_write_on_control_complete(struct hsi_msg *msg)
510 {
511 	struct cs_hsi_iface *hi = msg->context;
512 	if (msg->status == HSI_STATUS_COMPLETED) {
513 		spin_lock(&hi->lock);
514 		hi->control_state &= ~SSI_CHANNEL_STATE_WRITING;
515 		cs_release_cmd(msg);
516 		spin_unlock(&hi->lock);
517 	} else if (msg->status == HSI_STATUS_ERROR) {
518 		cs_hsi_control_write_error(hi, msg);
519 	} else {
520 		dev_err(&hi->cl->device,
521 			"unexpected status in control write callback %d\n",
522 			msg->status);
523 	}
524 }
525 
526 static int cs_hsi_write_on_control(struct cs_hsi_iface *hi, u32 message)
527 {
528 	struct hsi_msg *msg;
529 	int ret;
530 
531 	spin_lock(&hi->lock);
532 	if (hi->control_state & SSI_CHANNEL_STATE_ERROR) {
533 		spin_unlock(&hi->lock);
534 		return -EIO;
535 	}
536 	if (hi->control_state & SSI_CHANNEL_STATE_WRITING) {
537 		dev_err(&hi->cl->device,
538 			"Write still pending on control channel.\n");
539 		spin_unlock(&hi->lock);
540 		return -EBUSY;
541 	}
542 	hi->control_state |= SSI_CHANNEL_STATE_WRITING;
543 	msg = cs_claim_cmd(hi);
544 	spin_unlock(&hi->lock);
545 
546 	cs_set_cmd(msg, message);
547 	msg->sgt.nents = 1;
548 	msg->complete = cs_hsi_write_on_control_complete;
549 	dev_dbg(&hi->cl->device,
550 		"Sending control message %08X\n", message);
551 	ret = hsi_async_write(hi->cl, msg);
552 	if (ret) {
553 		dev_err(&hi->cl->device,
554 			"async_write failed with %d\n", ret);
555 		cs_hsi_control_write_error(hi, msg);
556 	}
557 
558 	/*
559 	 * Make sure control read is always pending when issuing
560 	 * new control writes. This is needed as the controller
561 	 * may flush our messages if e.g. the peer device reboots
562 	 * unexpectedly (and we cannot directly resubmit a new read from
563 	 * the message destructor; see cs_cmd_destructor()).
564 	 */
565 	if (!(hi->control_state & SSI_CHANNEL_STATE_READING)) {
566 		dev_err(&hi->cl->device, "Restarting control reads\n");
567 		cs_hsi_read_on_control(hi);
568 	}
569 
570 	return 0;
571 }
572 
573 static void cs_hsi_read_on_data_complete(struct hsi_msg *msg)
574 {
575 	struct cs_hsi_iface *hi = msg->context;
576 	u32 payload;
577 
578 	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
579 		cs_hsi_data_read_error(hi, msg);
580 		return;
581 	}
582 
583 	spin_lock(&hi->lock);
584 	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_READING));
585 	hi->data_state &= ~SSI_CHANNEL_STATE_READING;
586 	payload = CS_RX_DATA_RECEIVED;
587 	payload |= hi->rx_slot;
588 	hi->rx_slot++;
589 	hi->rx_slot %= hi->rx_ptr_boundary;
590 	/* expose current rx ptr in mmap area */
591 	hi->mmap_cfg->rx_ptr = hi->rx_slot;
592 	if (unlikely(waitqueue_active(&hi->datawait)))
593 		wake_up_interruptible(&hi->datawait);
594 	spin_unlock(&hi->lock);
595 
596 	cs_notify_data(payload, hi->rx_bufs);
597 	cs_hsi_read_on_data(hi);
598 }
599 
600 static void cs_hsi_peek_on_data_complete(struct hsi_msg *msg)
601 {
602 	struct cs_hsi_iface *hi = msg->context;
603 	u32 *address;
604 	int ret;
605 
606 	if (unlikely(msg->status == HSI_STATUS_ERROR)) {
607 		cs_hsi_data_read_error(hi, msg);
608 		return;
609 	}
610 	if (unlikely(hi->iface_state != CS_STATE_CONFIGURED)) {
611 		dev_err(&hi->cl->device, "Data received in invalid state\n");
612 		cs_hsi_data_read_error(hi, msg);
613 		return;
614 	}
615 
616 	spin_lock(&hi->lock);
617 	WARN_ON(!(hi->data_state & SSI_CHANNEL_STATE_POLL));
618 	hi->data_state &= ~SSI_CHANNEL_STATE_POLL;
619 	hi->data_state |= SSI_CHANNEL_STATE_READING;
620 	spin_unlock(&hi->lock);
621 
622 	address = (u32 *)(hi->mmap_base +
623 				hi->rx_offsets[hi->rx_slot % hi->rx_bufs]);
624 	sg_init_one(msg->sgt.sgl, address, hi->buf_size);
625 	msg->sgt.nents = 1;
626 	msg->complete = cs_hsi_read_on_data_complete;
627 	ret = hsi_async_read(hi->cl, msg);
628 	if (ret)
629 		cs_hsi_data_read_error(hi, msg);
630 }
631 
632 /*
633  * Read/write transaction is ongoing. Returns false if in
634  * SSI_CHANNEL_STATE_POLL state.
635  */
636 static inline int cs_state_xfer_active(unsigned int state)
637 {
638 	return (state & SSI_CHANNEL_STATE_WRITING) ||
639 		(state & SSI_CHANNEL_STATE_READING);
640 }
641 
642 /*
643  * No pending read/writes
644  */
645 static inline int cs_state_idle(unsigned int state)
646 {
647 	return !(state & ~SSI_CHANNEL_STATE_ERROR);
648 }
649 
650 static void cs_hsi_read_on_data(struct cs_hsi_iface *hi)
651 {
652 	struct hsi_msg *rxmsg;
653 	int ret;
654 
655 	spin_lock(&hi->lock);
656 	if (hi->data_state &
657 		(SSI_CHANNEL_STATE_READING | SSI_CHANNEL_STATE_POLL)) {
658 		dev_dbg(&hi->cl->device, "Data read already pending (%u)\n",
659 			hi->data_state);
660 		spin_unlock(&hi->lock);
661 		return;
662 	}
663 	hi->data_state |= SSI_CHANNEL_STATE_POLL;
664 	spin_unlock(&hi->lock);
665 
666 	rxmsg = hi->data_rx_msg;
667 	sg_init_one(rxmsg->sgt.sgl, (void *)hi->mmap_base, 0);
668 	rxmsg->sgt.nents = 0;
669 	rxmsg->complete = cs_hsi_peek_on_data_complete;
670 
671 	ret = hsi_async_read(hi->cl, rxmsg);
672 	if (ret)
673 		cs_hsi_data_read_error(hi, rxmsg);
674 }
675 
676 static void cs_hsi_write_on_data_complete(struct hsi_msg *msg)
677 {
678 	struct cs_hsi_iface *hi = msg->context;
679 
680 	if (msg->status == HSI_STATUS_COMPLETED) {
681 		spin_lock(&hi->lock);
682 		hi->data_state &= ~SSI_CHANNEL_STATE_WRITING;
683 		if (unlikely(waitqueue_active(&hi->datawait)))
684 			wake_up_interruptible(&hi->datawait);
685 		spin_unlock(&hi->lock);
686 	} else {
687 		cs_hsi_data_write_error(hi, msg);
688 	}
689 }
690 
691 static int cs_hsi_write_on_data(struct cs_hsi_iface *hi, unsigned int slot)
692 {
693 	u32 *address;
694 	struct hsi_msg *txmsg;
695 	int ret;
696 
697 	spin_lock(&hi->lock);
698 	if (hi->iface_state != CS_STATE_CONFIGURED) {
699 		dev_err(&hi->cl->device, "Not configured, aborting\n");
700 		ret = -EINVAL;
701 		goto error;
702 	}
703 	if (hi->data_state & SSI_CHANNEL_STATE_ERROR) {
704 		dev_err(&hi->cl->device, "HSI error, aborting\n");
705 		ret = -EIO;
706 		goto error;
707 	}
708 	if (hi->data_state & SSI_CHANNEL_STATE_WRITING) {
709 		dev_err(&hi->cl->device, "Write pending on data channel.\n");
710 		ret = -EBUSY;
711 		goto error;
712 	}
713 	hi->data_state |= SSI_CHANNEL_STATE_WRITING;
714 	spin_unlock(&hi->lock);
715 
716 	hi->tx_slot = slot;
717 	address = (u32 *)(hi->mmap_base + hi->tx_offsets[hi->tx_slot]);
718 	txmsg = hi->data_tx_msg;
719 	sg_init_one(txmsg->sgt.sgl, address, hi->buf_size);
720 	txmsg->complete = cs_hsi_write_on_data_complete;
721 	ret = hsi_async_write(hi->cl, txmsg);
722 	if (ret)
723 		cs_hsi_data_write_error(hi, txmsg);
724 
725 	return ret;
726 
727 error:
728 	spin_unlock(&hi->lock);
729 	if (ret == -EIO)
730 		cs_hsi_data_write_error(hi, hi->data_tx_msg);
731 
732 	return ret;
733 }
734 
735 static unsigned int cs_hsi_get_state(struct cs_hsi_iface *hi)
736 {
737 	return hi->iface_state;
738 }
739 
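/*
 * Illustrative command words as passed down from cs_char_write() (a sketch
 * only; the CS_TX_DATA_READY and CS_PARAM_MASK values come from the
 * cs-protocol uapi header):
 *  - a word with the TARGET_REMOTE domain bit set is sent verbatim to the
 *    modem on the control channel;
 *  - (CS_TX_DATA_READY | slot), with the domain bit clear, is handled
 *    locally and starts a data-channel write of TX slot 'slot'.
 */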
740 static int cs_hsi_command(struct cs_hsi_iface *hi, u32 cmd)
741 {
742 	int ret = 0;
743 
744 	local_bh_disable();
745 	switch (cmd & TARGET_MASK) {
746 	case TARGET_REMOTE:
747 		ret = cs_hsi_write_on_control(hi, cmd);
748 		break;
749 	case TARGET_LOCAL:
750 		if ((cmd & CS_CMD_MASK) == CS_TX_DATA_READY)
751 			ret = cs_hsi_write_on_data(hi, cmd & CS_PARAM_MASK);
752 		else
753 			ret = -EINVAL;
754 		break;
755 	default:
756 		ret = -EINVAL;
757 		break;
758 	}
759 	local_bh_enable();
760 
761 	return ret;
762 }
763 
764 static void cs_hsi_set_wakeline(struct cs_hsi_iface *hi, bool new_state)
765 {
766 	int change = 0;
767 
768 	spin_lock_bh(&hi->lock);
769 	if (hi->wakeline_state != new_state) {
770 		hi->wakeline_state = new_state;
771 		change = 1;
772 		dev_dbg(&hi->cl->device, "setting wake line to %d (%p)\n",
773 			new_state, hi->cl);
774 	}
775 	spin_unlock_bh(&hi->lock);
776 
777 	if (change) {
778 		if (new_state)
779 			ssip_slave_start_tx(hi->master);
780 		else
781 			ssip_slave_stop_tx(hi->master);
782 	}
783 
784 	dev_dbg(&hi->cl->device, "wake line set to %d (%p)\n",
785 		new_state, hi->cl);
786 }
787 
788 static void set_buffer_sizes(struct cs_hsi_iface *hi, int rx_bufs, int tx_bufs)
789 {
790 	hi->rx_bufs = rx_bufs;
791 	hi->tx_bufs = tx_bufs;
792 	hi->mmap_cfg->rx_bufs = rx_bufs;
793 	hi->mmap_cfg->tx_bufs = tx_bufs;
794 
795 	if (hi->flags & CS_FEAT_ROLLING_RX_COUNTER) {
796 		/*
797 		 * For more robust overrun detection, let the rx
798 		 * pointer run in range 0..'boundary-1'. Boundary
799 		 * is a multiple of rx_bufs, and limited in max size
800 		 * by RX_PTR_MAX_SHIFT to allow for fast ptr-diff
801 		 * calculation.
802 		 */
803 		hi->rx_ptr_boundary = (rx_bufs << RX_PTR_BOUNDARY_SHIFT);
804 		hi->mmap_cfg->rx_ptr_boundary = hi->rx_ptr_boundary;
805 	} else {
806 		hi->rx_ptr_boundary = hi->rx_bufs;
807 	}
808 }
809 
810 static int check_buf_params(struct cs_hsi_iface *hi,
811 					const struct cs_buffer_config *buf_cfg)
812 {
813 	size_t buf_size_aligned = L1_CACHE_ALIGN(buf_cfg->buf_size) *
814 					(buf_cfg->rx_bufs + buf_cfg->tx_bufs);
815 	size_t ctrl_size_aligned = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
816 	int r = 0;
817 
818 	if (buf_cfg->rx_bufs > CS_MAX_BUFFERS ||
819 					buf_cfg->tx_bufs > CS_MAX_BUFFERS) {
820 		r = -EINVAL;
821 	} else if ((buf_size_aligned + ctrl_size_aligned) >= hi->mmap_size) {
822 		dev_err(&hi->cl->device, "No space for the requested buffer "
823 			"configuration\n");
824 		r = -ENOBUFS;
825 	}
826 
827 	return r;
828 }
829 
830 /*
831  * Block until pending data transfers have completed.
832  */
833 static int cs_hsi_data_sync(struct cs_hsi_iface *hi)
834 {
835 	int r = 0;
836 
837 	spin_lock_bh(&hi->lock);
838 
839 	if (!cs_state_xfer_active(hi->data_state)) {
840 		dev_dbg(&hi->cl->device, "hsi_data_sync break, idle\n");
841 		goto out;
842 	}
843 
844 	for (;;) {
845 		int s;
846 		DEFINE_WAIT(wait);
847 		if (!cs_state_xfer_active(hi->data_state))
848 			goto out;
849 		if (signal_pending(current)) {
850 			r = -ERESTARTSYS;
851 			goto out;
852 		}
853 		/*
854 		 * prepare_to_wait must be called with hi->lock held
855 		 * so that callbacks can check for waitqueue_active()
856 		 */
857 		prepare_to_wait(&hi->datawait, &wait, TASK_INTERRUPTIBLE);
858 		spin_unlock_bh(&hi->lock);
859 		s = schedule_timeout(
860 			msecs_to_jiffies(CS_HSI_TRANSFER_TIMEOUT_MS));
861 		spin_lock_bh(&hi->lock);
862 		finish_wait(&hi->datawait, &wait);
863 		if (!s) {
864 			dev_dbg(&hi->cl->device,
865 				"hsi_data_sync timeout after %d ms\n",
866 				CS_HSI_TRANSFER_TIMEOUT_MS);
867 			r = -EIO;
868 			goto out;
869 		}
870 	}
871 
872 out:
873 	spin_unlock_bh(&hi->lock);
874 	dev_dbg(&hi->cl->device, "hsi_data_sync done with res %d\n", r);
875 
876 	return r;
877 }
878 
879 static void cs_hsi_data_enable(struct cs_hsi_iface *hi,
880 					struct cs_buffer_config *buf_cfg)
881 {
882 	unsigned int data_start, i;
883 
884 	BUG_ON(hi->buf_size == 0);
885 
886 	set_buffer_sizes(hi, buf_cfg->rx_bufs, buf_cfg->tx_bufs);
887 
888 	hi->slot_size = L1_CACHE_ALIGN(hi->buf_size);
889 	dev_dbg(&hi->cl->device,
890 			"setting slot size to %u, buf size %u, align %u\n",
891 			hi->slot_size, hi->buf_size, L1_CACHE_BYTES);
892 
893 	data_start = L1_CACHE_ALIGN(sizeof(*hi->mmap_cfg));
894 	dev_dbg(&hi->cl->device,
895 			"setting data start at %u, cfg block %u, align %u\n",
896 			data_start, sizeof(*hi->mmap_cfg), L1_CACHE_BYTES);
897 
898 	for (i = 0; i < hi->mmap_cfg->rx_bufs; i++) {
899 		hi->rx_offsets[i] = data_start + i * hi->slot_size;
900 		hi->mmap_cfg->rx_offsets[i] = hi->rx_offsets[i];
901 		dev_dbg(&hi->cl->device, "DL buf #%u at %u\n",
902 					i, hi->rx_offsets[i]);
903 	}
904 	for (i = 0; i < hi->mmap_cfg->tx_bufs; i++) {
905 		hi->tx_offsets[i] = data_start +
906 			(i + hi->mmap_cfg->rx_bufs) * hi->slot_size;
907 		hi->mmap_cfg->tx_offsets[i] = hi->tx_offsets[i];
908 		dev_dbg(&hi->cl->device, "UL buf #%u at %u\n",
909 					i, hi->tx_offsets[i]);
910 	}
911 
912 	hi->iface_state = CS_STATE_CONFIGURED;
913 }
914 
915 static void cs_hsi_data_disable(struct cs_hsi_iface *hi, int old_state)
916 {
917 	if (old_state == CS_STATE_CONFIGURED) {
918 		dev_dbg(&hi->cl->device,
919 			"closing data channel with slot size 0\n");
920 		hi->iface_state = CS_STATE_OPENED;
921 	}
922 }
923 
924 static int cs_hsi_buf_config(struct cs_hsi_iface *hi,
925 					struct cs_buffer_config *buf_cfg)
926 {
927 	int r = 0;
928 	unsigned int old_state = hi->iface_state;
929 
930 	spin_lock_bh(&hi->lock);
931 	/* Prevent new transactions during buffer reconfig */
932 	if (old_state == CS_STATE_CONFIGURED)
933 		hi->iface_state = CS_STATE_OPENED;
934 	spin_unlock_bh(&hi->lock);
935 
936 	/*
937 	 * make sure that no non-zero data reads are ongoing before
938 	 * proceeding to change the buffer layout
939 	 */
940 	r = cs_hsi_data_sync(hi);
941 	if (r < 0)
942 		return r;
943 
944 	WARN_ON(cs_state_xfer_active(hi->data_state));
945 
946 	spin_lock_bh(&hi->lock);
947 	r = check_buf_params(hi, buf_cfg);
948 	if (r < 0)
949 		goto error;
950 
951 	hi->buf_size = buf_cfg->buf_size;
952 	hi->mmap_cfg->buf_size = hi->buf_size;
953 	hi->flags = buf_cfg->flags;
954 
955 	hi->rx_slot = 0;
956 	hi->tx_slot = 0;
957 	hi->slot_size = 0;
958 
959 	if (hi->buf_size)
960 		cs_hsi_data_enable(hi, buf_cfg);
961 	else
962 		cs_hsi_data_disable(hi, old_state);
963 
964 	spin_unlock_bh(&hi->lock);
965 
966 	if (old_state != hi->iface_state) {
967 		if (hi->iface_state == CS_STATE_CONFIGURED) {
968 			cpu_latency_qos_add_request(&hi->pm_qos_req,
969 				CS_QOS_LATENCY_FOR_DATA_USEC);
970 			local_bh_disable();
971 			cs_hsi_read_on_data(hi);
972 			local_bh_enable();
973 		} else if (old_state == CS_STATE_CONFIGURED) {
974 			cpu_latency_qos_remove_request(&hi->pm_qos_req);
975 		}
976 	}
977 	return r;
978 
979 error:
980 	spin_unlock_bh(&hi->lock);
981 	return r;
982 }
983 
984 static int cs_hsi_start(struct cs_hsi_iface **hi, struct hsi_client *cl,
985 			unsigned long mmap_base, unsigned long mmap_size)
986 {
987 	int err = 0;
988 	struct cs_hsi_iface *hsi_if = kzalloc(sizeof(*hsi_if), GFP_KERNEL);
989 
990 	dev_dbg(&cl->device, "cs_hsi_start\n");
991 
992 	if (!hsi_if) {
993 		err = -ENOMEM;
994 		goto leave0;
995 	}
996 	spin_lock_init(&hsi_if->lock);
997 	hsi_if->cl = cl;
998 	hsi_if->iface_state = CS_STATE_CLOSED;
999 	hsi_if->mmap_cfg = (struct cs_mmap_config_block *)mmap_base;
1000 	hsi_if->mmap_base = mmap_base;
1001 	hsi_if->mmap_size = mmap_size;
1002 	memset(hsi_if->mmap_cfg, 0, sizeof(*hsi_if->mmap_cfg));
1003 	init_waitqueue_head(&hsi_if->datawait);
1004 	err = cs_alloc_cmds(hsi_if);
1005 	if (err < 0) {
1006 		dev_err(&cl->device, "Unable to alloc HSI messages\n");
1007 		goto leave1;
1008 	}
1009 	err = cs_hsi_alloc_data(hsi_if);
1010 	if (err < 0) {
1011 		dev_err(&cl->device, "Unable to alloc HSI messages for data\n");
1012 		goto leave2;
1013 	}
1014 	err = hsi_claim_port(cl, 1);
1015 	if (err < 0) {
1016 		dev_err(&cl->device,
1017 				"Could not open, HSI port already claimed\n");
1018 		goto leave3;
1019 	}
1020 	hsi_if->master = ssip_slave_get_master(cl);
1021 	if (IS_ERR(hsi_if->master)) {
1022 		err = PTR_ERR(hsi_if->master);
1023 		dev_err(&cl->device, "Could not get HSI master client\n");
1024 		goto leave4;
1025 	}
1026 	if (!ssip_slave_running(hsi_if->master)) {
1027 		err = -ENODEV;
1028 		dev_err(&cl->device,
1029 				"HSI port not initialized\n");
1030 		goto leave4;
1031 	}
1032 
1033 	hsi_if->iface_state = CS_STATE_OPENED;
1034 	local_bh_disable();
1035 	cs_hsi_read_on_control(hsi_if);
1036 	local_bh_enable();
1037 
1038 	dev_dbg(&cl->device, "cs_hsi_start...done\n");
1039 
1040 	BUG_ON(!hi);
1041 	*hi = hsi_if;
1042 
1043 	return 0;
1044 
1045 leave4:
1046 	hsi_release_port(cl);
1047 leave3:
1048 	cs_hsi_free_data(hsi_if);
1049 leave2:
1050 	cs_free_cmds(hsi_if);
1051 leave1:
1052 	kfree(hsi_if);
1053 leave0:
1054 	dev_dbg(&cl->device, "cs_hsi_start...done/error\n\n");
1055 
1056 	return err;
1057 }
1058 
1059 static void cs_hsi_stop(struct cs_hsi_iface *hi)
1060 {
1061 	dev_dbg(&hi->cl->device, "cs_hsi_stop\n");
1062 	cs_hsi_set_wakeline(hi, 0);
1063 	ssip_slave_put_master(hi->master);
1064 
1065 	/* hsi_release_port() needs to be called with CS_STATE_CLOSED */
1066 	hi->iface_state = CS_STATE_CLOSED;
1067 	hsi_release_port(hi->cl);
1068 
1069 	/*
1070 	 * hsi_release_port() should flush out all the pending
1071 	 * messages, so cs_state_idle() should be true for both
1072 	 * control and data channels.
1073 	 */
1074 	WARN_ON(!cs_state_idle(hi->control_state));
1075 	WARN_ON(!cs_state_idle(hi->data_state));
1076 
1077 	if (cpu_latency_qos_request_active(&hi->pm_qos_req))
1078 		cpu_latency_qos_remove_request(&hi->pm_qos_req);
1079 
1080 	spin_lock_bh(&hi->lock);
1081 	cs_hsi_free_data(hi);
1082 	cs_free_cmds(hi);
1083 	spin_unlock_bh(&hi->lock);
1084 	kfree(hi);
1085 }
1086 
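/*
 * Every fault in the mapping is backed by the same single page allocated in
 * cs_char_open(), so the whole CS_MMAP_SIZE area maps that one page.
 */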
1087 static vm_fault_t cs_char_vma_fault(struct vm_fault *vmf)
1088 {
1089 	struct cs_char *csdata = vmf->vma->vm_private_data;
1090 	struct page *page;
1091 
1092 	page = virt_to_page((void *)csdata->mmap_base);
1093 	get_page(page);
1094 	vmf->page = page;
1095 
1096 	return 0;
1097 }
1098 
1099 static const struct vm_operations_struct cs_char_vm_ops = {
1100 	.fault	= cs_char_vma_fault,
1101 };
1102 
1103 static int cs_char_fasync(int fd, struct file *file, int on)
1104 {
1105 	struct cs_char *csdata = file->private_data;
1106 
1107 	if (fasync_helper(fd, file, on, &csdata->async_queue) < 0)
1108 		return -EIO;
1109 
1110 	return 0;
1111 }
1112 
1113 static __poll_t cs_char_poll(struct file *file, poll_table *wait)
1114 {
1115 	struct cs_char *csdata = file->private_data;
1116 	__poll_t ret = 0;
1117 
1118 	poll_wait(file, &cs_char_data.wait, wait);
1119 	spin_lock_bh(&csdata->lock);
1120 	if (!list_empty(&csdata->chardev_queue))
1121 		ret = EPOLLIN | EPOLLRDNORM;
1122 	else if (!list_empty(&csdata->dataind_queue))
1123 		ret = EPOLLIN | EPOLLRDNORM;
1124 	spin_unlock_bh(&csdata->lock);
1125 
1126 	return ret;
1127 }
1128 
1129 static ssize_t cs_char_read(struct file *file, char __user *buf, size_t count,
1130 								loff_t *unused)
1131 {
1132 	struct cs_char *csdata = file->private_data;
1133 	u32 data;
1134 	ssize_t retval;
1135 
1136 	if (count < sizeof(data))
1137 		return -EINVAL;
1138 
1139 	for (;;) {
1140 		DEFINE_WAIT(wait);
1141 
1142 		spin_lock_bh(&csdata->lock);
1143 		if (!list_empty(&csdata->chardev_queue)) {
1144 			data = cs_pop_entry(&csdata->chardev_queue);
1145 		} else if (!list_empty(&csdata->dataind_queue)) {
1146 			data = cs_pop_entry(&csdata->dataind_queue);
1147 			csdata->dataind_pending--;
1148 		} else {
1149 			data = 0;
1150 		}
1151 		spin_unlock_bh(&csdata->lock);
1152 
1153 		if (data)
1154 			break;
1155 		if (file->f_flags & O_NONBLOCK) {
1156 			retval = -EAGAIN;
1157 			goto out;
1158 		} else if (signal_pending(current)) {
1159 			retval = -ERESTARTSYS;
1160 			goto out;
1161 		}
1162 		prepare_to_wait_exclusive(&csdata->wait, &wait,
1163 						TASK_INTERRUPTIBLE);
1164 		schedule();
1165 		finish_wait(&csdata->wait, &wait);
1166 	}
1167 
1168 	retval = put_user(data, (u32 __user *)buf);
1169 	if (!retval)
1170 		retval = sizeof(data);
1171 
1172 out:
1173 	return retval;
1174 }
1175 
1176 static ssize_t cs_char_write(struct file *file, const char __user *buf,
1177 						size_t count, loff_t *unused)
1178 {
1179 	struct cs_char *csdata = file->private_data;
1180 	u32 data;
1181 	int err;
1182 	ssize_t	retval;
1183 
1184 	if (count < sizeof(data))
1185 		return -EINVAL;
1186 
1187 	if (get_user(data, (u32 __user *)buf))
1188 		retval = -EFAULT;
1189 	else
1190 		retval = count;
1191 
1192 	err = cs_hsi_command(csdata->hi, data);
1193 	if (err < 0)
1194 		retval = err;
1195 
1196 	return retval;
1197 }
1198 
1199 static long cs_char_ioctl(struct file *file, unsigned int cmd,
1200 				unsigned long arg)
1201 {
1202 	struct cs_char *csdata = file->private_data;
1203 	int r = 0;
1204 
1205 	switch (cmd) {
1206 	case CS_GET_STATE: {
1207 		unsigned int state;
1208 
1209 		state = cs_hsi_get_state(csdata->hi);
1210 		if (copy_to_user((void __user *)arg, &state, sizeof(state)))
1211 			r = -EFAULT;
1212 
1213 		break;
1214 	}
1215 	case CS_SET_WAKELINE: {
1216 		unsigned int state;
1217 
1218 		if (copy_from_user(&state, (void __user *)arg, sizeof(state))) {
1219 			r = -EFAULT;
1220 			break;
1221 		}
1222 
1223 		if (state > 1) {
1224 			r = -EINVAL;
1225 			break;
1226 		}
1227 
1228 		cs_hsi_set_wakeline(csdata->hi, !!state);
1229 
1230 		break;
1231 	}
1232 	case CS_GET_IF_VERSION: {
1233 		unsigned int ifver = CS_IF_VERSION;
1234 
1235 		if (copy_to_user((void __user *)arg, &ifver, sizeof(ifver)))
1236 			r = -EFAULT;
1237 
1238 		break;
1239 	}
1240 	case CS_CONFIG_BUFS: {
1241 		struct cs_buffer_config buf_cfg;
1242 
1243 		if (copy_from_user(&buf_cfg, (void __user *)arg,
1244 							sizeof(buf_cfg)))
1245 			r = -EFAULT;
1246 		else
1247 			r = cs_hsi_buf_config(csdata->hi, &buf_cfg);
1248 
1249 		break;
1250 	}
1251 	default:
1252 		r = -ENOTTY;
1253 		break;
1254 	}
1255 
1256 	return r;
1257 }
1258 
1259 static int cs_char_mmap(struct file *file, struct vm_area_struct *vma)
1260 {
1261 	if (vma->vm_end < vma->vm_start)
1262 		return -EINVAL;
1263 
1264 	if (vma_pages(vma) != 1)
1265 		return -EINVAL;
1266 
1267 	vm_flags_set(vma, VM_IO | VM_DONTDUMP | VM_DONTEXPAND);
1268 	vma->vm_ops = &cs_char_vm_ops;
1269 	vma->vm_private_data = file->private_data;
1270 
1271 	return 0;
1272 }
1273 
1274 static int cs_char_open(struct inode *unused, struct file *file)
1275 {
1276 	int ret = 0;
1277 	unsigned long p;
1278 
1279 	spin_lock_bh(&cs_char_data.lock);
1280 	if (cs_char_data.opened) {
1281 		ret = -EBUSY;
1282 		spin_unlock_bh(&cs_char_data.lock);
1283 		goto out1;
1284 	}
1285 	cs_char_data.opened = 1;
1286 	cs_char_data.dataind_pending = 0;
1287 	spin_unlock_bh(&cs_char_data.lock);
1288 
1289 	p = get_zeroed_page(GFP_KERNEL);
1290 	if (!p) {
1291 		ret = -ENOMEM;
1292 		goto out2;
1293 	}
1294 
1295 	ret = cs_hsi_start(&cs_char_data.hi, cs_char_data.cl, p, CS_MMAP_SIZE);
1296 	if (ret) {
1297 		dev_err(&cs_char_data.cl->device, "Unable to initialize HSI\n");
1298 		goto out3;
1299 	}
1300 
1301 	/* these are only used in release so lock not needed */
1302 	cs_char_data.mmap_base = p;
1303 	cs_char_data.mmap_size = CS_MMAP_SIZE;
1304 
1305 	file->private_data = &cs_char_data;
1306 
1307 	return 0;
1308 
1309 out3:
1310 	free_page(p);
1311 out2:
1312 	spin_lock_bh(&cs_char_data.lock);
1313 	cs_char_data.opened = 0;
1314 	spin_unlock_bh(&cs_char_data.lock);
1315 out1:
1316 	return ret;
1317 }
1318 
1319 static void cs_free_char_queue(struct list_head *head)
1320 {
1321 	struct char_queue *entry;
1322 	struct list_head *cursor, *next;
1323 
1324 	if (!list_empty(head)) {
1325 		list_for_each_safe(cursor, next, head) {
1326 			entry = list_entry(cursor, struct char_queue, list);
1327 			list_del(&entry->list);
1328 			kfree(entry);
1329 		}
1330 	}
1331 
1332 }
1333 
1334 static int cs_char_release(struct inode *unused, struct file *file)
1335 {
1336 	struct cs_char *csdata = file->private_data;
1337 
1338 	cs_hsi_stop(csdata->hi);
1339 	spin_lock_bh(&csdata->lock);
1340 	csdata->hi = NULL;
1341 	free_page(csdata->mmap_base);
1342 	cs_free_char_queue(&csdata->chardev_queue);
1343 	cs_free_char_queue(&csdata->dataind_queue);
1344 	csdata->opened = 0;
1345 	spin_unlock_bh(&csdata->lock);
1346 
1347 	return 0;
1348 }
1349 
1350 static const struct file_operations cs_char_fops = {
1351 	.owner		= THIS_MODULE,
1352 	.read		= cs_char_read,
1353 	.write		= cs_char_write,
1354 	.poll		= cs_char_poll,
1355 	.unlocked_ioctl	= cs_char_ioctl,
1356 	.mmap		= cs_char_mmap,
1357 	.open		= cs_char_open,
1358 	.release	= cs_char_release,
1359 	.fasync		= cs_char_fasync,
1360 };
1361 
1362 static struct miscdevice cs_char_miscdev = {
1363 	.minor	= MISC_DYNAMIC_MINOR,
1364 	.name	= "cmt_speech",
1365 	.fops	= &cs_char_fops
1366 };
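/*
 * Minimal user-space usage sketch (not part of the driver; assumes the
 * cs-protocol uapi header for the ioctl numbers, struct cs_buffer_config
 * and the event words, and uses an arbitrary example buffer size):
 *
 *	int fd = open("/dev/cmt_speech", O_RDWR);
 *	void *map = mmap(NULL, sysconf(_SC_PAGESIZE),
 *			 PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	struct cs_buffer_config cfg = {
 *		.rx_bufs = 2, .tx_bufs = 2,
 *		.buf_size = 320,		// example size only
 *	};
 *	ioctl(fd, CS_CONFIG_BUFS, &cfg);
 *
 *	uint32_t ev;
 *	read(fd, &ev, sizeof(ev));	// 32-bit event words, e.g.
 *					// CS_RX_DATA_RECEIVED | rx_ptr
 *
 * Speech frames live in the mmapped page at the offsets published in
 * struct cs_mmap_config_block at the start of the mapping.
 */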
1367 
1368 static int cs_hsi_client_probe(struct device *dev)
1369 {
1370 	int err = 0;
1371 	struct hsi_client *cl = to_hsi_client(dev);
1372 
1373 	dev_dbg(dev, "hsi_client_probe\n");
1374 	init_waitqueue_head(&cs_char_data.wait);
1375 	spin_lock_init(&cs_char_data.lock);
1376 	cs_char_data.opened = 0;
1377 	cs_char_data.cl = cl;
1378 	cs_char_data.hi = NULL;
1379 	INIT_LIST_HEAD(&cs_char_data.chardev_queue);
1380 	INIT_LIST_HEAD(&cs_char_data.dataind_queue);
1381 
1382 	cs_char_data.channel_id_cmd = hsi_get_channel_id_by_name(cl,
1383 		"speech-control");
1384 	if (cs_char_data.channel_id_cmd < 0) {
1385 		err = cs_char_data.channel_id_cmd;
1386 		dev_err(dev, "Could not get cmd channel (%d)\n", err);
1387 		return err;
1388 	}
1389 
1390 	cs_char_data.channel_id_data = hsi_get_channel_id_by_name(cl,
1391 		"speech-data");
1392 	if (cs_char_data.channel_id_data < 0) {
1393 		err = cs_char_data.channel_id_data;
1394 		dev_err(dev, "Could not get data channel (%d)\n", err);
1395 		return err;
1396 	}
1397 
1398 	err = misc_register(&cs_char_miscdev);
1399 	if (err)
1400 		dev_err(dev, "Failed to register: %d\n", err);
1401 
1402 	return err;
1403 }
1404 
1405 static int cs_hsi_client_remove(struct device *dev)
1406 {
1407 	struct cs_hsi_iface *hi;
1408 
1409 	dev_dbg(dev, "hsi_client_remove\n");
1410 	misc_deregister(&cs_char_miscdev);
1411 	spin_lock_bh(&cs_char_data.lock);
1412 	hi = cs_char_data.hi;
1413 	cs_char_data.hi = NULL;
1414 	spin_unlock_bh(&cs_char_data.lock);
1415 	if (hi)
1416 		cs_hsi_stop(hi);
1417 
1418 	return 0;
1419 }
1420 
1421 static struct hsi_client_driver cs_hsi_driver = {
1422 	.driver = {
1423 		.name	= "cmt-speech",
1424 		.owner	= THIS_MODULE,
1425 		.probe	= cs_hsi_client_probe,
1426 		.remove	= cs_hsi_client_remove,
1427 	},
1428 };
1429 
1430 static int __init cs_char_init(void)
1431 {
1432 	pr_info("CMT speech driver added\n");
1433 	return hsi_register_client_driver(&cs_hsi_driver);
1434 }
1435 module_init(cs_char_init);
1436 
1437 static void __exit cs_char_exit(void)
1438 {
1439 	hsi_unregister_client_driver(&cs_hsi_driver);
1440 	pr_info("CMT speech driver removed\n");
1441 }
1442 module_exit(cs_char_exit);
1443 
1444 MODULE_ALIAS("hsi:cmt-speech");
1445 MODULE_AUTHOR("Kai Vehmanen <kai.vehmanen@nokia.com>");
1446 MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@nokia.com>");
1447 MODULE_DESCRIPTION("CMT speech driver");
1448 MODULE_LICENSE("GPL v2");
1449