/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>

#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"

static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}

static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	return gpiod_get_value(omap_port->wake_gpio);
}
#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}

static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem	*base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
				readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
				readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
				readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
				readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
				readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static int ssi_port_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, ssi_debug_port_show, inode->i_private);
}

static const struct file_operations ssi_port_regs_fops = {
	.open		= ssi_port_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static int ssi_div_set(void *data, u64 val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	if (val > SSI_MAX_DIVISOR)
		return -EINVAL;

	pm_runtime_get_sync(omap_port->pdev);
	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
	omap_port->sst.divisor = val;
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

static int ssi_debug_add_port(struct omap_ssi_port *omap_port,
				     struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	if (!dir)
		return -ENOMEM;
	omap_port->dir = dir;
	debugfs_create_file("regs", 0444, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	if (!dir)
		return -ENOMEM;
	debugfs_create_file("divisor", 0644, dir, port,
			    &ssi_sst_div_fops);

	return 0;
}
#endif

static void ssi_process_errqueue(struct work_struct *work)
{
	struct omap_ssi_port *omap_port;
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work);

	list_for_each_safe(head, tmp, &omap_port->errqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		msg->complete(msg);
		list_del(head);
	}
}

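/*
 * Claim a free GDD (DMA) logical channel for @msg and remember the
 * transfer in the controller's gdd_trn table. Returns the logical
 * channel number, or -EBUSY when all channels are in use.
 */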
static int ssi_claim_lch(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int lch;

	for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
		if (!omap_ssi->gdd_trn[lch].msg) {
			omap_ssi->gdd_trn[lch].msg = msg;
			omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
			return lch;
		}

	return -EBUSY;
}

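/*
 * Program GDD logical channel @lch for @msg and kick off the DMA
 * transfer: reads move data from the SSR buffer register to memory,
 * writes from memory to the SST buffer register. A clock reference is
 * held for the whole duration of the transfer.
 */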
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *gdd = omap_ssi->gdd;
	int err;
	u16 csdp;
	u16 ccr;
	u32 s_addr;
	u32 d_addr;
	u32 tmp;

	/* Hold clocks during the transfer */
	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_READ) {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_FROM_DEVICE);
		if (err < 0) {
			dev_dbg(&ssi->device, "DMA map SG failed!\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return err;
		}
		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
			SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
		ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = omap_port->ssr_dma +
					SSI_SSR_BUFFER_CH_REG(msg->channel);
		d_addr = sg_dma_address(msg->sgt.sgl);
	} else {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_TO_DEVICE);
		if (err < 0) {
			dev_dbg(&ssi->device, "DMA map SG failed!\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return err;
		}
		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
			SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
		ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = sg_dma_address(msg->sgt.sgl);
		d_addr = omap_port->sst_dma +
					SSI_SST_BUFFER_CH_REG(msg->channel);
	}
	dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
		lch, csdp, ccr, s_addr, d_addr);

	writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
	writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
	writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
	writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
	writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
						gdd + SSI_GDD_CEN_REG(lch));

	spin_lock_bh(&omap_ssi->lock);
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp |= SSI_GDD_LCH(lch);
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock_bh(&omap_ssi->lock);
	writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

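/*
 * Start a PIO transfer for @msg by arming the DATAACCEPT (write) or
 * DATAAVAILABLE (read) interrupt for its channel; the actual word
 * copies happen in ssi_pio_complete() from the port interrupt thread.
 * Writes keep an extra clock reference until the transfer completes.
 */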
static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
						msg->ttype ? "write" : "read");
	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_autosuspend(omap_port->pdev);
	msg->actual_len = 0;
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

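/*
 * Start the first queued message on @queue, if it is still idle.
 * Transfers longer than a single 32-bit frame go through the GDD DMA
 * engine when a logical channel is available; everything else falls
 * back to PIO.
 */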
static int ssi_start_transfer(struct list_head *queue)
{
	struct hsi_msg *msg;
	int lch = -1;

	if (list_empty(queue))
		return 0;
	msg = list_first_entry(queue, struct hsi_msg, link);
	if (msg->status != HSI_STATUS_QUEUED)
		return 0;
	if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
		lch = ssi_claim_lch(msg);
	if (lch >= 0)
		return ssi_start_dma(msg, lch);
	else
		return ssi_start_pio(msg);
}

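/*
 * Handle a break frame request: transmit a HW break on the TX side,
 * or queue the message and arm the break-detect interrupt on the RX
 * side. Break frames are only valid in FRAME mode.
 */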
static int ssi_async_break(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int err = 0;
	u32 tmp;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		if (omap_port->sst.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
		msg->status = HSI_STATUS_COMPLETED;
		msg->complete(msg);
	} else {
		if (omap_port->ssr.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		spin_lock_bh(&omap_port->lock);
		tmp = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));
		writel(tmp | SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		msg->status = HSI_STATUS_PROCEEDING;
		list_add_tail(&msg->link, &omap_port->brkqueue);
		spin_unlock_bh(&omap_port->lock);
	}
out:
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

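/*
 * hsi_port async() hook: queue @msg on its channel's TX or RX queue
 * and try to start the transfer immediately. Scatterlists with more
 * than one entry are not supported yet.
 */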
static int ssi_async(struct hsi_msg *msg)
{
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct list_head *queue;
	int err = 0;

	/* Check msg before it is dereferenced below */
	BUG_ON(!msg);

	port = hsi_get_port(msg->cl);
	omap_port = hsi_port_drvdata(port);

	if (msg->sgt.nents > 1)
		return -ENOSYS; /* TODO: Add sg support */

	if (msg->break_frame)
		return ssi_async_break(msg);

	if (msg->ttype) {
		BUG_ON(msg->channel >= omap_port->sst.channels);
		queue = &omap_port->txqueue[msg->channel];
	} else {
		BUG_ON(msg->channel >= omap_port->ssr.channels);
		queue = &omap_port->rxqueue[msg->channel];
	}
	msg->status = HSI_STATUS_QUEUED;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	list_add_tail(&msg->link, queue);
	err = ssi_start_transfer(queue);
	if (err < 0) {
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
	dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
				msg->status, msg->ttype, msg->channel);

	return err;
}

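/*
 * Compute the SST divisor for omap_ssi->max_speed. The TX bit clock
 * is half the functional clock, and the hardware presumably divides
 * it by (divisor + 1); the decrement below makes an exact multiple of
 * max_speed round down, so the resulting rate never exceeds it.
 */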
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 tx_fckrate = (u32) omap_ssi->fck_rate;

	/* / 2 : SSI TX clock is always half of the SSI functional clock */
	tx_fckrate >>= 1;
	/* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
	tx_fckrate--;
	dev_dbg(&ssi->device, "TX div %d for fck_rate %lu kHz speed %d kb/s\n",
		tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
		omap_ssi->max_speed);

	return tx_fckrate / omap_ssi->max_speed;
}

static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
	struct list_head *node, *tmp;
	struct hsi_msg *msg;

	list_for_each_safe(node, tmp, queue) {
		msg = list_entry(node, struct hsi_msg, link);
		if ((cl) && (cl != msg->cl))
			continue;
		list_del(node);
		pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
			msg->channel, msg, msg->sgt.sgl->length,
					msg->ttype, msg->context);
		if (msg->destructor)
			msg->destructor(msg);
		else
			hsi_free_msg(msg);
	}
}

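/*
 * hsi_port setup() hook: program the TX divisor, frame size, channel
 * count, arbitration and mode from the client configuration, with
 * both directions parked in SLEEP mode while the registers are
 * updated. The shadow copies kept in omap_port are used to restore
 * the context after OFF mode.
 */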
static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	u32 val;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d kb/s (div %d)\n",
						cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	val = readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
		(cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow registering for OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

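/*
 * hsi_port flush() hook: abort all ongoing DMA and PIO transfers on
 * the port, drain the HW buffers, ack pending errors and interrupts
 * and drop every queued message. The pins are parked in the idle
 * state meanwhile so that no new frames race with the flush.
 */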
static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);

	/* stop all ssi communication */
	pinctrl_pm_select_idle_state(omap_port->pdev);
	udelay(1); /* wait for racing frames */

	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_autosuspend(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_autosuspend(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);

	/* Resume SSI communication */
	pinctrl_pm_select_default_state(omap_port->pdev);

	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

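/*
 * TX wake handling: the first ssi_start_tx() caller raises the WAKE
 * line (and takes a clock reference) from a workqueue, since grabbing
 * the clocks may sleep; ssi_stop_tx() drops the line and the clock
 * reference again once the last user is gone.
 */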
static void start_tx_work(struct work_struct *work)
{
	struct omap_ssi_port *omap_port =
				container_of(work, struct omap_ssi_port, work);
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
}

static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	spin_unlock_bh(&omap_port->wk_lock);

	schedule_work(&omap_port->work);

	return 0;
}

static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */

	return 0;
}

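/*
 * Keep starting transfers from @queue, completing messages with an
 * error status, until one transfer actually starts or the queue runs
 * empty (in which case ssi_start_transfer() returns 0).
 */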
static void ssi_transfer(struct omap_ssi_port *omap_port,
							struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	pm_runtime_get(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
}

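/*
 * Drop every queued message belonging to @cl, releasing the clock
 * references held by in-flight writes and disarming/acking the
 * interrupts that were serving this client.
 */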
static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/*
			 * Release the clock references for writes,
			 * GDD ones included.
			 */
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
		SSI_MPU_STATUS_REG(port->num, 0));
}

static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues
		 */
		if (msg->ttype == HSI_MSG_READ) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}

static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_put_sync(omap_port->pdev);
		pm_runtime_get(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

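/*
 * Error interrupt handler: ack the SSR error, cancel all in-flight
 * GDD and PIO reads and complete the pending read requests with
 * HSI_STATUS_ERROR, restarting whatever is still queued afterwards.
 */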
static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* ACK error */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error to all current pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}

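/*
 * Break interrupt handler: disarm and ack the break detection and
 * complete every message waiting on the break queue.
 */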
static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}
}

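/*
 * Move one 32-bit word of @queue's current message between memory and
 * the channel buffer register. The interrupt stays armed until the
 * whole message is done (for writes, until the last frame has really
 * left the buffer); then the message is completed and the next one in
 * the queue is started.
 */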
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock_bh(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE)
			writel(*buf, omap_port->sst_base +
					SSI_SST_BUFFER_CH_REG(msg->channel));
		else
			*buf = readl(omap_port->ssr_base +
					SSI_SSR_BUFFER_CH_REG(msg->channel));
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
							msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
				((msg->status == HSI_STATUS_COMPLETED) &&
					(msg->ttype == HSI_MSG_WRITE))) {
			writel(val, omap_ssi->sys +
					SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock_bh(&omap_port->lock);

			return;
		}
	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_mark_last_busy(omap_port->pdev);
		pm_runtime_put_autosuspend(omap_port->pdev);
	}
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock_bh(&omap_port->lock);
	msg->complete(msg);
	ssi_transfer(omap_port, queue);
}

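/*
 * Threaded port interrupt handler: service every enabled status bit
 * (PIO data, break detection, error) and loop until no enabled bit is
 * pending anymore.
 */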
static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	pm_runtime_get_sync(omap_port->pdev);

	do {
		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		for (ch = 0; ch < omap_port->channels; ch++) {
			if (status_reg & SSI_DATAACCEPT(ch))
				ssi_pio_complete(port, &omap_port->txqueue[ch]);
			if (status_reg & SSI_DATAAVAILABLE(ch))
				ssi_pio_complete(port, &omap_port->rxqueue[ch]);
		}
		if (status_reg & SSI_BREAKDETECTED)
			ssi_break_complete(port);
		if (status_reg & SSI_ERROROCCURED)
			ssi_error(port);

		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		/* TODO: sleep if we retry? */
	} while (status_reg);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return IRQ_HANDLED;
}

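/*
 * Threaded CAWAKE GPIO handler: translate wake line edges into
 * HSI_EVENT_START_RX/HSI_EVENT_STOP_RX events and keep the clocks on
 * while the remote end holds the line high.
 */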
static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/*
		 * We can have a quick High-Low-High transition in the line.
		 * In such a case if we have long interrupt latencies,
		 * we can miss the low event or get a high event twice.
		 * This workaround avoids breaking the clock reference
		 * count when such a situation occurs.
		 */
		if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_get_sync(omap_port->pdev);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
	}

	return IRQ_HANDLED;
}

static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int err;

	err = platform_get_irq(pd, 0);
	if (err < 0) {
		dev_err(&port->device, "Port IRQ resource missing\n");
		return err;
	}
	omap_port->irq = err;
	err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL,
				ssi_pio_thread, IRQF_ONESHOT, "SSI PORT", port);
	if (err < 0)
		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
							omap_port->irq, err);
	return err;
}

static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int cawake_irq;
	int err;

	if (!omap_port->wake_gpio) {
		omap_port->wake_irq = -1;
		return 0;
	}

	cawake_irq = gpiod_to_irq(omap_port->wake_gpio);
	omap_port->wake_irq = cawake_irq;

	err = devm_request_threaded_irq(&port->device, cawake_irq, NULL,
		ssi_wake_thread,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
		"SSI cawake", port);
	if (err < 0) {
		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
						cawake_irq, err);
		return err;
	}
	err = enable_irq_wake(cawake_irq);
	if (err < 0)
		dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
			cawake_irq, err);

	return err;
}

static void ssi_queues_init(struct omap_ssi_port *omap_port)
{
	unsigned int ch;

	for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
		INIT_LIST_HEAD(&omap_port->txqueue[ch]);
		INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
	}
	INIT_LIST_HEAD(&omap_port->brkqueue);
}

static int ssi_port_get_iomem(struct platform_device *pd,
		const char *name, void __iomem **pbase, dma_addr_t *phy)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct resource *mem;
	struct resource *ioarea;
	void __iomem *base;

	mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
	if (!mem) {
		dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
		return -ENXIO;
	}
	ioarea = devm_request_mem_region(&port->device, mem->start,
					resource_size(mem), dev_name(&pd->dev));
	if (!ioarea) {
		dev_err(&pd->dev, "%s IO memory region request failed\n",
								mem->name);
		return -ENXIO;
	}
	base = devm_ioremap(&port->device, mem->start, resource_size(mem));
	if (!base) {
		dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
		return -ENXIO;
	}
	*pbase = base;

	if (phy)
		*phy = mem->start;

	return 0;
}

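/*
 * Bind a platform device to the first uninitialized port of the SSI
 * controller: map the SST/SSR register windows, request the port and
 * CAWAKE interrupts, set up the message queues and runtime PM, and
 * register the HSI clients described in the device tree.
 */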
static int ssi_port_probe(struct platform_device *pd)
{
	struct device_node *np = pd->dev.of_node;
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct gpio_desc *cawake_gpio = NULL;
	u32 port_id;
	int err;

	dev_dbg(&pd->dev, "init ssi port...\n");

	if (!ssi->port || !omap_ssi->port) {
		dev_err(&pd->dev, "ssi controller not initialized!\n");
		err = -ENODEV;
		goto error;
	}

	/* get id of first uninitialized port in controller */
	for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
		port_id++)
		;

	if (port_id >= ssi->num_ports) {
		dev_err(&pd->dev, "port id out of range!\n");
		err = -ENODEV;
		goto error;
	}

	port = ssi->port[port_id];

	if (!np) {
		dev_err(&pd->dev, "missing device tree data\n");
		err = -EINVAL;
		goto error;
	}

	cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN);
	if (IS_ERR(cawake_gpio)) {
		err = PTR_ERR(cawake_gpio);
		dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err);
		goto error;
	}

	omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
	if (!omap_port) {
		err = -ENOMEM;
		goto error;
	}
	omap_port->wake_gpio = cawake_gpio;
	omap_port->pdev = &pd->dev;
	omap_port->port_id = port_id;

	INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue);
	INIT_WORK(&omap_port->work, start_tx_work);

	/* initialize HSI port */
	port->async	= ssi_async;
	port->setup	= ssi_setup;
	port->flush	= ssi_flush;
	port->start_tx	= ssi_start_tx;
	port->stop_tx	= ssi_stop_tx;
	port->release	= ssi_release;
	hsi_port_set_drvdata(port, omap_port);
	omap_ssi->port[port_id] = omap_port;

	platform_set_drvdata(pd, port);

	err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
		&omap_port->sst_dma);
	if (err < 0)
		goto error;
	err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
		&omap_port->ssr_dma);
	if (err < 0)
		goto error;

	err = ssi_port_irq(port, pd);
	if (err < 0)
		goto error;
	err = ssi_wake_irq(port, pd);
	if (err < 0)
		goto error;

	ssi_queues_init(omap_port);
	spin_lock_init(&omap_port->lock);
	spin_lock_init(&omap_port->wk_lock);
	omap_port->dev = &port->device;

	pm_runtime_use_autosuspend(omap_port->pdev);
	pm_runtime_set_autosuspend_delay(omap_port->pdev, 250);
	pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
	err = ssi_debug_add_port(omap_port, omap_ssi->dir);
	if (err < 0) {
		pm_runtime_disable(omap_port->pdev);
		goto error;
	}
#endif

	hsi_add_clients_from_dt(port, np);

	dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id);

	return 0;

error:
	return err;
}

static int ssi_port_remove(struct platform_device *pd)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_remove_port(port);
#endif

	cancel_delayed_work_sync(&omap_port->errqueue_work);

	hsi_port_unregister_clients(port);

	port->async	= hsi_dummy_msg;
	port->setup	= hsi_dummy_cl;
	port->flush	= hsi_dummy_cl;
	port->start_tx	= hsi_dummy_cl;
	port->stop_tx	= hsi_dummy_cl;
	port->release	= hsi_dummy_cl;

	omap_ssi->port[omap_port->port_id] = NULL;
	platform_set_drvdata(pd, NULL);

	pm_runtime_dont_use_autosuspend(&pd->dev);
	pm_runtime_disable(&pd->dev);

	return 0;
}

static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
	writel_relaxed(omap_port->sst.divisor,
				omap_port->sst_base + SSI_SST_DIVISOR_REG);

	return 0;
}

void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
			       struct omap_ssi_port *omap_port)
{
	/* update divisor */
	u32 div = ssi_calculate_div(ssi);

	omap_port->sst.divisor = div;
	ssi_restore_divisor(omap_port);
}

#ifdef CONFIG_PM
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	omap_port->sys_mpu_enable = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));

	return 0;
}

static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem	*base;

	writel_relaxed(omap_port->sys_mpu_enable,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

	/* SST context */
	base = omap_port->sst_base;
	writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
	writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

	/* SSR context */
	base = omap_port->ssr_base;
	writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
	writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

	return 0;
}

static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
	u32 mode;

	writel_relaxed(omap_port->sst.mode,
				omap_port->sst_base + SSI_SST_MODE_REG);
	writel_relaxed(omap_port->ssr.mode,
				omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int omap_ssi_port_runtime_suspend(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime suspend!\n");

	ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
	if (omap_ssi->get_loss)
		omap_port->loss_count =
				omap_ssi->get_loss(ssi->device.parent);
	ssi_save_port_ctx(omap_port);

	return 0;
}

static int omap_ssi_port_runtime_resume(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime resume!\n");

	if ((omap_ssi->get_loss) && (omap_port->loss_count ==
				omap_ssi->get_loss(ssi->device.parent)))
		goto mode; /* We always need to restore the mode & TX divisor */

	ssi_restore_port_ctx(omap_port);

mode:
	ssi_restore_divisor(omap_port);
	ssi_restore_port_mode(omap_port);

	return 0;
}

static const struct dev_pm_ops omap_ssi_port_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
		omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS     (&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS     NULL
#endif

#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
	{ .compatible = "ti,omap3-ssi-port", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif

struct platform_driver ssi_port_pdriver = {
	.probe = ssi_port_probe,
	.remove	= ssi_port_remove,
	.driver	= {
		.name	= "omap_ssi_port",
		.of_match_table = omap_ssi_port_of_match,
		.pm	= DEV_PM_OPS,
	},
};