/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>

#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"

static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}

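/* Sample the current level of the incoming CAWAKE wake line. */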
static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	return gpiod_get_value(omap_port->wake_gpio);
}

#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}

static int ssi_port_regs_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem	*base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
				readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
				readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
				readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
				readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
				readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(ssi_port_regs);

static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static int ssi_div_set(void *data, u64 val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	if (val > 127)
		return -EINVAL;

	pm_runtime_get_sync(omap_port->pdev);
	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
	omap_port->sst.divisor = val;
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

static int ssi_debug_add_port(struct omap_ssi_port *omap_port,
				     struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	if (!dir)
		return -ENOMEM;
	omap_port->dir = dir;
	debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	if (!dir)
		return -ENOMEM;
	debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port,
			    &ssi_sst_div_fops);

	return 0;
}
#endif

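/*
 * Deferred work: complete and dequeue every message parked on the port
 * error queue after an SSI error has been handled.
 */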
static void ssi_process_errqueue(struct work_struct *work)
{
	struct omap_ssi_port *omap_port;
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work);

	list_for_each_safe(head, tmp, &omap_port->errqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		msg->complete(msg);
		list_del(head);
	}
}

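/*
 * Claim a free GDD logical channel for a DMA transfer. Returns the
 * channel index, or -EBUSY if all SSI_MAX_GDD_LCH channels are taken.
 */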
static int ssi_claim_lch(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int lch;

	for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
		if (!omap_ssi->gdd_trn[lch].msg) {
			omap_ssi->gdd_trn[lch].msg = msg;
			omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
			return lch;
		}

	return -EBUSY;
}

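/*
 * Program GDD logical channel @lch for @msg and start the DMA transfer.
 * The clock reference taken here is dropped again on the error paths
 * below and otherwise held for the duration of the transfer.
 */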
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *gdd = omap_ssi->gdd;
	int err;
	u16 csdp;
	u16 ccr;
	u32 s_addr;
	u32 d_addr;
	u32 tmp;

	/* Hold clocks during the transfer */
	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_READ) {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_FROM_DEVICE);
		/* dma_map_sg() returns the number of mapped entries, 0 on error */
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed!\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return -EIO;
		}
		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
			SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
		ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = omap_port->ssr_dma +
					SSI_SSR_BUFFER_CH_REG(msg->channel);
		d_addr = sg_dma_address(msg->sgt.sgl);
	} else {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_TO_DEVICE);
		/* dma_map_sg() returns the number of mapped entries, 0 on error */
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed!\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return -EIO;
		}
		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
			SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
		ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = sg_dma_address(msg->sgt.sgl);
		d_addr = omap_port->sst_dma +
					SSI_SST_BUFFER_CH_REG(msg->channel);
	}
	dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
		lch, csdp, ccr, s_addr, d_addr);

	writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
	writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
	writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
	writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
	writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
						gdd + SSI_GDD_CEN_REG(lch));

	spin_lock_bh(&omap_ssi->lock);
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp |= SSI_GDD_LCH(lch);
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock_bh(&omap_ssi->lock);
	writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

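/*
 * Start a PIO transfer by arming the DATAACCEPT/DATAAVAILABLE interrupt
 * for the channel; ssi_pio_thread() then moves the data word by word.
 */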
static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
						msg->ttype ? "write" : "read");
	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_autosuspend(omap_port->pdev);
	msg->actual_len = 0;
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

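/*
 * Start the message at the head of @queue, if any. Multi-word transfers
 * try DMA first and fall back to PIO when no GDD channel is free;
 * single-word transfers always use PIO.
 */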
static int ssi_start_transfer(struct list_head *queue)
{
	struct hsi_msg *msg;
	int lch = -1;

	if (list_empty(queue))
		return 0;
	msg = list_first_entry(queue, struct hsi_msg, link);
	if (msg->status != HSI_STATUS_QUEUED)
		return 0;
	if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
		lch = ssi_claim_lch(msg);
	if (lch >= 0)
		return ssi_start_dma(msg, lch);
	else
		return ssi_start_pio(msg);
}

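/*
 * Handle a break frame request. A TX break is raised immediately, while
 * an RX break request arms the BREAKDETECTED interrupt and parks the
 * message on brkqueue until the break arrives. Both directions require
 * the port to be in FRAME mode.
 */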
static int ssi_async_break(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int err = 0;
	u32 tmp;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		if (omap_port->sst.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
		msg->status = HSI_STATUS_COMPLETED;
		msg->complete(msg);
	} else {
		if (omap_port->ssr.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		spin_lock_bh(&omap_port->lock);
		tmp = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));
		writel(tmp | SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		msg->status = HSI_STATUS_PROCEEDING;
		list_add_tail(&msg->link, &omap_port->brkqueue);
		spin_unlock_bh(&omap_port->lock);
	}
out:
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

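/*
 * hsi async() entry point: queue @msg on the tx/rx queue of its channel
 * and start the transfer right away if nothing is already in flight.
 */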
static int ssi_async(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct list_head *queue;
	int err = 0;

	BUG_ON(!msg);

	if (msg->sgt.nents > 1)
		return -ENOSYS; /* TODO: Add sg support */

	if (msg->break_frame)
		return ssi_async_break(msg);

	if (msg->ttype) {
		BUG_ON(msg->channel >= omap_port->sst.channels);
		queue = &omap_port->txqueue[msg->channel];
	} else {
		BUG_ON(msg->channel >= omap_port->ssr.channels);
		queue = &omap_port->rxqueue[msg->channel];
	}
	msg->status = HSI_STATUS_QUEUED;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	list_add_tail(&msg->link, queue);
	err = ssi_start_transfer(queue);
	if (err < 0) {
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
	dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
				msg->status, msg->ttype, msg->channel);

	return err;
}

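/*
 * Compute the SST divisor for the current max_speed. The TX bit clock
 * is half the functional clock and (assuming the hardware divides the
 * TX clock by div + 1, which is what the round-down adjustment below
 * implies) e.g., with illustrative numbers, fck_rate = 96000 kHz and
 * max_speed = 24000 kb/s give (48000 - 1) / 24000 = 1, i.e. exactly
 * 48000 / (1 + 1) = 24000 kb/s on the wire.
 */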
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 tx_fckrate = (u32) omap_ssi->fck_rate;

	/* / 2 : SSI TX clock is always half of the SSI functional clock */
	tx_fckrate >>= 1;
	/* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
	tx_fckrate--;
	dev_dbg(&ssi->device, "TX div %d for fck_rate %lu kHz speed %d kb/s\n",
		tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
		omap_ssi->max_speed);

	return tx_fckrate / omap_ssi->max_speed;
}

static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
	struct list_head *node, *tmp;
	struct hsi_msg *msg;

	list_for_each_safe(node, tmp, queue) {
		msg = list_entry(node, struct hsi_msg, link);
		if ((cl) && (cl != msg->cl))
			continue;
		list_del(node);
		pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
			msg->channel, msg, msg->sgt.sgl->length,
					msg->ttype, msg->context);
		if (msg->destructor)
			msg->destructor(msg);
		else
			hsi_free_msg(msg);
	}
}

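/*
 * hsi setup() entry point: put both directions to sleep, reprogram
 * divisor, frame size, channels, arbitration and mode from the client
 * configuration, and keep a shadow copy for restore after OFF mode.
 */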
static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	u32 val;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d kb/s (div %d)\n",
						cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	val = readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
		(cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow registering for OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

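/*
 * hsi flush() entry point: idle the lines via pinctrl, cancel all
 * in-flight DMA and PIO transfers, clear FIFOs, errors and interrupts,
 * then drop every queued request.
 */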
static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);

	/* stop all ssi communication */
	pinctrl_pm_select_idle_state(omap_port->pdev);
	udelay(1); /* wait for racing frames */

	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_autosuspend(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_autosuspend(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);

	/* Resume SSI communication */
	pinctrl_pm_select_default_state(omap_port->pdev);

	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static void start_tx_work(struct work_struct *work)
{
	struct omap_ssi_port *omap_port =
				container_of(work, struct omap_ssi_port, work);
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
}

static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	spin_unlock_bh(&omap_port->wk_lock);

	schedule_work(&omap_port->work);

	return 0;
}

static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */

	return 0;
}

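/*
 * (Re)start @queue, completing head messages with HSI_STATUS_ERROR
 * until one transfer starts successfully or the queue drains.
 */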
static void ssi_transfer(struct omap_ssi_port *omap_port,
							struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	pm_runtime_get(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
}

static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/* Release the clock references held for writes,
			 * the GDD ones included
			 */
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
		SSI_MPU_STATUS_REG(port->num, 0));
}

static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues
		 */
		if (msg->ttype == HSI_MSG_READ) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}

static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_put_sync(omap_port->pdev);
		pm_runtime_get(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* ACK error */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error to all current pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}

static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}
}

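/*
 * Move one 32-bit word of the head message of @queue to/from the FIFO.
 * The interrupt stays armed until the message completes; for writes the
 * completion callback only runs once the last frame has really been
 * sent out.
 */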
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock_bh(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE)
			writel(*buf, omap_port->sst_base +
					SSI_SST_BUFFER_CH_REG(msg->channel));
		else
			*buf = readl(omap_port->ssr_base +
					SSI_SSR_BUFFER_CH_REG(msg->channel));
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
							msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
				((msg->status == HSI_STATUS_COMPLETED) &&
					(msg->ttype == HSI_MSG_WRITE))) {
			writel(val, omap_ssi->sys +
					SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock_bh(&omap_port->lock);

			return;
		}
	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_mark_last_busy(omap_port->pdev);
		pm_runtime_put_autosuspend(omap_port->pdev);
	}
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock_bh(&omap_port->lock);
	msg->complete(msg);
	ssi_transfer(omap_port, queue);
}

static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	pm_runtime_get_sync(omap_port->pdev);

	do {
		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		for (ch = 0; ch < omap_port->channels; ch++) {
			if (status_reg & SSI_DATAACCEPT(ch))
				ssi_pio_complete(port, &omap_port->txqueue[ch]);
			if (status_reg & SSI_DATAAVAILABLE(ch))
				ssi_pio_complete(port, &omap_port->rxqueue[ch]);
		}
		if (status_reg & SSI_BREAKDETECTED)
			ssi_break_complete(port);
		if (status_reg & SSI_ERROROCCURED)
			ssi_error(port);

		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		/* TODO: sleep if we retry? */
	} while (status_reg);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return IRQ_HANDLED;
}

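/*
 * Threaded handler for the CAWAKE gpio edge interrupt: maps the line
 * level to HSI_EVENT_START_RX/HSI_EVENT_STOP_RX and keeps the clock
 * reference count consistent across fast line transitions.
 */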
static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/*
		 * We can have a quick high-low-high transition on the line.
		 * In such a case, if interrupt latencies are long, we can
		 * miss the low event or see the high event twice. This
		 * workaround avoids breaking the clock reference count
		 * when such a situation occurs.
		 */
		if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_get_sync(omap_port->pdev);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
	}

	return IRQ_HANDLED;
}

static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int err;

	err = platform_get_irq(pd, 0);
	if (err < 0) {
		dev_err(&port->device, "Port IRQ resource missing\n");
		return err;
	}
	omap_port->irq = err;
	err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL,
				ssi_pio_thread, IRQF_ONESHOT, "SSI PORT", port);
	if (err < 0)
		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
							omap_port->irq, err);
	return err;
}

static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int cawake_irq;
	int err;

	if (!omap_port->wake_gpio) {
		omap_port->wake_irq = -1;
		return 0;
	}

	cawake_irq = gpiod_to_irq(omap_port->wake_gpio);
	omap_port->wake_irq = cawake_irq;

	err = devm_request_threaded_irq(&port->device, cawake_irq, NULL,
		ssi_wake_thread,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
		"SSI cawake", port);
	if (err < 0) {
		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
						cawake_irq, err);
		return err;
	}
	err = enable_irq_wake(cawake_irq);
	if (err < 0)
		dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
			cawake_irq, err);

	return err;
}

static void ssi_queues_init(struct omap_ssi_port *omap_port)
{
	unsigned int ch;

	for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
		INIT_LIST_HEAD(&omap_port->txqueue[ch]);
		INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
	}
	INIT_LIST_HEAD(&omap_port->brkqueue);
}

static int ssi_port_get_iomem(struct platform_device *pd,
		const char *name, void __iomem **pbase, dma_addr_t *phy)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct resource *mem;
	struct resource *ioarea;
	void __iomem *base;

	mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
	if (!mem) {
		dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
		return -ENXIO;
	}
	ioarea = devm_request_mem_region(&port->device, mem->start,
					resource_size(mem), dev_name(&pd->dev));
	if (!ioarea) {
		dev_err(&pd->dev, "%s IO memory region request failed\n",
								mem->name);
		return -ENXIO;
	}
	base = devm_ioremap(&port->device, mem->start, resource_size(mem));
	if (!base) {
		dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
		return -ENXIO;
	}
	*pbase = base;

	if (phy)
		*phy = mem->start;

	return 0;
}

static int ssi_port_probe(struct platform_device *pd)
{
	struct device_node *np = pd->dev.of_node;
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct gpio_desc *cawake_gpio = NULL;
	u32 port_id;
	int err;

	dev_dbg(&pd->dev, "init ssi port...\n");

	if (!ssi->port || !omap_ssi->port) {
		dev_err(&pd->dev, "ssi controller not initialized!\n");
		err = -ENODEV;
		goto error;
	}

	/* get id of first uninitialized port in controller */
	for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
		port_id++)
		;

	if (port_id >= ssi->num_ports) {
		dev_err(&pd->dev, "port id out of range!\n");
		err = -ENODEV;
		goto error;
	}

	port = ssi->port[port_id];

	if (!np) {
		dev_err(&pd->dev, "missing device tree data\n");
		err = -EINVAL;
		goto error;
	}

	cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN);
	if (IS_ERR(cawake_gpio)) {
		err = PTR_ERR(cawake_gpio);
		dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err);
		goto error;
	}

	omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
	if (!omap_port) {
		err = -ENOMEM;
		goto error;
	}
	omap_port->wake_gpio = cawake_gpio;
	omap_port->pdev = &pd->dev;
	omap_port->port_id = port_id;

	INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue);
	INIT_WORK(&omap_port->work, start_tx_work);

	/* initialize HSI port */
	port->async	= ssi_async;
	port->setup	= ssi_setup;
	port->flush	= ssi_flush;
	port->start_tx	= ssi_start_tx;
	port->stop_tx	= ssi_stop_tx;
	port->release	= ssi_release;
	hsi_port_set_drvdata(port, omap_port);
	omap_ssi->port[port_id] = omap_port;

	platform_set_drvdata(pd, port);

	err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
		&omap_port->sst_dma);
	if (err < 0)
		goto error;
	err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
		&omap_port->ssr_dma);
	if (err < 0)
		goto error;

	err = ssi_port_irq(port, pd);
	if (err < 0)
		goto error;
	err = ssi_wake_irq(port, pd);
	if (err < 0)
		goto error;

	ssi_queues_init(omap_port);
	spin_lock_init(&omap_port->lock);
	spin_lock_init(&omap_port->wk_lock);
	omap_port->dev = &port->device;

	pm_runtime_use_autosuspend(omap_port->pdev);
	pm_runtime_set_autosuspend_delay(omap_port->pdev, 250);
	pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
	err = ssi_debug_add_port(omap_port, omap_ssi->dir);
	if (err < 0) {
		pm_runtime_disable(omap_port->pdev);
		goto error;
	}
#endif

	hsi_add_clients_from_dt(port, np);

	dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id);

	return 0;

error:
	return err;
}

static int ssi_port_remove(struct platform_device *pd)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_remove_port(port);
#endif

	cancel_delayed_work_sync(&omap_port->errqueue_work);

	hsi_port_unregister_clients(port);

	port->async	= hsi_dummy_msg;
	port->setup	= hsi_dummy_cl;
	port->flush	= hsi_dummy_cl;
	port->start_tx	= hsi_dummy_cl;
	port->stop_tx	= hsi_dummy_cl;
	port->release	= hsi_dummy_cl;

	omap_ssi->port[omap_port->port_id] = NULL;
	platform_set_drvdata(pd, NULL);

	pm_runtime_dont_use_autosuspend(&pd->dev);
	pm_runtime_disable(&pd->dev);

	return 0;
}

static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
	writel_relaxed(omap_port->sst.divisor,
				omap_port->sst_base + SSI_SST_DIVISOR_REG);

	return 0;
}

void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
			       struct omap_ssi_port *omap_port)
{
	/* update divisor */
	u32 div = ssi_calculate_div(ssi);

	omap_port->sst.divisor = div;
	ssi_restore_divisor(omap_port);
}

#ifdef CONFIG_PM
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	omap_port->sys_mpu_enable = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));

	return 0;
}

static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem	*base;

	writel_relaxed(omap_port->sys_mpu_enable,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

	/* SST context */
	base = omap_port->sst_base;
	writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
	writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

	/* SSR context */
	base = omap_port->ssr_base;
	writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
	writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

	return 0;
}

static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
	u32 mode;

	writel_relaxed(omap_port->sst.mode,
				omap_port->sst_base + SSI_SST_MODE_REG);
	writel_relaxed(omap_port->ssr.mode,
				omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int omap_ssi_port_runtime_suspend(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime suspend!\n");

	ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
	if (omap_ssi->get_loss)
		omap_port->loss_count =
				omap_ssi->get_loss(ssi->device.parent);
	ssi_save_port_ctx(omap_port);

	return 0;
}

static int omap_ssi_port_runtime_resume(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime resume!\n");

	if ((omap_ssi->get_loss) && (omap_port->loss_count ==
				omap_ssi->get_loss(ssi->device.parent)))
		goto mode; /* We always need to restore the mode & TX divisor */

	ssi_restore_port_ctx(omap_port);

mode:
	ssi_restore_divisor(omap_port);
	ssi_restore_port_mode(omap_port);

	return 0;
}

static const struct dev_pm_ops omap_ssi_port_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
		omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS     (&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS     NULL
#endif

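/*
 * Illustrative device tree sketch for a port node, based on what this
 * driver requests ("tx"/"rx" reg names, one port interrupt, optional
 * "ti,ssi-cawake" gpio); the addresses and gpio below are placeholders,
 * see the omap-ssi DT bindings for the authoritative format:
 *
 *	ssi_port1: ssi-port@4809000c {
 *		compatible = "ti,omap3-ssi-port";
 *		reg-names = "tx", "rx";
 *		reg = <0x4809000c 0x8c>, <0x4809080c 0x8c>;
 *		interrupt-parent = <&intc>;
 *		interrupts = <67>;
 *		ti,ssi-cawake-gpio = <&gpio5 23 GPIO_ACTIVE_HIGH>;
 *	};
 */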
#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
	{ .compatible = "ti,omap3-ssi-port", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif

struct platform_driver ssi_port_pdriver = {
	.probe = ssi_port_probe,
	.remove	= ssi_port_remove,
	.driver	= {
		.name	= "omap_ssi_port",
		.of_match_table = omap_ssi_port_of_match,
		.pm	= DEV_PM_OPS,
	},
};