/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>

#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"
static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}

static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	return gpiod_get_value(omap_port->wake_gpio);
}

#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}

static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem	*base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
				readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
				readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
				readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
				readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
				readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

static int ssi_port_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, ssi_debug_port_show, inode->i_private);
}

static const struct file_operations ssi_port_regs_fops = {
	.open		= ssi_port_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

static int ssi_div_set(void *data, u64 val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	if (val > 127)
		return -EINVAL;

	pm_runtime_get_sync(omap_port->pdev);
	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
	omap_port->sst.divisor = val;
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

static int ssi_debug_add_port(struct omap_ssi_port *omap_port,
				     struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	if (!dir)
		return -ENOMEM;
	omap_port->dir = dir;
	debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	if (!dir)
		return -ENOMEM;
	debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port,
			    &ssi_sst_div_fops);

	return 0;
}
#endif

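/*
 * ssi_claim_lch - grab a free GDD logical channel for a DMA transfer.
 * Returns the channel index, or -EBUSY when all SSI_MAX_GDD_LCH channels
 * are busy and the caller has to fall back to PIO.
 */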
static int ssi_claim_lch(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int lch;

	for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) {
		if (!omap_ssi->gdd_trn[lch].msg) {
			omap_ssi->gdd_trn[lch].msg = msg;
			omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
			return lch;
		}
	}

	return -EBUSY;
}

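/*
 * ssi_start_dma - program one GDD logical channel for the message at the
 * head of a queue. The buffer is DMA-mapped and the GDD is pointed at the
 * SSR buffer register (read) or SST buffer register (write). A runtime PM
 * reference is taken here; the cancellation paths (ssi_flush(),
 * ssi_cleanup_gdd()) drop it for reads, while write references are
 * dropped in ssi_cleanup_queues().
 */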
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *gdd = omap_ssi->gdd;
	int err;
	u16 csdp;
	u16 ccr;
	u32 s_addr;
	u32 d_addr;
	u32 tmp;

	if (msg->ttype == HSI_MSG_READ) {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_FROM_DEVICE);
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed!\n");
			return -ENOMEM;
		}
		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
			SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
		ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = omap_port->ssr_dma +
					SSI_SSR_BUFFER_CH_REG(msg->channel);
		d_addr = sg_dma_address(msg->sgt.sgl);
	} else {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_TO_DEVICE);
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed!\n");
			return -ENOMEM;
		}
		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
			SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
		ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = sg_dma_address(msg->sgt.sgl);
		d_addr = omap_port->sst_dma +
					SSI_SST_BUFFER_CH_REG(msg->channel);
	}
	dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
		lch, csdp, ccr, s_addr, d_addr);

	/* Hold clocks during the transfer */
	pm_runtime_get_sync(omap_port->pdev);

	writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
	writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
	writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
	writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
	writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
						gdd + SSI_GDD_CEN_REG(lch));

	spin_lock_bh(&omap_ssi->lock);
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp |= SSI_GDD_LCH(lch);
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock_bh(&omap_ssi->lock);
	writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

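/*
 * ssi_start_pio - start an interrupt-driven transfer by arming the
 * channel's DATAACCEPT (write) or DATAAVAILABLE (read) interrupt; the
 * data itself is moved word by word in ssi_pio_complete(). Writes keep
 * an extra clock reference until they complete.
 */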
static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get_sync(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
						msg->ttype ? "write" : "read");
	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_sync(omap_port->pdev);
	msg->actual_len = 0;
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

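/*
 * ssi_start_transfer - start the message at the head of @queue if it is
 * still only queued. DMA is used for transfers bigger than one 32-bit
 * word when a GDD channel can be claimed; everything else goes through
 * PIO. Called with the port lock held.
 */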
static int ssi_start_transfer(struct list_head *queue)
{
	struct hsi_msg *msg;
	int lch = -1;

	if (list_empty(queue))
		return 0;
	msg = list_first_entry(queue, struct hsi_msg, link);
	if (msg->status != HSI_STATUS_QUEUED)
		return 0;
	if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
		lch = ssi_claim_lch(msg);
	if (lch >= 0)
		return ssi_start_dma(msg, lch);
	else
		return ssi_start_pio(msg);
}

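/*
 * ssi_async_break - transmit a break frame (write), or register interest
 * in receiving one (read). Break frames are only valid in FRAME mode;
 * read requests are parked on brkqueue until ssi_break_complete() runs.
 */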
static int ssi_async_break(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int err = 0;
	u32 tmp;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		if (omap_port->sst.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
		msg->status = HSI_STATUS_COMPLETED;
		msg->complete(msg);
	} else {
		if (omap_port->ssr.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		spin_lock_bh(&omap_port->lock);
		tmp = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));
		writel(tmp | SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		msg->status = HSI_STATUS_PROCEEDING;
		list_add_tail(&msg->link, &omap_port->brkqueue);
		spin_unlock_bh(&omap_port->lock);
	}
out:
	pm_runtime_put_sync(omap_port->pdev);

	return err;
}

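/*
 * ssi_async - hsi_port entry point for asynchronous transfers: put the
 * message on the per-channel TX or RX queue and kick the queue if it was
 * idle. Scatter-gather lists with more than one entry are not supported.
 */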
static int ssi_async(struct hsi_msg *msg)
{
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct list_head *queue;
	int err = 0;

	BUG_ON(!msg);

	port = hsi_get_port(msg->cl);
	omap_port = hsi_port_drvdata(port);

	if (msg->sgt.nents > 1)
		return -ENOSYS; /* TODO: Add sg support */

	if (msg->break_frame)
		return ssi_async_break(msg);

	if (msg->ttype) {
		BUG_ON(msg->channel >= omap_port->sst.channels);
		queue = &omap_port->txqueue[msg->channel];
	} else {
		BUG_ON(msg->channel >= omap_port->ssr.channels);
		queue = &omap_port->rxqueue[msg->channel];
	}
	msg->status = HSI_STATUS_QUEUED;
	spin_lock_bh(&omap_port->lock);
	list_add_tail(&msg->link, queue);
	err = ssi_start_transfer(queue);
	if (err < 0) {
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
	}
	spin_unlock_bh(&omap_port->lock);
	dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
				msg->status, msg->ttype, msg->channel);

	return err;
}

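/*
 * ssi_calculate_div - compute the SST divisor for the requested TX speed.
 * The TX bit clock is half the functional clock, further divided by the
 * hardware; the "tx_fckrate--" makes the integer division round down to
 * the next divisor on exact multiples, which matches a
 * divide-by-(DIVISOR + 1) scheme. For example, with fck_rate = 96000 kHz
 * and max_speed = 4000 kb/s: (48000 - 1) / 4000 = 11, and
 * 48000 / (11 + 1) = 4000 kb/s.
 */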
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 tx_fckrate = (u32) omap_ssi->fck_rate;

	/* / 2 : SSI TX clock is always half of the SSI functional clock */
	tx_fckrate >>= 1;
	/* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
	tx_fckrate--;
	dev_dbg(&ssi->device, "TX div %d for fck_rate %lu kHz speed %d kb/s\n",
		tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
		omap_ssi->max_speed);

	return tx_fckrate / omap_ssi->max_speed;
}

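/*
 * ssi_flush_queue - drop all messages on @queue or, when @cl is non-NULL,
 * only the ones belonging to that client. Messages with a destructor are
 * handed back to their owner; the rest are freed.
 */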
static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
	struct list_head *node, *tmp;
	struct hsi_msg *msg;

	list_for_each_safe(node, tmp, queue) {
		msg = list_entry(node, struct hsi_msg, link);
		if ((cl) && (cl != msg->cl))
			continue;
		list_del(node);
		pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
			msg->channel, msg, msg->sgt.sgl->length,
					msg->ttype, msg->context);
		if (msg->destructor)
			msg->destructor(msg);
		else
			hsi_free_msg(msg);
	}
}

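/*
 * ssi_setup - apply the client's TX/RX configuration to the port. Both
 * directions are put to SLEEP while the registers are rewritten, and a
 * shadow copy of the configuration is kept in omap_port so that it can
 * be replayed after an OFF-mode context loss.
 */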
static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	u32 val;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d kb/s (div %d)\n",
						cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	val = readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
		(cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow registering for OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return err;
}

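/*
 * ssi_flush - abort all ongoing communication on the port: cancel every
 * DMA transfer, drain the SST/SSR buffers, ack pending errors and
 * interrupts, and dequeue all requests. The pins are parked in their
 * idle state meanwhile so that no new frames race in.
 */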
static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);

	/* stop all ssi communication */
	pinctrl_pm_select_idle_state(omap_port->pdev);
	udelay(1); /* wait for racing frames */

	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_sync(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_sync(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);

	/* Resume SSI communication */
	pinctrl_pm_select_default_state(omap_port->pdev);

	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

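/*
 * ssi_start_tx/ssi_stop_tx - reference-counted control of the outgoing
 * wake line: the first user raises WAKE and grabs the clocks, the last
 * one lowers it and releases them.
 */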
static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	return 0;
}

static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	pm_runtime_put_sync(omap_port->pdev); /* Release clocks */
	spin_unlock_bh(&omap_port->wk_lock);

	return 0;
}

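/*
 * ssi_transfer - restart a queue, completing messages with
 * HSI_STATUS_ERROR until one of them actually starts (or the queue
 * drains).
 */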
static void ssi_transfer(struct omap_ssi_port *omap_port,
							struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
}

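/*
 * ssi_cleanup_queues - flush a dying client's messages from every queue
 * and disarm the interrupt and buffer-state bits that belonged to its
 * in-flight transfers.
 */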
static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/* Release the clock references held for writes, GDD ones too */
			pm_runtime_put_sync(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
		SSI_MPU_STATUS_REG(port->num, 0));
}

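/*
 * ssi_cleanup_gdd - stop the client's pending DMA transfers and clear
 * their logical channels' interrupt enable and status bits. Clock
 * references taken for writes are dropped later in ssi_cleanup_queues().
 */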
static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues
		 */
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_sync(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}

static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	spin_lock_bh(&omap_port->lock);
	pm_runtime_get_sync(omap_port->pdev);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	pm_runtime_put_sync(omap_port->pdev);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (omap_port->wkin_cken) {
			pm_runtime_put_sync(omap_port->pdev);
			omap_port->wkin_cken = 0;
		}
		pm_runtime_get_sync(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put_sync(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);

	return 0;
}

static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* Get the error to be acknowledged */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error to all currently pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}

static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}
}

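/*
 * ssi_pio_complete - move the next 32-bit word of the message at the
 * head of @queue. Write completion is deferred by one interrupt so that
 * the last frame has really gone out before the complete callback runs.
 */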
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE) {
			writel(*buf, omap_port->sst_base +
					SSI_SST_BUFFER_CH_REG(msg->channel));
		} else {
			*buf = readl(omap_port->ssr_base +
					SSI_SSR_BUFFER_CH_REG(msg->channel));
		}
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
							msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
				((msg->status == HSI_STATUS_COMPLETED) &&
					(msg->ttype == HSI_MSG_WRITE))) {
			writel(val, omap_ssi->sys +
					SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock(&omap_port->lock);

			return;
		}
	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_put_sync(omap_port->pdev);
	}
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock(&omap_port->lock);
	msg->complete(msg);
	ssi_transfer(omap_port, queue);
}

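/*
 * ssi_pio_tasklet - bottom half of the port interrupt: dispatch data,
 * break and error events, then either reschedule itself if work is left
 * or re-enable the port IRQ.
 */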
static void ssi_pio_tasklet(unsigned long ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	pm_runtime_get_sync(omap_port->pdev);
	status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
	status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

	for (ch = 0; ch < omap_port->channels; ch++) {
		if (status_reg & SSI_DATAACCEPT(ch))
			ssi_pio_complete(port, &omap_port->txqueue[ch]);
		if (status_reg & SSI_DATAAVAILABLE(ch))
			ssi_pio_complete(port, &omap_port->rxqueue[ch]);
	}
	if (status_reg & SSI_BREAKDETECTED)
		ssi_break_complete(port);
	if (status_reg & SSI_ERROROCCURED)
		ssi_error(port);

	status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
	status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_sync(omap_port->pdev);

	if (status_reg)
		tasklet_hi_schedule(&omap_port->pio_tasklet);
	else
		enable_irq(omap_port->irq);
}

static irqreturn_t ssi_pio_isr(int irq, void *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	tasklet_hi_schedule(&omap_port->pio_tasklet);
	disable_irq_nosync(irq);

	return IRQ_HANDLED;
}

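/*
 * ssi_wake_tasklet - bottom half of the CAWAKE edge interrupt: take a
 * clock reference and signal HSI_EVENT_START_RX on the rising edge,
 * release both again on the falling edge.
 */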
static void ssi_wake_tasklet(unsigned long ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/*
		 * We can have a quick high-low-high transition on the line.
		 * In such a case, if interrupt latencies are long, we can
		 * miss the low event or see the high event twice. This
		 * workaround avoids breaking the clock reference count
		 * when such a situation occurs.
		 */
		spin_lock(&omap_port->lock);
		if (!omap_port->wkin_cken) {
			omap_port->wkin_cken = 1;
			pm_runtime_get_sync(omap_port->pdev);
		}
		spin_unlock(&omap_port->lock);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		spin_lock(&omap_port->lock);
		if (omap_port->wkin_cken) {
			pm_runtime_put_sync(omap_port->pdev);
			omap_port->wkin_cken = 0;
		}
		spin_unlock(&omap_port->lock);
	}
}

static irqreturn_t ssi_wake_isr(int irq __maybe_unused, void *ssi_port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port);

	tasklet_hi_schedule(&omap_port->wake_tasklet);

	return IRQ_HANDLED;
}

static int ssi_port_irq(struct hsi_port *port,
						struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int err;

	err = platform_get_irq(pd, 0);
	if (err < 0) {
		dev_err(&port->device, "Port IRQ resource missing\n");
		return err;
	}
	omap_port->irq = err;
	tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet,
							(unsigned long)port);
	err = devm_request_irq(&port->device, omap_port->irq, ssi_pio_isr,
						0, "mpu_irq0", port);
	if (err < 0)
		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
							omap_port->irq, err);
	return err;
}

static int ssi_wake_irq(struct hsi_port *port,
						struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int cawake_irq;
	int err;

	if (!omap_port->wake_gpio) {
		omap_port->wake_irq = -1;
		return 0;
	}

	cawake_irq = gpiod_to_irq(omap_port->wake_gpio);

	omap_port->wake_irq = cawake_irq;
	tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet,
							(unsigned long)port);
	err = devm_request_irq(&port->device, cawake_irq, ssi_wake_isr,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
							"cawake", port);
	if (err < 0) {
		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
						cawake_irq, err);
		return err;
	}
	err = enable_irq_wake(cawake_irq);
	if (err < 0)
		dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
			cawake_irq, err);

	return err;
}

static void ssi_queues_init(struct omap_ssi_port *omap_port)
{
	unsigned int ch;

	for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
		INIT_LIST_HEAD(&omap_port->txqueue[ch]);
		INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
	}
	INIT_LIST_HEAD(&omap_port->brkqueue);
}

static int ssi_port_get_iomem(struct platform_device *pd,
		const char *name, void __iomem **pbase, dma_addr_t *phy)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct resource *mem;
	struct resource *ioarea;
	void __iomem *base;

	mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
	if (!mem) {
		dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
		return -ENXIO;
	}
	ioarea = devm_request_mem_region(&port->device, mem->start,
					resource_size(mem), dev_name(&pd->dev));
	if (!ioarea) {
		dev_err(&pd->dev, "%s IO memory region request failed\n",
								mem->name);
		return -ENXIO;
	}
	base = devm_ioremap(&port->device, mem->start, resource_size(mem));
	if (!base) {
		dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
		return -ENXIO;
	}
	*pbase = base;

	if (phy)
		*phy = mem->start;

	return 0;
}

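/*
 * ssi_port_probe - bind one port sub-device: claim the first free slot
 * in the controller, map the SST/SSR register windows, request the MPU
 * and CAWAKE interrupts, and register the clients described in the DT.
 */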
static int ssi_port_probe(struct platform_device *pd)
{
	struct device_node *np = pd->dev.of_node;
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct gpio_desc *cawake_gpio = NULL;
	u32 port_id;
	int err;

	dev_dbg(&pd->dev, "init ssi port...\n");

	if (!ssi->port || !omap_ssi->port) {
		dev_err(&pd->dev, "ssi controller not initialized!\n");
		err = -ENODEV;
		goto error;
	}

	/* get id of first uninitialized port in controller */
	for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
		port_id++)
		;

	if (port_id >= ssi->num_ports) {
		dev_err(&pd->dev, "port id out of range!\n");
		err = -ENODEV;
		goto error;
	}

	port = ssi->port[port_id];

	if (!np) {
		dev_err(&pd->dev, "missing device tree data\n");
		err = -EINVAL;
		goto error;
	}

	cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN);
	if (IS_ERR(cawake_gpio)) {
		err = PTR_ERR(cawake_gpio);
		dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err);
		goto error;
	}

	omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
	if (!omap_port) {
		err = -ENOMEM;
		goto error;
	}
	omap_port->wake_gpio = cawake_gpio;
	omap_port->pdev = &pd->dev;
	omap_port->port_id = port_id;

	/* initialize HSI port */
	port->async	= ssi_async;
	port->setup	= ssi_setup;
	port->flush	= ssi_flush;
	port->start_tx	= ssi_start_tx;
	port->stop_tx	= ssi_stop_tx;
	port->release	= ssi_release;
	hsi_port_set_drvdata(port, omap_port);
	omap_ssi->port[port_id] = omap_port;

	platform_set_drvdata(pd, port);

	err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
		&omap_port->sst_dma);
	if (err < 0)
		goto error;
	err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
		&omap_port->ssr_dma);
	if (err < 0)
		goto error;

	err = ssi_port_irq(port, pd);
	if (err < 0)
		goto error;
	err = ssi_wake_irq(port, pd);
	if (err < 0)
		goto error;

	ssi_queues_init(omap_port);
	spin_lock_init(&omap_port->lock);
	spin_lock_init(&omap_port->wk_lock);
	omap_port->dev = &port->device;

	pm_runtime_irq_safe(omap_port->pdev);
	pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
	err = ssi_debug_add_port(omap_port, omap_ssi->dir);
	if (err < 0) {
		pm_runtime_disable(omap_port->pdev);
		goto error;
	}
#endif

	hsi_add_clients_from_dt(port, np);

	dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id);

	return 0;

error:
	return err;
}

static int ssi_port_remove(struct platform_device *pd)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_remove_port(port);
#endif

	hsi_port_unregister_clients(port);

	tasklet_kill(&omap_port->wake_tasklet);
	tasklet_kill(&omap_port->pio_tasklet);

	port->async	= hsi_dummy_msg;
	port->setup	= hsi_dummy_cl;
	port->flush	= hsi_dummy_cl;
	port->start_tx	= hsi_dummy_cl;
	port->stop_tx	= hsi_dummy_cl;
	port->release	= hsi_dummy_cl;

	omap_ssi->port[omap_port->port_id] = NULL;
	platform_set_drvdata(pd, NULL);
	pm_runtime_disable(&pd->dev);

	return 0;
}

static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
	writel_relaxed(omap_port->sst.divisor,
				omap_port->sst_base + SSI_SST_DIVISOR_REG);

	return 0;
}

void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
			       struct omap_ssi_port *omap_port)
{
	/* update divisor */
	u32 div = ssi_calculate_div(ssi);

	omap_port->sst.divisor = div;
	ssi_restore_divisor(omap_port);
}

#ifdef CONFIG_PM
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	omap_port->sys_mpu_enable = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));

	return 0;
}

static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem	*base;

	writel_relaxed(omap_port->sys_mpu_enable,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

	/* SST context */
	base = omap_port->sst_base;
	writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
	writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

	/* SSR context */
	base = omap_port->ssr_base;
	writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
	writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

	return 0;
}

static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
	u32 mode;

	writel_relaxed(omap_port->sst.mode,
				omap_port->sst_base + SSI_SST_MODE_REG);
	writel_relaxed(omap_port->ssr.mode,
				omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int omap_ssi_port_runtime_suspend(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime suspend!\n");

	ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
	if (omap_ssi->get_loss)
		omap_port->loss_count =
				omap_ssi->get_loss(ssi->device.parent);
	ssi_save_port_ctx(omap_port);

	return 0;
}

static int omap_ssi_port_runtime_resume(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime resume!\n");

	if ((omap_ssi->get_loss) && (omap_port->loss_count ==
				omap_ssi->get_loss(ssi->device.parent)))
		goto mode; /* We always need to restore the mode & TX divisor */

	ssi_restore_port_ctx(omap_port);

mode:
	ssi_restore_divisor(omap_port);
	ssi_restore_port_mode(omap_port);

	return 0;
}

static const struct dev_pm_ops omap_ssi_port_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
		omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS     (&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS     NULL
#endif

#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
	{ .compatible = "ti,omap3-ssi-port", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif

struct platform_driver ssi_port_pdriver = {
	.probe = ssi_port_probe,
	.remove	= ssi_port_remove,
	.driver	= {
		.name	= "omap_ssi_port",
		.of_match_table = omap_ssi_port_of_match,
		.pm	= DEV_PM_OPS,
	},
};