1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * c8sectpfe-core.c - C8SECTPFE STi DVB driver
4  *
5  * Copyright (c) STMicroelectronics 2015
6  *
7  *   Author:Peter Bennett <peter.bennett@st.com>
8  *	    Peter Griffin <peter.griffin@linaro.org>
9  *
10  */
11 #include <linux/atomic.h>
12 #include <linux/clk.h>
13 #include <linux/completion.h>
14 #include <linux/delay.h>
15 #include <linux/device.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/dvb/dmx.h>
18 #include <linux/dvb/frontend.h>
19 #include <linux/errno.h>
20 #include <linux/firmware.h>
21 #include <linux/init.h>
22 #include <linux/interrupt.h>
23 #include <linux/io.h>
24 #include <linux/module.h>
25 #include <linux/of_gpio.h>
26 #include <linux/of_platform.h>
27 #include <linux/pinctrl/consumer.h>
28 #include <linux/pinctrl/pinctrl.h>
29 #include <linux/platform_device.h>
30 #include <linux/slab.h>
31 #include <linux/time.h>
32 #include <linux/usb.h>
33 #include <linux/wait.h>
34 
35 #include "c8sectpfe-common.h"
36 #include "c8sectpfe-core.h"
37 #include "c8sectpfe-debugfs.h"
38 
39 #include <media/dmxdev.h>
40 #include <media/dvb_demux.h>
41 #include <media/dvb_frontend.h>
42 #include <media/dvb_net.h>
43 
/* Name of the SLIM-core (memdma) firmware image requested at first feed */
#define FIRMWARE_MEMDMA "pti_memdma_h407.elf"
MODULE_FIRMWARE(FIRMWARE_MEMDMA);

/* size in bytes of one per-channel HW PID filter bitmap (8192 pids / 8) */
#define PID_TABLE_SIZE 1024
/* polling period for the software-demux kick timer */
#define POLL_MSECS 50

static int load_c8sectpfe_fw(struct c8sectpfei *fei);

/* MPEG-TS packet plus the 4-byte header the HW prepends per packet */
#define TS_PKT_SIZE 188
#define HEADER_SIZE (4)
#define PACKET_SIZE (TS_PKT_SIZE+HEADER_SIZE)

/* DMA back-buffer alignment required by the HW */
#define FEI_ALIGNMENT (32)
/* hw requires minimum of 8*PACKET_SIZE and padded to 8byte boundary */
#define FEI_BUFFER_SIZE (8*PACKET_SIZE*340)

/* length of each input block's internal SRAM FIFO, in bytes */
#define FIFO_LEN 1024
61 
62 static void c8sectpfe_timer_interrupt(struct timer_list *t)
63 {
64 	struct c8sectpfei *fei = from_timer(fei, t, timer);
65 	struct channel_info *channel;
66 	int chan_num;
67 
68 	/* iterate through input block channels */
69 	for (chan_num = 0; chan_num < fei->tsin_count; chan_num++) {
70 		channel = fei->channel_data[chan_num];
71 
72 		/* is this descriptor initialised and TP enabled */
73 		if (channel->irec && readl(channel->irec + DMA_PRDS_TPENABLE))
74 			tasklet_schedule(&channel->tsklet);
75 	}
76 
77 	fei->timer.expires = jiffies +	msecs_to_jiffies(POLL_MSECS);
78 	add_timer(&fei->timer);
79 }
80 
/*
 * channel_swdemux_tsklet() - push DMA'd TS packets into the software demux.
 *
 * Reads the memdma write/read bus pointers for this channel, hands every
 * complete PACKET_SIZE chunk between them to dvb_dmx_swfilter_packets(),
 * then advances the HW read pointer. Scheduled from the polling timer.
 */
static void channel_swdemux_tsklet(struct tasklet_struct *t)
{
	struct channel_info *channel = from_tasklet(channel, t, tsklet);
	struct c8sectpfei *fei;
	unsigned long wp, rp;
	int pos, num_packets, n, size;
	u8 *buf;

	if (unlikely(!channel || !channel->irec))
		return;

	fei = channel->fei;

	/* wp/rp are bus addresses inside the channel's back buffer */
	wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
	rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));

	/* byte offset of the read pointer within the back buffer */
	pos = rp - channel->back_buffer_busaddr;

	/* has it wrapped */
	if (wp < rp)
		wp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE;

	/*
	 * On wrap, only the data up to the end of the buffer is consumed
	 * this pass; the remainder is picked up on the next invocation
	 * after the read pointer is reset to the buffer base below.
	 */
	size = wp - rp;
	num_packets = size / PACKET_SIZE;

	/* manage cache so data is visible to CPU */
	dma_sync_single_for_cpu(fei->dev,
				rp,
				size,
				DMA_FROM_DEVICE);

	buf = channel->back_buffer_aligned;

	dev_dbg(fei->dev,
		"chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\trp=0x%lx, wp=0x%lx\n",
		channel->tsin_id, channel, num_packets, buf, pos, rp, wp);

	/* feed each packet to the demux mapped to this tsin */
	for (n = 0; n < num_packets; n++) {
		dvb_dmx_swfilter_packets(
			&fei->c8sectpfe[0]->
				demux[channel->demux_mapping].dvb_demux,
			&buf[pos], 1);

		pos += PACKET_SIZE;
	}

	/* advance the read pointer */
	if (wp == (channel->back_buffer_busaddr + FEI_BUFFER_SIZE))
		writel(channel->back_buffer_busaddr, channel->irec +
			DMA_PRDS_BUSRP_TP(0));
	else
		writel(wp, channel->irec + DMA_PRDS_BUSRP_TP(0));
}
134 
135 static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
136 {
137 	struct dvb_demux *demux = dvbdmxfeed->demux;
138 	struct stdemux *stdemux = (struct stdemux *)demux->priv;
139 	struct c8sectpfei *fei = stdemux->c8sectpfei;
140 	struct channel_info *channel;
141 	u32 tmp;
142 	unsigned long *bitmap;
143 	int ret;
144 
145 	switch (dvbdmxfeed->type) {
146 	case DMX_TYPE_TS:
147 		break;
148 	case DMX_TYPE_SEC:
149 		break;
150 	default:
151 		dev_err(fei->dev, "%s:%d Error bailing\n"
152 			, __func__, __LINE__);
153 		return -EINVAL;
154 	}
155 
156 	if (dvbdmxfeed->type == DMX_TYPE_TS) {
157 		switch (dvbdmxfeed->pes_type) {
158 		case DMX_PES_VIDEO:
159 		case DMX_PES_AUDIO:
160 		case DMX_PES_TELETEXT:
161 		case DMX_PES_PCR:
162 		case DMX_PES_OTHER:
163 			break;
164 		default:
165 			dev_err(fei->dev, "%s:%d Error bailing\n"
166 				, __func__, __LINE__);
167 			return -EINVAL;
168 		}
169 	}
170 
171 	if (!atomic_read(&fei->fw_loaded)) {
172 		ret = load_c8sectpfe_fw(fei);
173 		if (ret)
174 			return ret;
175 	}
176 
177 	mutex_lock(&fei->lock);
178 
179 	channel = fei->channel_data[stdemux->tsin_index];
180 
181 	bitmap = channel->pid_buffer_aligned;
182 
183 	/* 8192 is a special PID */
184 	if (dvbdmxfeed->pid == 8192) {
185 		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
186 		tmp &= ~C8SECTPFE_PID_ENABLE;
187 		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
188 
189 	} else {
190 		bitmap_set(bitmap, dvbdmxfeed->pid, 1);
191 	}
192 
193 	/* manage cache so PID bitmap is visible to HW */
194 	dma_sync_single_for_device(fei->dev,
195 					channel->pid_buffer_busaddr,
196 					PID_TABLE_SIZE,
197 					DMA_TO_DEVICE);
198 
199 	channel->active = 1;
200 
201 	if (fei->global_feed_count == 0) {
202 		fei->timer.expires = jiffies +
203 			msecs_to_jiffies(msecs_to_jiffies(POLL_MSECS));
204 
205 		add_timer(&fei->timer);
206 	}
207 
208 	if (stdemux->running_feed_count == 0) {
209 
210 		dev_dbg(fei->dev, "Starting channel=%p\n", channel);
211 
212 		tasklet_setup(&channel->tsklet, channel_swdemux_tsklet);
213 
214 		/* Reset the internal inputblock sram pointers */
215 		writel(channel->fifo,
216 			fei->io + C8SECTPFE_IB_BUFF_STRT(channel->tsin_id));
217 		writel(channel->fifo + FIFO_LEN - 1,
218 			fei->io + C8SECTPFE_IB_BUFF_END(channel->tsin_id));
219 
220 		writel(channel->fifo,
221 			fei->io + C8SECTPFE_IB_READ_PNT(channel->tsin_id));
222 		writel(channel->fifo,
223 			fei->io + C8SECTPFE_IB_WRT_PNT(channel->tsin_id));
224 
225 
226 		/* reset read / write memdma ptrs for this channel */
227 		writel(channel->back_buffer_busaddr, channel->irec +
228 			DMA_PRDS_BUSBASE_TP(0));
229 
230 		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
231 		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
232 
233 		writel(channel->back_buffer_busaddr, channel->irec +
234 			DMA_PRDS_BUSWP_TP(0));
235 
236 		/* Issue a reset and enable InputBlock */
237 		writel(C8SECTPFE_SYS_ENABLE | C8SECTPFE_SYS_RESET
238 			, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
239 
240 		/* and enable the tp */
241 		writel(0x1, channel->irec + DMA_PRDS_TPENABLE);
242 
243 		dev_dbg(fei->dev, "%s:%d Starting DMA feed on stdemux=%p\n"
244 			, __func__, __LINE__, stdemux);
245 	}
246 
247 	stdemux->running_feed_count++;
248 	fei->global_feed_count++;
249 
250 	mutex_unlock(&fei->lock);
251 
252 	return 0;
253 }
254 
/*
 * c8sectpfe_stop_feed() - dvb_demux stop_feed callback.
 *
 * Clears the PID from the HW filter (or re-enables filtering if the
 * special all-pass PID 8192 is being stopped). When the last feed on a
 * tsin stops, performs the TP re-configuration sequence from the
 * functional spec (p.168): disable IB, disable the descriptor, request
 * memdma idle, wait for the idle irq, then reset the ring pointers and
 * wipe the PID bitmap. The last feed overall stops the polling timer.
 *
 * Returns 0 on success or a negative errno.
 */
static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{

	struct dvb_demux *demux = dvbdmxfeed->demux;
	struct stdemux *stdemux = (struct stdemux *)demux->priv;
	struct c8sectpfei *fei = stdemux->c8sectpfei;
	struct channel_info *channel;
	int idlereq;
	u32 tmp;
	int ret;
	unsigned long *bitmap;

	/* NOTE(review): stop_feed loading firmware looks odd, but keeps
	 * symmetry with start_feed if stop is called first — confirm */
	if (!atomic_read(&fei->fw_loaded)) {
		ret = load_c8sectpfe_fw(fei);
		if (ret)
			return ret;
	}

	mutex_lock(&fei->lock);

	channel = fei->channel_data[stdemux->tsin_index];

	bitmap = channel->pid_buffer_aligned;

	/* stopping the all-pass PID: turn HW PID filtering back on */
	if (dvbdmxfeed->pid == 8192) {
		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
		tmp |= C8SECTPFE_PID_ENABLE;
		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
	} else {
		bitmap_clear(bitmap, dvbdmxfeed->pid, 1);
	}

	/* manage cache so data is visible to HW */
	dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

	if (--stdemux->running_feed_count == 0) {

		channel = fei->channel_data[stdemux->tsin_index];

		/* TP re-configuration on page 168 of functional spec */

		/* disable IB (prevents more TS data going to memdma) */
		writel(0, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));

		/* disable this channels descriptor */
		writel(0,  channel->irec + DMA_PRDS_TPENABLE);

		tasklet_disable(&channel->tsklet);

		/* now request memdma channel goes idle */
		idlereq = (1 << channel->tsin_id) | IDLEREQ;
		writel(idlereq, fei->io + DMA_IDLE_REQ);

		/* wait for idle irq handler to signal completion */
		ret = wait_for_completion_timeout(&channel->idle_completion,
						msecs_to_jiffies(100));

		/* timeout is logged but not fatal; teardown continues */
		if (ret == 0)
			dev_warn(fei->dev,
				"Timeout waiting for idle irq on tsin%d\n",
				channel->tsin_id);

		reinit_completion(&channel->idle_completion);

		/* reset read / write ptrs for this channel */

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSBASE_TP(0));

		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSWP_TP(0));

		dev_dbg(fei->dev,
			"%s:%d stopping DMA feed on stdemux=%p channel=%d\n",
			__func__, __LINE__, stdemux, channel->tsin_id);

		/* turn off all PIDS in the bitmap */
		memset(channel->pid_buffer_aligned, 0, PID_TABLE_SIZE);

		/* manage cache so data is visible to HW */
		dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

		channel->active = 0;
	}

	/* last feed overall: stop the polling timer */
	if (--fei->global_feed_count == 0) {
		dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n"
			, __func__, __LINE__, fei->global_feed_count);

		del_timer(&fei->timer);
	}

	mutex_unlock(&fei->lock);

	return 0;
}
360 
361 static struct channel_info *find_channel(struct c8sectpfei *fei, int tsin_num)
362 {
363 	int i;
364 
365 	for (i = 0; i < C8SECTPFE_MAX_TSIN_CHAN; i++) {
366 		if (!fei->channel_data[i])
367 			continue;
368 
369 		if (fei->channel_data[i]->tsin_id == tsin_num)
370 			return fei->channel_data[i];
371 	}
372 
373 	return NULL;
374 }
375 
376 static void c8sectpfe_getconfig(struct c8sectpfei *fei)
377 {
378 	struct c8sectpfe_hw *hw = &fei->hw_stats;
379 
380 	hw->num_ib = readl(fei->io + SYS_CFG_NUM_IB);
381 	hw->num_mib = readl(fei->io + SYS_CFG_NUM_MIB);
382 	hw->num_swts = readl(fei->io + SYS_CFG_NUM_SWTS);
383 	hw->num_tsout = readl(fei->io + SYS_CFG_NUM_TSOUT);
384 	hw->num_ccsc = readl(fei->io + SYS_CFG_NUM_CCSC);
385 	hw->num_ram = readl(fei->io + SYS_CFG_NUM_RAM);
386 	hw->num_tp = readl(fei->io + SYS_CFG_NUM_TP);
387 
388 	dev_info(fei->dev, "C8SECTPFE hw supports the following:\n");
389 	dev_info(fei->dev, "Input Blocks: %d\n", hw->num_ib);
390 	dev_info(fei->dev, "Merged Input Blocks: %d\n", hw->num_mib);
391 	dev_info(fei->dev, "Software Transport Stream Inputs: %d\n"
392 				, hw->num_swts);
393 	dev_info(fei->dev, "Transport Stream Output: %d\n", hw->num_tsout);
394 	dev_info(fei->dev, "Cable Card Converter: %d\n", hw->num_ccsc);
395 	dev_info(fei->dev, "RAMs supported by C8SECTPFE: %d\n", hw->num_ram);
396 	dev_info(fei->dev, "Tango TPs supported by C8SECTPFE: %d\n"
397 			, hw->num_tp);
398 }
399 
400 static irqreturn_t c8sectpfe_idle_irq_handler(int irq, void *priv)
401 {
402 	struct c8sectpfei *fei = priv;
403 	struct channel_info *chan;
404 	int bit;
405 	unsigned long tmp = readl(fei->io + DMA_IDLE_REQ);
406 
407 	/* page 168 of functional spec: Clear the idle request
408 	   by writing 0 to the C8SECTPFE_DMA_IDLE_REQ register. */
409 
410 	/* signal idle completion */
411 	for_each_set_bit(bit, &tmp, fei->hw_stats.num_ib) {
412 
413 		chan = find_channel(fei, bit);
414 
415 		if (chan)
416 			complete(&chan->idle_completion);
417 	}
418 
419 	writel(0, fei->io + DMA_IDLE_REQ);
420 
421 	return IRQ_HANDLED;
422 }
423 
424 
425 static void free_input_block(struct c8sectpfei *fei, struct channel_info *tsin)
426 {
427 	if (!fei || !tsin)
428 		return;
429 
430 	if (tsin->back_buffer_busaddr)
431 		if (!dma_mapping_error(fei->dev, tsin->back_buffer_busaddr))
432 			dma_unmap_single(fei->dev, tsin->back_buffer_busaddr,
433 				FEI_BUFFER_SIZE, DMA_BIDIRECTIONAL);
434 
435 	kfree(tsin->back_buffer_start);
436 
437 	if (tsin->pid_buffer_busaddr)
438 		if (!dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr))
439 			dma_unmap_single(fei->dev, tsin->pid_buffer_busaddr,
440 				PID_TABLE_SIZE, DMA_BIDIRECTIONAL);
441 
442 	kfree(tsin->pid_buffer_start);
443 }
444 
/* maximum length of a tsin pinctrl state name, e.g. "tsin3-parallel" */
#define MAX_NAME 20

/*
 * configure_memdma_and_inputblock() - one-time HW setup for a tsin.
 *
 * Allocates and DMA-maps the packet back buffer and PID filter bitmap,
 * selects the tsin's pinctrl state, programs the input block format,
 * sync/drop thresholds, SRAM FIFO window and HW PID filter, and fills
 * in the channel's memdma pointer record. Called once per channel after
 * the SLIM firmware has been loaded.
 *
 * Returns 0 on success; on error frees anything allocated so far via
 * free_input_block() and returns a negative errno.
 */
static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
				struct channel_info *tsin)
{
	int ret;
	u32 tmp;
	char tsin_pin_name[MAX_NAME];

	if (!fei || !tsin)
		return -EINVAL;

	dev_dbg(fei->dev, "%s:%d Configuring channel=%p tsin=%d\n"
		, __func__, __LINE__, tsin, tsin->tsin_id);

	init_completion(&tsin->idle_completion);

	/* over-allocate so the working pointer can be 32-byte aligned */
	tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE + FEI_ALIGNMENT, GFP_KERNEL);
	if (!tsin->back_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/* Ensure backbuffer is 32byte aligned */
	tsin->back_buffer_aligned = tsin->back_buffer_start + FEI_ALIGNMENT;

	/*
	 * NOTE(review): adding FEI_ALIGNMENT and then PTR_ALIGN'ing *up*
	 * can land up to FEI_ALIGNMENT-1 bytes past the slack if kzalloc
	 * ever returned an unaligned pointer — relies on kmalloc's
	 * minimum alignment; confirm, or align down instead.
	 */
	tsin->back_buffer_aligned = PTR_ALIGN(tsin->back_buffer_aligned, FEI_ALIGNMENT);

	tsin->back_buffer_busaddr = dma_map_single(fei->dev,
					tsin->back_buffer_aligned,
					FEI_BUFFER_SIZE,
					DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->back_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map back_buffer\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/*
	 * The pid buffer can be configured (in hw) for byte or bit
	 * per pid. By powers of deduction we conclude stih407 family
	 * is configured (at SoC design stage) for bit per pid.
	 */
	tsin->pid_buffer_start = kzalloc(PID_TABLE_SIZE + PID_TABLE_SIZE, GFP_KERNEL);
	if (!tsin->pid_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * PID buffer needs to be aligned to size of the pid table
	 * which at bit per pid is 1024 bytes (8192 pids / 8).
	 * PIDF_BASE register enforces this alignment when writing
	 * the register.
	 */

	tsin->pid_buffer_aligned = tsin->pid_buffer_start + PID_TABLE_SIZE;

	/* NOTE(review): same align-up-past-the-slack concern as above */
	tsin->pid_buffer_aligned = PTR_ALIGN(tsin->pid_buffer_aligned, PID_TABLE_SIZE);

	tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
						tsin->pid_buffer_aligned,
						PID_TABLE_SIZE,
						DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map pid_bitmap\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/* manage cache so pid bitmap is visible to HW */
	dma_sync_single_for_device(fei->dev,
				tsin->pid_buffer_busaddr,
				PID_TABLE_SIZE,
				DMA_TO_DEVICE);

	/* pinctrl states are named tsin<N>-serial / tsin<N>-parallel */
	snprintf(tsin_pin_name, MAX_NAME, "tsin%d-%s", tsin->tsin_id,
		(tsin->serial_not_parallel ? "serial" : "parallel"));

	tsin->pstate = pinctrl_lookup_state(fei->pinctrl, tsin_pin_name);
	if (IS_ERR(tsin->pstate)) {
		dev_err(fei->dev, "%s: pinctrl_lookup_state couldn't find %s state\n"
			, __func__, tsin_pin_name);
		ret = PTR_ERR(tsin->pstate);
		goto err_unmap;
	}

	ret = pinctrl_select_state(fei->pinctrl, tsin->pstate);

	if (ret) {
		dev_err(fei->dev, "%s: pinctrl_select_state failed\n"
			, __func__);
		goto err_unmap;
	}

	/* Enable this input block */
	tmp = readl(fei->io + SYS_INPUT_CLKEN);
	tmp |= BIT(tsin->tsin_id);
	writel(tmp, fei->io + SYS_INPUT_CLKEN);

	/* build the input format config word from DT-provided flags */
	if (tsin->serial_not_parallel)
		tmp |= C8SECTPFE_SERIAL_NOT_PARALLEL;

	if (tsin->invert_ts_clk)
		tmp |= C8SECTPFE_INVERT_TSCLK;

	if (tsin->async_not_sync)
		tmp |= C8SECTPFE_ASYNC_NOT_SYNC;

	tmp |= C8SECTPFE_ALIGN_BYTE_SOP | C8SECTPFE_BYTE_ENDIANNESS_MSB;

	writel(tmp, fei->io + C8SECTPFE_IB_IP_FMT_CFG(tsin->tsin_id));

	/* sync/drop thresholds and the TS sync byte (0x47) */
	writel(C8SECTPFE_SYNC(0x9) |
		C8SECTPFE_DROP(0x9) |
		C8SECTPFE_TOKEN(0x47),
		fei->io + C8SECTPFE_IB_SYNCLCKDRP_CFG(tsin->tsin_id));

	writel(TS_PKT_SIZE, fei->io + C8SECTPFE_IB_PKT_LEN(tsin->tsin_id));

	/* Place the FIFO's at the end of the irec descriptors */

	tsin->fifo = (tsin->tsin_id * FIFO_LEN);

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id));
	writel(tsin->fifo + FIFO_LEN - 1,
		fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id));

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id));
	writel(tsin->fifo, fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id));

	writel(tsin->pid_buffer_busaddr,
		fei->io + PIDF_BASE(tsin->tsin_id));

	dev_dbg(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
		tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
		&tsin->pid_buffer_busaddr);

	/* Configure and enable HW PID filtering */

	/*
	 * The PID value is created by assembling the first 8 bytes of
	 * the TS packet into a 64-bit word in big-endian format. A
	 * slice of that 64-bit word is taken from
	 * (PID_OFFSET+PID_NUM_BITS-1) to PID_OFFSET.
	 */
	tmp = (C8SECTPFE_PID_ENABLE | C8SECTPFE_PID_NUMBITS(13)
		| C8SECTPFE_PID_OFFSET(40));

	writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(tsin->tsin_id));

	dev_dbg(fei->dev, "chan=%d setting wp: %d, rp: %d, buf: %d-%d\n",
		tsin->tsin_id,
		readl(fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id)));

	/* Get base addpress of pointer record block from DMEM */
	tsin->irec = fei->io + DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET +
			readl(fei->io + DMA_PTRREC_BASE);

	/* fill out pointer record data structure */

	/* advance pointer record block to our channel */
	tsin->irec += (tsin->tsin_id * DMA_PRDS_SIZE);

	writel(tsin->fifo, tsin->irec + DMA_PRDS_MEMBASE);

	writel(tsin->fifo + FIFO_LEN - 1, tsin->irec + DMA_PRDS_MEMTOP);

	/* packet size rounded up to an 8-byte boundary (188 -> 192) */
	writel((188 + 7)&~7, tsin->irec + DMA_PRDS_PKTSIZE);

	writel(0x1, tsin->irec + DMA_PRDS_TPENABLE);

	/* read/write pointers with physical bus address */

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSBASE_TP(0));

	tmp = tsin->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
	writel(tmp, tsin->irec + DMA_PRDS_BUSTOP_TP(0));

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSWP_TP(0));
	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));

	/* initialize tasklet */
	tasklet_setup(&tsin->tsklet, channel_swdemux_tsklet);

	return 0;

err_unmap:
	free_input_block(fei, tsin);
	return ret;
}
641 
642 static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
643 {
644 	struct c8sectpfei *fei = priv;
645 
646 	dev_err(fei->dev, "%s: error handling not yet implemented\n"
647 		, __func__);
648 
649 	/*
650 	 * TODO FIXME we should detect some error conditions here
651 	 * and ideally do something about them!
652 	 */
653 
654 	return IRQ_HANDLED;
655 }
656 
/*
 * c8sectpfe_probe() - platform driver probe.
 *
 * Maps the register and SRAM resources, grabs irqs and the IP clock,
 * parses one device-tree child node per tsin into a channel_info
 * (tsin id, signalling flags, dvb-card, i2c adapter, reset gpio),
 * toggles each NIM reset line, then registers the DVB frontends and
 * debugfs entries. Firmware load and input block configuration are
 * deferred until the first feed (see c8sectpfe_start_feed()).
 *
 * Returns 0 on success or a negative errno.
 */
static int c8sectpfe_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;
	struct c8sectpfei *fei;
	struct resource *res;
	int ret, index = 0;
	struct channel_info *tsin;

	/* Allocate the c8sectpfei structure */
	fei = devm_kzalloc(dev, sizeof(struct c8sectpfei), GFP_KERNEL);
	if (!fei)
		return -ENOMEM;

	fei->dev = dev;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "c8sectpfe");
	fei->io = devm_ioremap_resource(dev, res);
	if (IS_ERR(fei->io))
		return PTR_ERR(fei->io);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					"c8sectpfe-ram");
	fei->sram = devm_ioremap_resource(dev, res);
	if (IS_ERR(fei->sram))
		return PTR_ERR(fei->sram);

	fei->sram_size = resource_size(res);

	fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
	if (fei->idle_irq < 0)
		return fei->idle_irq;

	fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
	if (fei->error_irq < 0)
		return fei->error_irq;

	platform_set_drvdata(pdev, fei);

	fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
	if (IS_ERR(fei->c8sectpfeclk)) {
		dev_err(dev, "c8sectpfe clk not found\n");
		return PTR_ERR(fei->c8sectpfeclk);
	}

	ret = clk_prepare_enable(fei->c8sectpfeclk);
	if (ret) {
		dev_err(dev, "Failed to enable c8sectpfe clock\n");
		return ret;
	}

	/* to save power disable all IP's (on by default) */
	writel(0, fei->io + SYS_INPUT_CLKEN);

	/* Enable memdma clock */
	writel(MEMDMAENABLE, fei->io + SYS_OTHER_CLKEN);

	/* clear internal sram */
	memset_io(fei->sram, 0x0, fei->sram_size);

	c8sectpfe_getconfig(fei);

	ret = devm_request_irq(dev, fei->idle_irq, c8sectpfe_idle_irq_handler,
			0, "c8sectpfe-idle-irq", fei);
	if (ret) {
		dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
		goto err_clk_disable;
	}

	ret = devm_request_irq(dev, fei->error_irq,
				c8sectpfe_error_irq_handler, 0,
				"c8sectpfe-error-irq", fei);
	if (ret) {
		dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
		goto err_clk_disable;
	}

	/* one DT child node per tsin */
	fei->tsin_count = of_get_child_count(np);

	if (fei->tsin_count > C8SECTPFE_MAX_TSIN_CHAN ||
		fei->tsin_count > fei->hw_stats.num_ib) {

		dev_err(dev, "More tsin declared than exist on SoC!\n");
		ret = -EINVAL;
		goto err_clk_disable;
	}

	fei->pinctrl = devm_pinctrl_get(dev);

	if (IS_ERR(fei->pinctrl)) {
		dev_err(dev, "Error getting tsin pins\n");
		ret = PTR_ERR(fei->pinctrl);
		goto err_clk_disable;
	}

	for_each_child_of_node(np, child) {
		struct device_node *i2c_bus;

		fei->channel_data[index] = devm_kzalloc(dev,
						sizeof(struct channel_info),
						GFP_KERNEL);

		if (!fei->channel_data[index]) {
			ret = -ENOMEM;
			goto err_node_put;
		}

		tsin = fei->channel_data[index];

		tsin->fei = fei;

		ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
		if (ret) {
			dev_err(&pdev->dev, "No tsin_num found\n");
			goto err_node_put;
		}

		/* sanity check value */
		/* NOTE(review): with 0-based ids this presumably should be
		 * ">= num_ib" — confirm against the binding/spec */
		if (tsin->tsin_id > fei->hw_stats.num_ib) {
			dev_err(&pdev->dev,
				"tsin-num %d specified greater than number\n\tof input block hw in SoC! (%d)",
				tsin->tsin_id, fei->hw_stats.num_ib);
			ret = -EINVAL;
			goto err_node_put;
		}

		tsin->invert_ts_clk = of_property_read_bool(child,
							"invert-ts-clk");

		tsin->serial_not_parallel = of_property_read_bool(child,
							"serial-not-parallel");

		tsin->async_not_sync = of_property_read_bool(child,
							"async-not-sync");

		ret = of_property_read_u32(child, "dvb-card",
					&tsin->dvb_card);
		if (ret) {
			dev_err(&pdev->dev, "No dvb-card found\n");
			goto err_node_put;
		}

		i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
		if (!i2c_bus) {
			dev_err(&pdev->dev, "No i2c-bus found\n");
			ret = -ENODEV;
			goto err_node_put;
		}
		tsin->i2c_adapter =
			of_find_i2c_adapter_by_node(i2c_bus);
		if (!tsin->i2c_adapter) {
			dev_err(&pdev->dev, "No i2c adapter found\n");
			of_node_put(i2c_bus);
			ret = -ENODEV;
			goto err_node_put;
		}
		of_node_put(i2c_bus);

		tsin->rst_gpio = of_get_named_gpio(child, "reset-gpios", 0);

		ret = gpio_is_valid(tsin->rst_gpio);
		if (!ret) {
			dev_err(dev,
				"reset gpio for tsin%d not valid (gpio=%d)\n",
				tsin->tsin_id, tsin->rst_gpio);
			ret = -EINVAL;
			goto err_node_put;
		}

		/* -EBUSY is tolerated: the gpio may be shared between NIMs */
		ret = devm_gpio_request_one(dev, tsin->rst_gpio,
					GPIOF_OUT_INIT_LOW, "NIM reset");
		if (ret && ret != -EBUSY) {
			dev_err(dev, "Can't request tsin%d reset gpio\n"
				, fei->channel_data[index]->tsin_id);
			goto err_node_put;
		}

		if (!ret) {
			/* toggle reset lines */
			gpio_direction_output(tsin->rst_gpio, 0);
			usleep_range(3500, 5000);
			gpio_direction_output(tsin->rst_gpio, 1);
			usleep_range(3000, 5000);
		}

		/* which demux instance this tsin feeds (see swdemux tasklet) */
		tsin->demux_mapping = index;

		dev_dbg(fei->dev,
			"channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\tserial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
			fei->channel_data[index], index,
			tsin->tsin_id, tsin->invert_ts_clk,
			tsin->serial_not_parallel, tsin->async_not_sync,
			tsin->dvb_card);

		index++;
	}

	/* Setup timer interrupt */
	timer_setup(&fei->timer, c8sectpfe_timer_interrupt, 0);

	mutex_init(&fei->lock);

	/* Get the configuration information about the tuners */
	ret = c8sectpfe_tuner_register_frontend(&fei->c8sectpfe[0],
					(void *)fei,
					c8sectpfe_start_feed,
					c8sectpfe_stop_feed);
	if (ret) {
		dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
			ret);
		goto err_clk_disable;
	}

	c8sectpfe_debugfs_init(fei);

	return 0;

err_node_put:
	of_node_put(child);
err_clk_disable:
	clk_disable_unprepare(fei->c8sectpfeclk);
	return ret;
}
880 
881 static int c8sectpfe_remove(struct platform_device *pdev)
882 {
883 	struct c8sectpfei *fei = platform_get_drvdata(pdev);
884 	struct channel_info *channel;
885 	int i;
886 
887 	wait_for_completion(&fei->fw_ack);
888 
889 	c8sectpfe_tuner_unregister_frontend(fei->c8sectpfe[0], fei);
890 
891 	/*
892 	 * Now loop through and un-configure each of the InputBlock resources
893 	 */
894 	for (i = 0; i < fei->tsin_count; i++) {
895 		channel = fei->channel_data[i];
896 		free_input_block(fei, channel);
897 	}
898 
899 	c8sectpfe_debugfs_exit(fei);
900 
901 	dev_info(fei->dev, "Stopping memdma SLIM core\n");
902 	if (readl(fei->io + DMA_CPU_RUN))
903 		writel(0x0,  fei->io + DMA_CPU_RUN);
904 
905 	/* unclock all internal IP's */
906 	if (readl(fei->io + SYS_INPUT_CLKEN))
907 		writel(0, fei->io + SYS_INPUT_CLKEN);
908 
909 	if (readl(fei->io + SYS_OTHER_CLKEN))
910 		writel(0, fei->io + SYS_OTHER_CLKEN);
911 
912 	clk_disable_unprepare(fei->c8sectpfeclk);
913 
914 	return 0;
915 }
916 
917 
918 static int configure_channels(struct c8sectpfei *fei)
919 {
920 	int index = 0, ret;
921 	struct device_node *child, *np = fei->dev->of_node;
922 
923 	/* iterate round each tsin and configure memdma descriptor and IB hw */
924 	for_each_child_of_node(np, child) {
925 		ret = configure_memdma_and_inputblock(fei,
926 						fei->channel_data[index]);
927 		if (ret) {
928 			dev_err(fei->dev,
929 				"configure_memdma_and_inputblock failed\n");
930 			of_node_put(child);
931 			goto err_unmap;
932 		}
933 		index++;
934 	}
935 
936 	return 0;
937 
938 err_unmap:
939 	while (--index >= 0)
940 		free_input_block(fei, fei->channel_data[index]);
941 
942 	return ret;
943 }
944 
945 static int
946 c8sectpfe_elf_sanity_check(struct c8sectpfei *fei, const struct firmware *fw)
947 {
948 	struct elf32_hdr *ehdr;
949 	char class;
950 
951 	if (!fw) {
952 		dev_err(fei->dev, "failed to load %s\n", FIRMWARE_MEMDMA);
953 		return -EINVAL;
954 	}
955 
956 	if (fw->size < sizeof(struct elf32_hdr)) {
957 		dev_err(fei->dev, "Image is too small\n");
958 		return -EINVAL;
959 	}
960 
961 	ehdr = (struct elf32_hdr *)fw->data;
962 
963 	/* We only support ELF32 at this point */
964 	class = ehdr->e_ident[EI_CLASS];
965 	if (class != ELFCLASS32) {
966 		dev_err(fei->dev, "Unsupported class: %d\n", class);
967 		return -EINVAL;
968 	}
969 
970 	if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
971 		dev_err(fei->dev, "Unsupported firmware endianness\n");
972 		return -EINVAL;
973 	}
974 
975 	if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
976 		dev_err(fei->dev, "Image is too small\n");
977 		return -EINVAL;
978 	}
979 
980 	if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
981 		dev_err(fei->dev, "Image is corrupted (bad magic)\n");
982 		return -EINVAL;
983 	}
984 
985 	/* Check ELF magic */
986 	ehdr = (Elf32_Ehdr *)fw->data;
987 	if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
988 	    ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
989 	    ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
990 	    ehdr->e_ident[EI_MAG3] != ELFMAG3) {
991 		dev_err(fei->dev, "Invalid ELF magic\n");
992 		return -EINVAL;
993 	}
994 
995 	if (ehdr->e_type != ET_EXEC) {
996 		dev_err(fei->dev, "Unsupported ELF header type\n");
997 		return -EINVAL;
998 	}
999 
1000 	if (ehdr->e_phoff > fw->size) {
1001 		dev_err(fei->dev, "Firmware size is too small\n");
1002 		return -EINVAL;
1003 	}
1004 
1005 	return 0;
1006 }
1007 
1008 
1009 static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
1010 			const struct firmware *fw, u8 __iomem *dest,
1011 			int seg_num)
1012 {
1013 	const u8 *imem_src = fw->data + phdr->p_offset;
1014 	int i;
1015 
1016 	/*
1017 	 * For IMEM segments, the segment contains 24-bit
1018 	 * instructions which must be padded to 32-bit
1019 	 * instructions before being written. The written
1020 	 * segment is padded with NOP instructions.
1021 	 */
1022 
1023 	dev_dbg(fei->dev,
1024 		"Loading IMEM segment %d 0x%08x\n\t (0x%x bytes) -> 0x%p (0x%x bytes)\n",
1025 		seg_num, phdr->p_paddr, phdr->p_filesz, dest,
1026 		phdr->p_memsz + phdr->p_memsz / 3);
1027 
1028 	for (i = 0; i < phdr->p_filesz; i++) {
1029 
1030 		writeb(readb((void __iomem *)imem_src), (void __iomem *)dest);
1031 
1032 		/* Every 3 bytes, add an additional
1033 		 * padding zero in destination */
1034 		if (i % 3 == 2) {
1035 			dest++;
1036 			writeb(0x00, (void __iomem *)dest);
1037 		}
1038 
1039 		dest++;
1040 		imem_src++;
1041 	}
1042 }
1043 
1044 static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
1045 			const struct firmware *fw, u8 __iomem *dst, int seg_num)
1046 {
1047 	/*
1048 	 * For DMEM segments copy the segment data from the ELF
1049 	 * file and pad segment with zeroes
1050 	 */
1051 
1052 	dev_dbg(fei->dev,
1053 		"Loading DMEM segment %d 0x%08x\n\t(0x%x bytes) -> 0x%p (0x%x bytes)\n",
1054 		seg_num, phdr->p_paddr, phdr->p_filesz,
1055 		dst, phdr->p_memsz);
1056 
1057 	memcpy((void __force *)dst, (void *)fw->data + phdr->p_offset,
1058 		phdr->p_filesz);
1059 
1060 	memset((void __force *)dst + phdr->p_filesz, 0,
1061 		phdr->p_memsz - phdr->p_filesz);
1062 }
1063 
/*
 * load_slim_core_fw() - walk the firmware's ELF program headers and
 * load each PT_LOAD segment into MEMDMA IMEM (executable segments) or
 * DMEM (everything else).
 *
 * Consumes @fw: release_firmware() is always called before returning,
 * so the caller must not use @fw afterwards.
 *
 * Returns 0 on success or -EINVAL on a malformed segment.
 */
static int load_slim_core_fw(const struct firmware *fw, struct c8sectpfei *fei)
{
	Elf32_Ehdr *ehdr;
	Elf32_Phdr *phdr;
	u8 __iomem *dst;
	int err = 0, i;

	if (!fw || !fei)
		return -EINVAL;

	ehdr = (Elf32_Ehdr *)fw->data;
	phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);

	/* go through the available ELF segments */
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {

		/* Only consider LOAD segments */
		if (phdr->p_type != PT_LOAD)
			continue;

		/*
		 * Check segment is contained within the fw->data buffer
		 */
		/* NOTE(review): p_offset + p_filesz could wrap on a
		 * crafted image — confirm an overflow check is wanted */
		if (phdr->p_offset + phdr->p_filesz > fw->size) {
			dev_err(fei->dev,
				"Segment %d is outside of firmware file\n", i);
			err = -EINVAL;
			break;
		}

		/*
		 * MEMDMA IMEM has executable flag set, otherwise load
		 * this segment into DMEM.
		 *
		 */

		if (phdr->p_flags & PF_X) {
			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_IMEM;
			/*
			 * The Slim ELF file uses 32-bit word addressing for
			 * load offsets.
			 */
			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
			load_imem_segment(fei, phdr, fw, dst, i);
		} else {
			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_DMEM;
			/*
			 * The Slim ELF file uses 32-bit word addressing for
			 * load offsets.
			 */
			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
			load_dmem_segment(fei, phdr, fw, dst, i);
		}
	}

	/* firmware ownership ends here, success or not */
	release_firmware(fw);
	return err;
}
1122 
/*
 * load_c8sectpfe_fw() - request, validate and boot the SLIM core.
 *
 * Fetches FIRMWARE_MEMDMA, sanity-checks the ELF image, loads its
 * segments into IMEM/DMEM, configures every input block channel,
 * enables STBus access and starts the memdma CPU. On success sets
 * fei->fw_loaded so subsequent feeds skip this path.
 *
 * Returns 0 on success or a negative errno.
 */
static int load_c8sectpfe_fw(struct c8sectpfei *fei)
{
	const struct firmware *fw;
	int err;

	dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);

	err = request_firmware(&fw, FIRMWARE_MEMDMA, fei->dev);
	if (err)
		return err;

	err = c8sectpfe_elf_sanity_check(fei, fw);
	if (err) {
		dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
			, err);
		release_firmware(fw);
		return err;
	}

	/* load_slim_core_fw() always releases fw — no release here */
	err = load_slim_core_fw(fw, fei);
	if (err) {
		dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
		return err;
	}

	/* now the firmware is loaded configure the input blocks */
	err = configure_channels(fei);
	if (err) {
		dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
		return err;
	}

	/*
	 * STBus target port can access IMEM and DMEM ports
	 * without waiting for CPU
	 */
	writel(0x1, fei->io + DMA_PER_STBUS_SYNC);

	dev_info(fei->dev, "Boot the memdma SLIM core\n");
	writel(0x1,  fei->io + DMA_CPU_RUN);

	/* mark firmware loaded so start/stop_feed skip this path */
	atomic_set(&fei->fw_loaded, 1);

	return 0;
}
1168 
/* OF match table: binds against the stih407-family c8sectpfe IP */
static const struct of_device_id c8sectpfe_match[] = {
	{ .compatible = "st,stih407-c8sectpfe" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, c8sectpfe_match);

static struct platform_driver c8sectpfe_driver = {
	.driver = {
		.name = "c8sectpfe",
		.of_match_table = of_match_ptr(c8sectpfe_match),
	},
	.probe	= c8sectpfe_probe,
	.remove	= c8sectpfe_remove,
};

module_platform_driver(c8sectpfe_driver);

MODULE_AUTHOR("Peter Bennett <peter.bennett@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_DESCRIPTION("C8SECTPFE STi DVB Driver");
MODULE_LICENSE("GPL");
1190