1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * c8sectpfe-core.c - C8SECTPFE STi DVB driver
4 *
5 * Copyright (c) STMicroelectronics 2015
6 *
7 * Author:Peter Bennett <peter.bennett@st.com>
8 * Peter Griffin <peter.griffin@linaro.org>
9 *
10 */
11 #include <linux/atomic.h>
12 #include <linux/clk.h>
13 #include <linux/completion.h>
14 #include <linux/delay.h>
15 #include <linux/device.h>
16 #include <linux/dma-mapping.h>
17 #include <linux/dvb/dmx.h>
18 #include <linux/dvb/frontend.h>
19 #include <linux/err.h>
20 #include <linux/errno.h>
21 #include <linux/firmware.h>
22 #include <linux/gpio/consumer.h>
23 #include <linux/init.h>
24 #include <linux/interrupt.h>
25 #include <linux/io.h>
26 #include <linux/module.h>
27 #include <linux/of_gpio.h>
28 #include <linux/of_platform.h>
29 #include <linux/pinctrl/consumer.h>
30 #include <linux/pinctrl/pinctrl.h>
31 #include <linux/platform_device.h>
32 #include <linux/slab.h>
33 #include <linux/time.h>
34 #include <linux/usb.h>
35 #include <linux/wait.h>
36
37 #include "c8sectpfe-common.h"
38 #include "c8sectpfe-core.h"
39 #include "c8sectpfe-debugfs.h"
40
41 #include <media/dmxdev.h>
42 #include <media/dvb_demux.h>
43 #include <media/dvb_frontend.h>
44 #include <media/dvb_net.h>
45
46 #define FIRMWARE_MEMDMA "pti_memdma_h407.elf"
47 MODULE_FIRMWARE(FIRMWARE_MEMDMA);
48
49 #define PID_TABLE_SIZE 1024
50 #define POLL_MSECS 50
51
52 static int load_c8sectpfe_fw(struct c8sectpfei *fei);
53
54 #define TS_PKT_SIZE 188
55 #define HEADER_SIZE (4)
56 #define PACKET_SIZE (TS_PKT_SIZE+HEADER_SIZE)
57
58 #define FEI_ALIGNMENT (32)
59 /* hw requires minimum of 8*PACKET_SIZE and padded to 8byte boundary */
60 #define FEI_BUFFER_SIZE (8*PACKET_SIZE*340)
61
62 #define FIFO_LEN 1024
63
c8sectpfe_timer_interrupt(struct timer_list * t)64 static void c8sectpfe_timer_interrupt(struct timer_list *t)
65 {
66 struct c8sectpfei *fei = from_timer(fei, t, timer);
67 struct channel_info *channel;
68 int chan_num;
69
70 /* iterate through input block channels */
71 for (chan_num = 0; chan_num < fei->tsin_count; chan_num++) {
72 channel = fei->channel_data[chan_num];
73
74 /* is this descriptor initialised and TP enabled */
75 if (channel->irec && readl(channel->irec + DMA_PRDS_TPENABLE))
76 tasklet_schedule(&channel->tsklet);
77 }
78
79 fei->timer.expires = jiffies + msecs_to_jiffies(POLL_MSECS);
80 add_timer(&fei->timer);
81 }
82
/*
 * Software demux tasklet: drain newly DMA'd TS packets from this channel's
 * back buffer and hand them to the DVB software demux, then advance the
 * memdma read pointer. Scheduled from the polling timer while the TP
 * for the channel is enabled.
 */
static void channel_swdemux_tsklet(struct tasklet_struct *t)
{
	struct channel_info *channel = from_tasklet(channel, t, tsklet);
	struct c8sectpfei *fei;
	unsigned long wp, rp;
	int pos, num_packets, n, size;
	u8 *buf;

	/* defensive: the tasklet may fire before the descriptor is set up */
	if (unlikely(!channel || !channel->irec))
		return;

	fei = channel->fei;

	/* hardware write pointer and driver read pointer (bus addresses) */
	wp = readl(channel->irec + DMA_PRDS_BUSWP_TP(0));
	rp = readl(channel->irec + DMA_PRDS_BUSRP_TP(0));

	/* byte offset of the read pointer inside the back buffer */
	pos = rp - channel->back_buffer_busaddr;

	/* has it wrapped */
	if (wp < rp)
		wp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE;

	size = wp - rp;
	num_packets = size / PACKET_SIZE;

	/* manage cache so data is visible to CPU */
	dma_sync_single_for_cpu(fei->dev,
				rp,
				size,
				DMA_FROM_DEVICE);

	buf = channel->back_buffer_aligned;

	dev_dbg(fei->dev,
		"chan=%d channel=%p num_packets = %d, buf = %p, pos = 0x%x\n\trp=0x%lx, wp=0x%lx\n",
		channel->tsin_id, channel, num_packets, buf, pos, rp, wp);

	/* feed each whole packet to the demux mapped to this channel */
	for (n = 0; n < num_packets; n++) {
		dvb_dmx_swfilter_packets(
			&fei->c8sectpfe[0]->
				demux[channel->demux_mapping].dvb_demux,
			&buf[pos], 1);

		pos += PACKET_SIZE;
	}

	/* advance the read pointer */
	if (wp == (channel->back_buffer_busaddr + FEI_BUFFER_SIZE))
		/* consumed up to the wrap point: rewind RP to buffer start */
		writel(channel->back_buffer_busaddr, channel->irec +
			DMA_PRDS_BUSRP_TP(0));
	else
		writel(wp, channel->irec + DMA_PRDS_BUSRP_TP(0));
}
136
c8sectpfe_start_feed(struct dvb_demux_feed * dvbdmxfeed)137 static int c8sectpfe_start_feed(struct dvb_demux_feed *dvbdmxfeed)
138 {
139 struct dvb_demux *demux = dvbdmxfeed->demux;
140 struct stdemux *stdemux = demux->priv;
141 struct c8sectpfei *fei = stdemux->c8sectpfei;
142 struct channel_info *channel;
143 u32 tmp;
144 unsigned long *bitmap;
145 int ret;
146
147 switch (dvbdmxfeed->type) {
148 case DMX_TYPE_TS:
149 break;
150 case DMX_TYPE_SEC:
151 break;
152 default:
153 dev_err(fei->dev, "%s:%d Error bailing\n"
154 , __func__, __LINE__);
155 return -EINVAL;
156 }
157
158 if (dvbdmxfeed->type == DMX_TYPE_TS) {
159 switch (dvbdmxfeed->pes_type) {
160 case DMX_PES_VIDEO:
161 case DMX_PES_AUDIO:
162 case DMX_PES_TELETEXT:
163 case DMX_PES_PCR:
164 case DMX_PES_OTHER:
165 break;
166 default:
167 dev_err(fei->dev, "%s:%d Error bailing\n"
168 , __func__, __LINE__);
169 return -EINVAL;
170 }
171 }
172
173 if (!atomic_read(&fei->fw_loaded)) {
174 ret = load_c8sectpfe_fw(fei);
175 if (ret)
176 return ret;
177 }
178
179 mutex_lock(&fei->lock);
180
181 channel = fei->channel_data[stdemux->tsin_index];
182
183 bitmap = channel->pid_buffer_aligned;
184
185 /* 8192 is a special PID */
186 if (dvbdmxfeed->pid == 8192) {
187 tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
188 tmp &= ~C8SECTPFE_PID_ENABLE;
189 writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
190
191 } else {
192 bitmap_set(bitmap, dvbdmxfeed->pid, 1);
193 }
194
195 /* manage cache so PID bitmap is visible to HW */
196 dma_sync_single_for_device(fei->dev,
197 channel->pid_buffer_busaddr,
198 PID_TABLE_SIZE,
199 DMA_TO_DEVICE);
200
201 channel->active = 1;
202
203 if (fei->global_feed_count == 0) {
204 fei->timer.expires = jiffies +
205 msecs_to_jiffies(msecs_to_jiffies(POLL_MSECS));
206
207 add_timer(&fei->timer);
208 }
209
210 if (stdemux->running_feed_count == 0) {
211
212 dev_dbg(fei->dev, "Starting channel=%p\n", channel);
213
214 tasklet_setup(&channel->tsklet, channel_swdemux_tsklet);
215
216 /* Reset the internal inputblock sram pointers */
217 writel(channel->fifo,
218 fei->io + C8SECTPFE_IB_BUFF_STRT(channel->tsin_id));
219 writel(channel->fifo + FIFO_LEN - 1,
220 fei->io + C8SECTPFE_IB_BUFF_END(channel->tsin_id));
221
222 writel(channel->fifo,
223 fei->io + C8SECTPFE_IB_READ_PNT(channel->tsin_id));
224 writel(channel->fifo,
225 fei->io + C8SECTPFE_IB_WRT_PNT(channel->tsin_id));
226
227
228 /* reset read / write memdma ptrs for this channel */
229 writel(channel->back_buffer_busaddr, channel->irec +
230 DMA_PRDS_BUSBASE_TP(0));
231
232 tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
233 writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));
234
235 writel(channel->back_buffer_busaddr, channel->irec +
236 DMA_PRDS_BUSWP_TP(0));
237
238 /* Issue a reset and enable InputBlock */
239 writel(C8SECTPFE_SYS_ENABLE | C8SECTPFE_SYS_RESET
240 , fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));
241
242 /* and enable the tp */
243 writel(0x1, channel->irec + DMA_PRDS_TPENABLE);
244
245 dev_dbg(fei->dev, "%s:%d Starting DMA feed on stdemux=%p\n"
246 , __func__, __LINE__, stdemux);
247 }
248
249 stdemux->running_feed_count++;
250 fei->global_feed_count++;
251
252 mutex_unlock(&fei->lock);
253
254 return 0;
255 }
256
/*
 * c8sectpfe_stop_feed() - DVB demux stop_feed callback.
 *
 * Removes the feed's PID from the channel's HW PID-filter bitmap (for the
 * pass-all PID 8192 it re-enables HW filtering instead). When the last
 * feed of a demux stops, performs the TP shutdown/re-configuration
 * sequence described on page 168 of the functional spec: disable the
 * input block, disable the TP descriptor, request memdma idle and wait
 * for the idle IRQ, then reset the channel's buffer pointers and clear
 * the PID table. Stops the polling timer when the last feed in the
 * driver goes away.
 */
static int c8sectpfe_stop_feed(struct dvb_demux_feed *dvbdmxfeed)
{

	struct dvb_demux *demux = dvbdmxfeed->demux;
	struct stdemux *stdemux = demux->priv;
	struct c8sectpfei *fei = stdemux->c8sectpfei;
	struct channel_info *channel;
	int idlereq;
	u32 tmp;
	int ret;
	unsigned long *bitmap;

	/*
	 * NOTE(review): loading firmware on the *stop* path looks odd but
	 * mirrors the start path; presumably it keeps the register writes
	 * below valid even if stop is called before any start succeeded.
	 */
	if (!atomic_read(&fei->fw_loaded)) {
		ret = load_c8sectpfe_fw(fei);
		if (ret)
			return ret;
	}

	mutex_lock(&fei->lock);

	channel = fei->channel_data[stdemux->tsin_index];

	bitmap = channel->pid_buffer_aligned;

	if (dvbdmxfeed->pid == 8192) {
		/* pass-all feed ends: turn HW PID filtering back on */
		tmp = readl(fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
		tmp |= C8SECTPFE_PID_ENABLE;
		writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(channel->tsin_id));
	} else {
		bitmap_clear(bitmap, dvbdmxfeed->pid, 1);
	}

	/* manage cache so data is visible to HW */
	dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

	if (--stdemux->running_feed_count == 0) {

		channel = fei->channel_data[stdemux->tsin_index];

		/* TP re-configuration on page 168 of functional spec */

		/* disable IB (prevents more TS data going to memdma) */
		writel(0, fei->io + C8SECTPFE_IB_SYS(channel->tsin_id));

		/* disable this channels descriptor */
		writel(0, channel->irec + DMA_PRDS_TPENABLE);

		tasklet_disable(&channel->tsklet);

		/* now request memdma channel goes idle */
		idlereq = (1 << channel->tsin_id) | IDLEREQ;
		writel(idlereq, fei->io + DMA_IDLE_REQ);

		/* wait for idle irq handler to signal completion */
		ret = wait_for_completion_timeout(&channel->idle_completion,
						  msecs_to_jiffies(100));

		if (ret == 0)
			dev_warn(fei->dev,
				 "Timeout waiting for idle irq on tsin%d\n",
				 channel->tsin_id);

		reinit_completion(&channel->idle_completion);

		/* reset read / write ptrs for this channel */

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSBASE_TP(0));

		tmp = channel->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
		writel(tmp, channel->irec + DMA_PRDS_BUSTOP_TP(0));

		writel(channel->back_buffer_busaddr,
			channel->irec + DMA_PRDS_BUSWP_TP(0));

		dev_dbg(fei->dev,
			"%s:%d stopping DMA feed on stdemux=%p channel=%d\n",
			__func__, __LINE__, stdemux, channel->tsin_id);

		/* turn off all PIDS in the bitmap */
		memset(channel->pid_buffer_aligned, 0, PID_TABLE_SIZE);

		/* manage cache so data is visible to HW */
		dma_sync_single_for_device(fei->dev,
					channel->pid_buffer_busaddr,
					PID_TABLE_SIZE,
					DMA_TO_DEVICE);

		channel->active = 0;
	}

	if (--fei->global_feed_count == 0) {
		dev_dbg(fei->dev, "%s:%d global_feed_count=%d\n"
			, __func__, __LINE__, fei->global_feed_count);

		del_timer(&fei->timer);
	}

	mutex_unlock(&fei->lock);

	return 0;
}
362
find_channel(struct c8sectpfei * fei,int tsin_num)363 static struct channel_info *find_channel(struct c8sectpfei *fei, int tsin_num)
364 {
365 int i;
366
367 for (i = 0; i < C8SECTPFE_MAX_TSIN_CHAN; i++) {
368 if (!fei->channel_data[i])
369 continue;
370
371 if (fei->channel_data[i]->tsin_id == tsin_num)
372 return fei->channel_data[i];
373 }
374
375 return NULL;
376 }
377
c8sectpfe_getconfig(struct c8sectpfei * fei)378 static void c8sectpfe_getconfig(struct c8sectpfei *fei)
379 {
380 struct c8sectpfe_hw *hw = &fei->hw_stats;
381
382 hw->num_ib = readl(fei->io + SYS_CFG_NUM_IB);
383 hw->num_mib = readl(fei->io + SYS_CFG_NUM_MIB);
384 hw->num_swts = readl(fei->io + SYS_CFG_NUM_SWTS);
385 hw->num_tsout = readl(fei->io + SYS_CFG_NUM_TSOUT);
386 hw->num_ccsc = readl(fei->io + SYS_CFG_NUM_CCSC);
387 hw->num_ram = readl(fei->io + SYS_CFG_NUM_RAM);
388 hw->num_tp = readl(fei->io + SYS_CFG_NUM_TP);
389
390 dev_info(fei->dev, "C8SECTPFE hw supports the following:\n");
391 dev_info(fei->dev, "Input Blocks: %d\n", hw->num_ib);
392 dev_info(fei->dev, "Merged Input Blocks: %d\n", hw->num_mib);
393 dev_info(fei->dev, "Software Transport Stream Inputs: %d\n"
394 , hw->num_swts);
395 dev_info(fei->dev, "Transport Stream Output: %d\n", hw->num_tsout);
396 dev_info(fei->dev, "Cable Card Converter: %d\n", hw->num_ccsc);
397 dev_info(fei->dev, "RAMs supported by C8SECTPFE: %d\n", hw->num_ram);
398 dev_info(fei->dev, "Tango TPs supported by C8SECTPFE: %d\n"
399 , hw->num_tp);
400 }
401
c8sectpfe_idle_irq_handler(int irq,void * priv)402 static irqreturn_t c8sectpfe_idle_irq_handler(int irq, void *priv)
403 {
404 struct c8sectpfei *fei = priv;
405 struct channel_info *chan;
406 int bit;
407 unsigned long tmp = readl(fei->io + DMA_IDLE_REQ);
408
409 /* page 168 of functional spec: Clear the idle request
410 by writing 0 to the C8SECTPFE_DMA_IDLE_REQ register. */
411
412 /* signal idle completion */
413 for_each_set_bit(bit, &tmp, fei->hw_stats.num_ib) {
414
415 chan = find_channel(fei, bit);
416
417 if (chan)
418 complete(&chan->idle_completion);
419 }
420
421 writel(0, fei->io + DMA_IDLE_REQ);
422
423 return IRQ_HANDLED;
424 }
425
426
free_input_block(struct c8sectpfei * fei,struct channel_info * tsin)427 static void free_input_block(struct c8sectpfei *fei, struct channel_info *tsin)
428 {
429 if (!fei || !tsin)
430 return;
431
432 if (tsin->back_buffer_busaddr)
433 if (!dma_mapping_error(fei->dev, tsin->back_buffer_busaddr))
434 dma_unmap_single(fei->dev, tsin->back_buffer_busaddr,
435 FEI_BUFFER_SIZE, DMA_BIDIRECTIONAL);
436
437 kfree(tsin->back_buffer_start);
438
439 if (tsin->pid_buffer_busaddr)
440 if (!dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr))
441 dma_unmap_single(fei->dev, tsin->pid_buffer_busaddr,
442 PID_TABLE_SIZE, DMA_BIDIRECTIONAL);
443
444 kfree(tsin->pid_buffer_start);
445 }
446
447 #define MAX_NAME 20
448
/*
 * Configure one tsin channel: allocate and DMA-map the TS back buffer and
 * the HW PID-filter table, select the tsin pinctrl state, then program
 * the input block registers and the memdma pointer-record (irec) block
 * for this channel. On any failure everything already set up for the
 * channel is released via free_input_block().
 *
 * Returns 0 on success or a negative errno.
 */
static int configure_memdma_and_inputblock(struct c8sectpfei *fei,
				struct channel_info *tsin)
{
	int ret;
	u32 tmp;
	char tsin_pin_name[MAX_NAME];

	if (!fei || !tsin)
		return -EINVAL;

	dev_dbg(fei->dev, "%s:%d Configuring channel=%p tsin=%d\n"
		, __func__, __LINE__, tsin, tsin->tsin_id);

	init_completion(&tsin->idle_completion);

	/* over-allocate so the buffer can be aligned to FEI_ALIGNMENT */
	tsin->back_buffer_start = kzalloc(FEI_BUFFER_SIZE + FEI_ALIGNMENT, GFP_KERNEL);
	if (!tsin->back_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/* Ensure backbuffer is 32byte aligned */
	tsin->back_buffer_aligned = tsin->back_buffer_start + FEI_ALIGNMENT;

	tsin->back_buffer_aligned = PTR_ALIGN(tsin->back_buffer_aligned, FEI_ALIGNMENT);

	tsin->back_buffer_busaddr = dma_map_single(fei->dev,
					tsin->back_buffer_aligned,
					FEI_BUFFER_SIZE,
					DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->back_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map back_buffer\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/*
	 * The pid buffer can be configured (in hw) for byte or bit
	 * per pid. By powers of deduction we conclude stih407 family
	 * is configured (at SoC design stage) for bit per pid.
	 */
	tsin->pid_buffer_start = kzalloc(PID_TABLE_SIZE + PID_TABLE_SIZE, GFP_KERNEL);
	if (!tsin->pid_buffer_start) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * PID buffer needs to be aligned to size of the pid table
	 * which at bit per pid is 1024 bytes (8192 pids / 8).
	 * PIDF_BASE register enforces this alignment when writing
	 * the register.
	 */

	tsin->pid_buffer_aligned = tsin->pid_buffer_start + PID_TABLE_SIZE;

	tsin->pid_buffer_aligned = PTR_ALIGN(tsin->pid_buffer_aligned, PID_TABLE_SIZE);

	tsin->pid_buffer_busaddr = dma_map_single(fei->dev,
						tsin->pid_buffer_aligned,
						PID_TABLE_SIZE,
						DMA_BIDIRECTIONAL);

	if (dma_mapping_error(fei->dev, tsin->pid_buffer_busaddr)) {
		dev_err(fei->dev, "failed to map pid_bitmap\n");
		ret = -EFAULT;
		goto err_unmap;
	}

	/* manage cache so pid bitmap is visible to HW */
	dma_sync_single_for_device(fei->dev,
				tsin->pid_buffer_busaddr,
				PID_TABLE_SIZE,
				DMA_TO_DEVICE);

	/* pinctrl state name e.g. "tsin0-serial" or "tsin0-parallel" */
	snprintf(tsin_pin_name, MAX_NAME, "tsin%d-%s", tsin->tsin_id,
		(tsin->serial_not_parallel ? "serial" : "parallel"));

	tsin->pstate = pinctrl_lookup_state(fei->pinctrl, tsin_pin_name);
	if (IS_ERR(tsin->pstate)) {
		dev_err(fei->dev, "%s: pinctrl_lookup_state couldn't find %s state\n"
			, __func__, tsin_pin_name);
		ret = PTR_ERR(tsin->pstate);
		goto err_unmap;
	}

	ret = pinctrl_select_state(fei->pinctrl, tsin->pstate);

	if (ret) {
		dev_err(fei->dev, "%s: pinctrl_select_state failed\n"
			, __func__);
		goto err_unmap;
	}

	/* Enable this input block */
	tmp = readl(fei->io + SYS_INPUT_CLKEN);
	tmp |= BIT(tsin->tsin_id);
	writel(tmp, fei->io + SYS_INPUT_CLKEN);

	/* build the input-format configuration from the DT properties */
	if (tsin->serial_not_parallel)
		tmp |= C8SECTPFE_SERIAL_NOT_PARALLEL;

	if (tsin->invert_ts_clk)
		tmp |= C8SECTPFE_INVERT_TSCLK;

	if (tsin->async_not_sync)
		tmp |= C8SECTPFE_ASYNC_NOT_SYNC;

	tmp |= C8SECTPFE_ALIGN_BYTE_SOP | C8SECTPFE_BYTE_ENDIANNESS_MSB;

	writel(tmp, fei->io + C8SECTPFE_IB_IP_FMT_CFG(tsin->tsin_id));

	/* sync/drop thresholds and the 0x47 TS sync byte */
	writel(C8SECTPFE_SYNC(0x9) |
		C8SECTPFE_DROP(0x9) |
		C8SECTPFE_TOKEN(0x47),
		fei->io + C8SECTPFE_IB_SYNCLCKDRP_CFG(tsin->tsin_id));

	writel(TS_PKT_SIZE, fei->io + C8SECTPFE_IB_PKT_LEN(tsin->tsin_id));

	/* Place the FIFO's at the end of the irec descriptors */

	tsin->fifo = (tsin->tsin_id * FIFO_LEN);

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id));
	writel(tsin->fifo + FIFO_LEN - 1,
		fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id));

	writel(tsin->fifo, fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id));
	writel(tsin->fifo, fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id));

	/* point the HW PID filter at the bitmap's bus address */
	writel(tsin->pid_buffer_busaddr,
		fei->io + PIDF_BASE(tsin->tsin_id));

	dev_dbg(fei->dev, "chan=%d PIDF_BASE=0x%x pid_bus_addr=%pad\n",
		tsin->tsin_id, readl(fei->io + PIDF_BASE(tsin->tsin_id)),
		&tsin->pid_buffer_busaddr);

	/* Configure and enable HW PID filtering */

	/*
	 * The PID value is created by assembling the first 8 bytes of
	 * the TS packet into a 64-bit word in big-endian format. A
	 * slice of that 64-bit word is taken from
	 * (PID_OFFSET+PID_NUM_BITS-1) to PID_OFFSET.
	 */
	tmp = (C8SECTPFE_PID_ENABLE | C8SECTPFE_PID_NUMBITS(13)
		| C8SECTPFE_PID_OFFSET(40));

	writel(tmp, fei->io + C8SECTPFE_IB_PID_SET(tsin->tsin_id));

	dev_dbg(fei->dev, "chan=%d setting wp: %d, rp: %d, buf: %d-%d\n",
		tsin->tsin_id,
		readl(fei->io + C8SECTPFE_IB_WRT_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_READ_PNT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_STRT(tsin->tsin_id)),
		readl(fei->io + C8SECTPFE_IB_BUFF_END(tsin->tsin_id)));

	/* Get base addpress of pointer record block from DMEM */
	tsin->irec = fei->io + DMA_MEMDMA_OFFSET + DMA_DMEM_OFFSET +
			readl(fei->io + DMA_PTRREC_BASE);

	/* fill out pointer record data structure */

	/* advance pointer record block to our channel */
	tsin->irec += (tsin->tsin_id * DMA_PRDS_SIZE);

	writel(tsin->fifo, tsin->irec + DMA_PRDS_MEMBASE);

	writel(tsin->fifo + FIFO_LEN - 1, tsin->irec + DMA_PRDS_MEMTOP);

	/* packet size rounded up to an 8-byte boundary (188 -> 192) */
	writel((188 + 7)&~7, tsin->irec + DMA_PRDS_PKTSIZE);

	writel(0x1, tsin->irec + DMA_PRDS_TPENABLE);

	/* read/write pointers with physical bus address */

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSBASE_TP(0));

	tmp = tsin->back_buffer_busaddr + FEI_BUFFER_SIZE - 1;
	writel(tmp, tsin->irec + DMA_PRDS_BUSTOP_TP(0));

	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSWP_TP(0));
	writel(tsin->back_buffer_busaddr, tsin->irec + DMA_PRDS_BUSRP_TP(0));

	/* initialize tasklet */
	tasklet_setup(&tsin->tsklet, channel_swdemux_tsklet);

	return 0;

err_unmap:
	free_input_block(fei, tsin);
	return ret;
}
643
c8sectpfe_error_irq_handler(int irq,void * priv)644 static irqreturn_t c8sectpfe_error_irq_handler(int irq, void *priv)
645 {
646 struct c8sectpfei *fei = priv;
647
648 dev_err(fei->dev, "%s: error handling not yet implemented\n"
649 , __func__);
650
651 /*
652 * TODO FIXME we should detect some error conditions here
653 * and ideally do something about them!
654 */
655
656 return IRQ_HANDLED;
657 }
658
/*
 * Probe: map the core and SRAM register regions, claim the idle/error
 * IRQs, enable the IP clock, parse one DT child node per tsin channel
 * (pin configuration, i2c bus, demod card, reset GPIO), set up the
 * polling timer, and register the DVB frontends.
 *
 * Channel DMA buffers are NOT allocated here; that happens lazily from
 * load_c8sectpfe_fw() -> configure_channels() on the first feed.
 */
static int c8sectpfe_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *child, *np = dev->of_node;
	struct c8sectpfei *fei;
	struct resource *res;
	int ret, index = 0;
	struct channel_info *tsin;

	/* Allocate the c8sectpfei structure */
	fei = devm_kzalloc(dev, sizeof(struct c8sectpfei), GFP_KERNEL);
	if (!fei)
		return -ENOMEM;

	fei->dev = dev;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "c8sectpfe");
	fei->io = devm_ioremap_resource(dev, res);
	if (IS_ERR(fei->io))
		return PTR_ERR(fei->io);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
					"c8sectpfe-ram");
	fei->sram = devm_ioremap_resource(dev, res);
	if (IS_ERR(fei->sram))
		return PTR_ERR(fei->sram);

	fei->sram_size = resource_size(res);

	fei->idle_irq = platform_get_irq_byname(pdev, "c8sectpfe-idle-irq");
	if (fei->idle_irq < 0)
		return fei->idle_irq;

	fei->error_irq = platform_get_irq_byname(pdev, "c8sectpfe-error-irq");
	if (fei->error_irq < 0)
		return fei->error_irq;

	platform_set_drvdata(pdev, fei);

	fei->c8sectpfeclk = devm_clk_get(dev, "c8sectpfe");
	if (IS_ERR(fei->c8sectpfeclk)) {
		dev_err(dev, "c8sectpfe clk not found\n");
		return PTR_ERR(fei->c8sectpfeclk);
	}

	ret = clk_prepare_enable(fei->c8sectpfeclk);
	if (ret) {
		dev_err(dev, "Failed to enable c8sectpfe clock\n");
		return ret;
	}

	/* to save power disable all IP's (on by default) */
	writel(0, fei->io + SYS_INPUT_CLKEN);

	/* Enable memdma clock */
	writel(MEMDMAENABLE, fei->io + SYS_OTHER_CLKEN);

	/* clear internal sram */
	memset_io(fei->sram, 0x0, fei->sram_size);

	c8sectpfe_getconfig(fei);

	ret = devm_request_irq(dev, fei->idle_irq, c8sectpfe_idle_irq_handler,
			0, "c8sectpfe-idle-irq", fei);
	if (ret) {
		dev_err(dev, "Can't register c8sectpfe-idle-irq IRQ.\n");
		goto err_clk_disable;
	}

	ret = devm_request_irq(dev, fei->error_irq,
				c8sectpfe_error_irq_handler, 0,
				"c8sectpfe-error-irq", fei);
	if (ret) {
		dev_err(dev, "Can't register c8sectpfe-error-irq IRQ.\n");
		goto err_clk_disable;
	}

	fei->tsin_count = of_get_child_count(np);

	/* sanity: can't have more DT channels than HW input blocks */
	if (fei->tsin_count > C8SECTPFE_MAX_TSIN_CHAN ||
		fei->tsin_count > fei->hw_stats.num_ib) {

		dev_err(dev, "More tsin declared than exist on SoC!\n");
		ret = -EINVAL;
		goto err_clk_disable;
	}

	fei->pinctrl = devm_pinctrl_get(dev);

	if (IS_ERR(fei->pinctrl)) {
		dev_err(dev, "Error getting tsin pins\n");
		ret = PTR_ERR(fei->pinctrl);
		goto err_clk_disable;
	}

	/* one DT child node per tsin channel */
	for_each_child_of_node(np, child) {
		struct device_node *i2c_bus;

		fei->channel_data[index] = devm_kzalloc(dev,
						sizeof(struct channel_info),
						GFP_KERNEL);

		if (!fei->channel_data[index]) {
			ret = -ENOMEM;
			goto err_node_put;
		}

		tsin = fei->channel_data[index];

		tsin->fei = fei;

		ret = of_property_read_u32(child, "tsin-num", &tsin->tsin_id);
		if (ret) {
			dev_err(&pdev->dev, "No tsin_num found\n");
			goto err_node_put;
		}

		/*
		 * sanity check value
		 * NOTE(review): '>' allows tsin_id == num_ib; an off-by-one
		 * if ids are 0-based — confirm against the binding.
		 */
		if (tsin->tsin_id > fei->hw_stats.num_ib) {
			dev_err(&pdev->dev,
				"tsin-num %d specified greater than number\n\tof input block hw in SoC! (%d)",
				tsin->tsin_id, fei->hw_stats.num_ib);
			ret = -EINVAL;
			goto err_node_put;
		}

		tsin->invert_ts_clk = of_property_read_bool(child,
							"invert-ts-clk");

		tsin->serial_not_parallel = of_property_read_bool(child,
							"serial-not-parallel");

		tsin->async_not_sync = of_property_read_bool(child,
							"async-not-sync");

		ret = of_property_read_u32(child, "dvb-card",
					&tsin->dvb_card);
		if (ret) {
			dev_err(&pdev->dev, "No dvb-card found\n");
			goto err_node_put;
		}

		i2c_bus = of_parse_phandle(child, "i2c-bus", 0);
		if (!i2c_bus) {
			dev_err(&pdev->dev, "No i2c-bus found\n");
			ret = -ENODEV;
			goto err_node_put;
		}
		tsin->i2c_adapter =
			of_find_i2c_adapter_by_node(i2c_bus);
		if (!tsin->i2c_adapter) {
			dev_err(&pdev->dev, "No i2c adapter found\n");
			of_node_put(i2c_bus);
			ret = -ENODEV;
			goto err_node_put;
		}
		of_node_put(i2c_bus);

		/* Acquire reset GPIO and activate it */
		tsin->rst_gpio = devm_fwnode_gpiod_get(dev,
						of_fwnode_handle(child),
						"reset", GPIOD_OUT_HIGH,
						"NIM reset");
		ret = PTR_ERR_OR_ZERO(tsin->rst_gpio);
		/* -EBUSY means the line is shared with another NIM: not fatal */
		if (ret && ret != -EBUSY) {
			dev_err(dev, "Can't request tsin%d reset gpio\n",
				fei->channel_data[index]->tsin_id);
			goto err_node_put;
		}

		if (!ret) {
			/* wait for the chip to reset */
			usleep_range(3500, 5000);
			/* release the reset line */
			gpiod_set_value_cansleep(tsin->rst_gpio, 0);
			usleep_range(3000, 5000);
		}

		tsin->demux_mapping = index;

		dev_dbg(fei->dev,
			"channel=%p n=%d tsin_num=%d, invert-ts-clk=%d\n\tserial-not-parallel=%d pkt-clk-valid=%d dvb-card=%d\n",
			fei->channel_data[index], index,
			tsin->tsin_id, tsin->invert_ts_clk,
			tsin->serial_not_parallel, tsin->async_not_sync,
			tsin->dvb_card);

		index++;
	}

	/* Setup timer interrupt */
	timer_setup(&fei->timer, c8sectpfe_timer_interrupt, 0);

	mutex_init(&fei->lock);

	/* Get the configuration information about the tuners */
	ret = c8sectpfe_tuner_register_frontend(&fei->c8sectpfe[0],
					(void *)fei,
					c8sectpfe_start_feed,
					c8sectpfe_stop_feed);
	if (ret) {
		dev_err(dev, "c8sectpfe_tuner_register_frontend failed (%d)\n",
			ret);
		goto err_clk_disable;
	}

	c8sectpfe_debugfs_init(fei);

	return 0;

err_node_put:
	of_node_put(child);
err_clk_disable:
	clk_disable_unprepare(fei->c8sectpfeclk);
	return ret;
}
875
/*
 * Remove: tear down in reverse order of probe/firmware-load — unregister
 * the frontends, release every channel's input-block resources, remove
 * debugfs entries, stop the SLIM memdma core, gate all internal IP
 * clocks and finally disable the c8sectpfe clock.
 */
static void c8sectpfe_remove(struct platform_device *pdev)
{
	struct c8sectpfei *fei = platform_get_drvdata(pdev);
	struct channel_info *channel;
	int i;

	/*
	 * NOTE(review): fw_ack is completed outside this file (presumably
	 * when firmware load finishes) — confirm this cannot block forever
	 * if the firmware was never requested.
	 */
	wait_for_completion(&fei->fw_ack);

	c8sectpfe_tuner_unregister_frontend(fei->c8sectpfe[0], fei);

	/*
	 * Now loop through and un-configure each of the InputBlock resources
	 */
	for (i = 0; i < fei->tsin_count; i++) {
		channel = fei->channel_data[i];
		free_input_block(fei, channel);
	}

	c8sectpfe_debugfs_exit(fei);

	dev_info(fei->dev, "Stopping memdma SLIM core\n");
	if (readl(fei->io + DMA_CPU_RUN))
		writel(0x0, fei->io + DMA_CPU_RUN);

	/* unclock all internal IP's */
	if (readl(fei->io + SYS_INPUT_CLKEN))
		writel(0, fei->io + SYS_INPUT_CLKEN);

	if (readl(fei->io + SYS_OTHER_CLKEN))
		writel(0, fei->io + SYS_OTHER_CLKEN);

	clk_disable_unprepare(fei->c8sectpfeclk);
}
909
910
configure_channels(struct c8sectpfei * fei)911 static int configure_channels(struct c8sectpfei *fei)
912 {
913 int index = 0, ret;
914 struct device_node *child, *np = fei->dev->of_node;
915
916 /* iterate round each tsin and configure memdma descriptor and IB hw */
917 for_each_child_of_node(np, child) {
918 ret = configure_memdma_and_inputblock(fei,
919 fei->channel_data[index]);
920 if (ret) {
921 dev_err(fei->dev,
922 "configure_memdma_and_inputblock failed\n");
923 of_node_put(child);
924 goto err_unmap;
925 }
926 index++;
927 }
928
929 return 0;
930
931 err_unmap:
932 while (--index >= 0)
933 free_input_block(fei, fei->channel_data[index]);
934
935 return ret;
936 }
937
938 static int
c8sectpfe_elf_sanity_check(struct c8sectpfei * fei,const struct firmware * fw)939 c8sectpfe_elf_sanity_check(struct c8sectpfei *fei, const struct firmware *fw)
940 {
941 struct elf32_hdr *ehdr;
942 char class;
943
944 if (!fw) {
945 dev_err(fei->dev, "failed to load %s\n", FIRMWARE_MEMDMA);
946 return -EINVAL;
947 }
948
949 if (fw->size < sizeof(struct elf32_hdr)) {
950 dev_err(fei->dev, "Image is too small\n");
951 return -EINVAL;
952 }
953
954 ehdr = (struct elf32_hdr *)fw->data;
955
956 /* We only support ELF32 at this point */
957 class = ehdr->e_ident[EI_CLASS];
958 if (class != ELFCLASS32) {
959 dev_err(fei->dev, "Unsupported class: %d\n", class);
960 return -EINVAL;
961 }
962
963 if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) {
964 dev_err(fei->dev, "Unsupported firmware endianness\n");
965 return -EINVAL;
966 }
967
968 if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) {
969 dev_err(fei->dev, "Image is too small\n");
970 return -EINVAL;
971 }
972
973 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
974 dev_err(fei->dev, "Image is corrupted (bad magic)\n");
975 return -EINVAL;
976 }
977
978 /* Check ELF magic */
979 ehdr = (Elf32_Ehdr *)fw->data;
980 if (ehdr->e_ident[EI_MAG0] != ELFMAG0 ||
981 ehdr->e_ident[EI_MAG1] != ELFMAG1 ||
982 ehdr->e_ident[EI_MAG2] != ELFMAG2 ||
983 ehdr->e_ident[EI_MAG3] != ELFMAG3) {
984 dev_err(fei->dev, "Invalid ELF magic\n");
985 return -EINVAL;
986 }
987
988 if (ehdr->e_type != ET_EXEC) {
989 dev_err(fei->dev, "Unsupported ELF header type\n");
990 return -EINVAL;
991 }
992
993 if (ehdr->e_phoff > fw->size) {
994 dev_err(fei->dev, "Firmware size is too small\n");
995 return -EINVAL;
996 }
997
998 return 0;
999 }
1000
1001
/*
 * Copy one executable (IMEM) ELF segment into the SLIM core's instruction
 * memory. IMEM instructions are 24 bits wide in the ELF file but must be
 * written as 32-bit words, so a zero pad byte is inserted after every
 * third source byte.
 */
static void load_imem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
			const struct firmware *fw, u8 __iomem *dest,
			int seg_num)
{
	const u8 *imem_src = fw->data + phdr->p_offset;
	int i;

	/*
	 * For IMEM segments, the segment contains 24-bit
	 * instructions which must be padded to 32-bit
	 * instructions before being written. The written
	 * segment is padded with NOP instructions.
	 */

	dev_dbg(fei->dev,
		"Loading IMEM segment %d 0x%08x\n\t (0x%x bytes) -> 0x%p (0x%x bytes)\n",
		seg_num, phdr->p_paddr, phdr->p_filesz, dest,
		phdr->p_memsz + phdr->p_memsz / 3);

	for (i = 0; i < phdr->p_filesz; i++) {

		/*
		 * NOTE(review): imem_src points at normal firmware RAM, not
		 * MMIO, so the readb()+__iomem cast on the source looks
		 * suspect — presumably only kept for sparse symmetry with
		 * the writeb(); confirm before "fixing".
		 */
		writeb(readb((void __iomem *)imem_src), (void __iomem *)dest);

		/* Every 3 bytes, add an additional
		 * padding zero in destination */
		if (i % 3 == 2) {
			dest++;
			writeb(0x00, (void __iomem *)dest);
		}

		dest++;
		imem_src++;
	}
}
1036
/*
 * Copy one data (DMEM) ELF segment into the SLIM core's data memory:
 * the file-backed portion is copied verbatim and the remainder of the
 * in-memory size is zero-filled.
 */
static void load_dmem_segment(struct c8sectpfei *fei, Elf32_Phdr *phdr,
			const struct firmware *fw, u8 __iomem *dst, int seg_num)
{
	u32 filesz = phdr->p_filesz;
	u32 padding = phdr->p_memsz - filesz;

	dev_dbg(fei->dev,
		"Loading DMEM segment %d 0x%08x\n\t(0x%x bytes) -> 0x%p (0x%x bytes)\n",
		seg_num, phdr->p_paddr, filesz,
		dst, phdr->p_memsz);

	/* copy the file-backed bytes, then zero-pad up to p_memsz */
	memcpy((void __force *)dst, (void *)fw->data + phdr->p_offset,
		filesz);

	memset((void __force *)dst + filesz, 0, padding);
}
1056
/*
 * Walk the firmware's ELF program headers and load each PT_LOAD segment
 * into the SLIM core: executable segments (PF_X) go to IMEM, all others
 * to DMEM. Load offsets in the ELF are 32-bit *word* addresses, hence
 * the multiply by sizeof(unsigned int).
 *
 * Consumes the firmware: release_firmware(fw) is always called here,
 * so the caller must not release it again. Returns 0 or -EINVAL.
 */
static int load_slim_core_fw(const struct firmware *fw, struct c8sectpfei *fei)
{
	Elf32_Ehdr *ehdr;
	Elf32_Phdr *phdr;
	u8 __iomem *dst;
	int err = 0, i;

	if (!fw || !fei)
		return -EINVAL;

	ehdr = (Elf32_Ehdr *)fw->data;
	phdr = (Elf32_Phdr *)(fw->data + ehdr->e_phoff);

	/* go through the available ELF segments */
	for (i = 0; i < ehdr->e_phnum; i++, phdr++) {

		/* Only consider LOAD segments */
		if (phdr->p_type != PT_LOAD)
			continue;

		/*
		 * Check segment is contained within the fw->data buffer
		 */
		if (phdr->p_offset + phdr->p_filesz > fw->size) {
			dev_err(fei->dev,
				"Segment %d is outside of firmware file\n", i);
			err = -EINVAL;
			break;
		}

		/*
		 * MEMDMA IMEM has executable flag set, otherwise load
		 * this segment into DMEM.
		 *
		 */

		if (phdr->p_flags & PF_X) {
			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_IMEM;
			/*
			 * The Slim ELF file uses 32-bit word addressing for
			 * load offsets.
			 */
			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
			load_imem_segment(fei, phdr, fw, dst, i);
		} else {
			dst = (u8 __iomem *) fei->io + DMA_MEMDMA_DMEM;
			/*
			 * The Slim ELF file uses 32-bit word addressing for
			 * load offsets.
			 */
			dst += (phdr->p_paddr & 0xFFFFF) * sizeof(unsigned int);
			load_dmem_segment(fei, phdr, fw, dst, i);
		}
	}

	release_firmware(fw);
	return err;
}
1115
/*
 * Request, validate and load the MEMDMA SLIM core firmware, configure
 * the input-block channels, then boot the SLIM core and mark the
 * firmware as loaded. Called lazily from the start/stop feed paths.
 *
 * Note on firmware ownership: load_slim_core_fw() releases fw in all
 * cases, so only the sanity-check failure path releases it here.
 *
 * Returns 0 on success or a negative errno.
 */
static int load_c8sectpfe_fw(struct c8sectpfei *fei)
{
	const struct firmware *fw;
	int err;

	dev_info(fei->dev, "Loading firmware: %s\n", FIRMWARE_MEMDMA);

	err = request_firmware(&fw, FIRMWARE_MEMDMA, fei->dev);
	if (err)
		return err;

	err = c8sectpfe_elf_sanity_check(fei, fw);
	if (err) {
		dev_err(fei->dev, "c8sectpfe_elf_sanity_check failed err=(%d)\n"
			, err);
		release_firmware(fw);
		return err;
	}

	/* fw is consumed (released) by load_slim_core_fw() */
	err = load_slim_core_fw(fw, fei);
	if (err) {
		dev_err(fei->dev, "load_slim_core_fw failed err=(%d)\n", err);
		return err;
	}

	/* now the firmware is loaded configure the input blocks */
	err = configure_channels(fei);
	if (err) {
		dev_err(fei->dev, "configure_channels failed err=(%d)\n", err);
		return err;
	}

	/*
	 * STBus target port can access IMEM and DMEM ports
	 * without waiting for CPU
	 */
	writel(0x1, fei->io + DMA_PER_STBUS_SYNC);

	dev_info(fei->dev, "Boot the memdma SLIM core\n");
	writel(0x1, fei->io + DMA_CPU_RUN);

	atomic_set(&fei->fw_loaded, 1);

	return 0;
}
1161
/* OF match table: binds the driver to STiH407-family c8sectpfe nodes */
static const struct of_device_id c8sectpfe_match[] = {
	{ .compatible = "st,stih407-c8sectpfe" },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, c8sectpfe_match);

/* Platform driver glue; remove_new because c8sectpfe_remove returns void */
static struct platform_driver c8sectpfe_driver = {
	.driver = {
		.name = "c8sectpfe",
		.of_match_table = c8sectpfe_match,
	},
	.probe = c8sectpfe_probe,
	.remove_new = c8sectpfe_remove,
};

module_platform_driver(c8sectpfe_driver);

MODULE_AUTHOR("Peter Bennett <peter.bennett@st.com>");
MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
MODULE_DESCRIPTION("C8SECTPFE STi DVB Driver");
MODULE_LICENSE("GPL");
1183