/*
 * netup_unidvb_core.c
 *
 * Main module for NetUP Universal Dual DVB-CI
 *
 * Copyright (C) 2014 NetUP Inc.
 * Copyright (C) 2014 Sergey Kozlov <serjk@netup.ru>
 * Copyright (C) 2014 Abylay Ospan <aospan@netup.ru>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>

#include "netup_unidvb.h"
#include "cxd2841er.h"
#include "horus3a.h"
#include "ascot2e.h"
#include "helene.h"
#include "lnbh25.h"

static int spi_enable;
module_param(spi_enable, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
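/*
 * Set spi_enable=1 (e.g. "modprobe netup_unidvb spi_enable=1") to
 * register the on-board SPI flash controller; it is forced on when an
 * old-firmware card is detected so the firmware can be upgraded.
 */
MODULE_PARM_DESC(spi_enable, "Enable on-board SPI flash controller (0=off, 1=on; forced on for old firmware)");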

MODULE_DESCRIPTION("Driver for NetUP Dual Universal DVB CI PCIe card");
MODULE_AUTHOR("info@netup.ru");
MODULE_VERSION(NETUP_UNIDVB_VERSION);
MODULE_LICENSE("GPL");

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/* Avalon-MM PCI-E registers */
#define AVL_PCIE_IENR		0x50
#define AVL_PCIE_ISR		0x40
#define AVL_IRQ_ENABLE		0x80
#define AVL_IRQ_ASSERTED	0x80
/* GPIO registers */
#define GPIO_REG_IO		0x4880
#define GPIO_REG_IO_TOGGLE	0x4882
#define GPIO_REG_IO_SET		0x4884
#define GPIO_REG_IO_CLEAR	0x4886
/* GPIO bits */
#define GPIO_FEA_RESET		(1 << 0)
#define GPIO_FEB_RESET		(1 << 1)
#define GPIO_RFA_CTL		(1 << 2)
#define GPIO_RFB_CTL		(1 << 3)
#define GPIO_FEA_TU_RESET	(1 << 4)
#define GPIO_FEB_TU_RESET	(1 << 5)
/* DMA base addresses */
#define NETUP_DMA0_ADDR		0x4900
#define NETUP_DMA1_ADDR		0x4940
/* 8 DMA blocks * 128 packets * 188 bytes */
#define NETUP_DMA_BLOCKS_COUNT	8
#define NETUP_DMA_PACKETS_COUNT	128
/* DMA status bits */
#define BIT_DMA_RUN		1
#define BIT_DMA_ERROR		2
#define BIT_DMA_IRQ		0x200

/**
 * struct netup_dma_regs - the map of DMA module registers
 * @ctrlstat_set:	Control register, write to set control bits
 * @ctrlstat_clear:	Control register, write to clear control bits
 * @start_addr_lo:	DMA ring buffer start address, lower part
 * @start_addr_hi:	DMA ring buffer start address, higher part
 * @size:		DMA ring buffer size register
 *			* Bits [0-7]:	DMA packet size, 188 bytes
 *			* Bits [8-15]:	packet count per block, 128 packets
 *			* Bits [24-31]:	block count, 8 blocks
 * @timeout:		DMA timeout in units of 8 ns
 *			For example, a value of 375000000 equals 3 s
 * @curr_addr_lo:	Current ring buffer head address, lower part
 * @curr_addr_hi:	Current ring buffer head address, higher part
 * @stat_pkt_received:	Statistics register, not tested
 * @stat_pkt_accepted:	Statistics register, not tested
 * @stat_pkt_overruns:	Statistics register, not tested
 * @stat_pkt_underruns:	Statistics register, not tested
 * @stat_fifo_overruns:	Statistics register, not tested
 */
struct netup_dma_regs {
	__le32	ctrlstat_set;
	__le32	ctrlstat_clear;
	__le32	start_addr_lo;
	__le32	start_addr_hi;
	__le32	size;
	__le32	timeout;
	__le32	curr_addr_lo;
	__le32	curr_addr_hi;
	__le32	stat_pkt_received;
	__le32	stat_pkt_accepted;
	__le32	stat_pkt_overruns;
	__le32	stat_pkt_underruns;
	__le32	stat_fifo_overruns;
} __packed __aligned(1);

struct netup_unidvb_buffer {
	struct vb2_v4l2_buffer vb;
	struct list_head	list;
	u32			size;
};

static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc);
static void netup_unidvb_queue_cleanup(struct netup_dma *dma);

static struct cxd2841er_config demod_config = {
	.i2c_addr = 0xc8,
	.xtal = SONY_XTAL_24000,
	.flags = CXD2841ER_USE_GATECTRL | CXD2841ER_ASCOT
};

static struct horus3a_config horus3a_conf = {
	.i2c_address = 0xc0,
	.xtal_freq_mhz = 16,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct ascot2e_config ascot2e_conf = {
	.i2c_address = 0xc2,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct helene_config helene_conf = {
	.i2c_address = 0xc0,
	.xtal = SONY_HELENE_XTAL_24000,
	.set_tuner_callback = netup_unidvb_tuner_ctrl
};

static struct lnbh25_config lnbh25_conf = {
	.i2c_address = 0x10,
	.data2_config = LNBH25_TEN | LNBH25_EXTM
};

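/*
 * RF switch control callback, invoked by the tuner drivers: route the
 * satellite or the terrestrial/cable RF input to the given tuner by
 * toggling the corresponding GPIO bit (polarity is inverted on
 * hw rev. 1.4).
 */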
static int netup_unidvb_tuner_ctrl(void *priv, int is_dvb_tc)
{
	u8 reg, mask;
	struct netup_dma *dma = priv;
	struct netup_unidvb_dev *ndev;

	if (!priv)
		return -EINVAL;
	ndev = dma->ndev;
	dev_dbg(&ndev->pci_dev->dev, "%s(): num %d is_dvb_tc %d\n",
		__func__, dma->num, is_dvb_tc);
	reg = readb(ndev->bmmio0 + GPIO_REG_IO);
	mask = (dma->num == 0) ? GPIO_RFA_CTL : GPIO_RFB_CTL;

	/* inverted tuner control in hw rev. 1.4 */
	if (ndev->rev == NETUP_HW_REV_1_4)
		is_dvb_tc = !is_dvb_tc;

	if (!is_dvb_tc)
		reg |= mask;
	else
		reg &= ~mask;
	writeb(reg, ndev->bmmio0 + GPIO_REG_IO);
	return 0;
}

static void netup_unidvb_dev_enable(struct netup_unidvb_dev *ndev)
{
	u16 gpio_reg;

	/* enable PCI-E interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	/* assert reset on frontends and tuners, bits [0:1] and [4:5] */
	writeb(0x00, ndev->bmmio0 + GPIO_REG_IO);
	msleep(100);
	/* release the resets and set the RF control bits */
	gpio_reg =
		GPIO_FEA_RESET | GPIO_FEB_RESET |
		GPIO_FEA_TU_RESET | GPIO_FEB_TU_RESET |
		GPIO_RFA_CTL | GPIO_RFB_CTL;
	writeb(gpio_reg, ndev->bmmio0 + GPIO_REG_IO);
	dev_dbg(&ndev->pci_dev->dev,
		"%s(): AVL_PCIE_IENR 0x%x GPIO_REG_IO 0x%x\n",
		__func__, readl(ndev->bmmio0 + AVL_PCIE_IENR),
		(int)readb(ndev->bmmio0 + GPIO_REG_IO));
}

static void netup_unidvb_dma_enable(struct netup_dma *dma, int enable)
{
	u32 irq_mask = (dma->num == 0 ?
		NETUP_UNIDVB_IRQ_DMA1 : NETUP_UNIDVB_IRQ_DMA2);

	dev_dbg(&dma->ndev->pci_dev->dev,
		"%s(): DMA%d enable %d\n", __func__, dma->num, enable);
	if (enable) {
		writel(BIT_DMA_RUN, &dma->regs->ctrlstat_set);
		writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_SET);
	} else {
		writel(BIT_DMA_RUN, &dma->regs->ctrlstat_clear);
		writew(irq_mask, dma->ndev->bmmio0 + REG_IMASK_CLEAR);
	}
}

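/*
 * DMA completion interrupt: latch the hardware write pointer, work out
 * how many new bytes landed in the ring buffer since the previous
 * interrupt (accounting for wrap-around), then hand off to the copy
 * worker.
 */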
static irqreturn_t netup_dma_interrupt(struct netup_dma *dma)
{
	u64 addr_curr;
	u32 size;
	unsigned long flags;
	struct device *dev = &dma->ndev->pci_dev->dev;

	spin_lock_irqsave(&dma->lock, flags);
	addr_curr = ((u64)readl(&dma->regs->curr_addr_hi) << 32) |
		(u64)readl(&dma->regs->curr_addr_lo) | dma->high_addr;
	/* clear IRQ */
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	/* sanity check */
	if (addr_curr < dma->addr_phys ||
			addr_curr > dma->addr_phys + dma->ring_buffer_size) {
		if (addr_curr != 0) {
			dev_err(dev,
				"%s(): addr 0x%llx not from 0x%llx:0x%llx\n",
				__func__, addr_curr, (u64)dma->addr_phys,
				(u64)(dma->addr_phys + dma->ring_buffer_size));
		}
		goto irq_handled;
	}
	size = (addr_curr >= dma->addr_last) ?
		(u32)(addr_curr - dma->addr_last) :
		(u32)(dma->ring_buffer_size - (dma->addr_last - addr_curr));
	if (dma->data_size != 0) {
		printk_ratelimited(KERN_WARNING
			"%s(): lost interrupt, data size %d\n",
			__func__, dma->data_size);
		dma->data_size += size;
	}
	if (dma->data_size == 0 || dma->data_size > dma->ring_buffer_size) {
		dma->data_size = size;
		dma->data_offset = (u32)(dma->addr_last - dma->addr_phys);
	}
	dma->addr_last = addr_curr;
	queue_work(dma->ndev->wq, &dma->work);
irq_handled:
	spin_unlock_irqrestore(&dma->lock, flags);
	return IRQ_HANDLED;
}

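/*
 * Top-level PCIe interrupt handler: mask interrupts, dispatch exactly
 * one pending source (I2C, SPI, DMA or CI) per invocation, then
 * re-enable the global interrupt mask.
 */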
static irqreturn_t netup_unidvb_isr(int irq, void *dev_id)
{
	struct pci_dev *pci_dev = (struct pci_dev *)dev_id;
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);
	u32 reg40, reg_isr;
	irqreturn_t iret = IRQ_NONE;

	/* disable interrupts */
	writel(0, ndev->bmmio0 + AVL_PCIE_IENR);
	/* check IRQ source */
	reg40 = readl(ndev->bmmio0 + AVL_PCIE_ISR);
	if ((reg40 & AVL_IRQ_ASSERTED) != 0) {
		/* IRQ is being signaled */
		reg_isr = readw(ndev->bmmio0 + REG_ISR);
		if (reg_isr & NETUP_UNIDVB_IRQ_I2C0) {
			iret = netup_i2c_interrupt(&ndev->i2c[0]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_I2C1) {
			iret = netup_i2c_interrupt(&ndev->i2c[1]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_SPI) {
			iret = netup_spi_interrupt(ndev->spi);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA1) {
			iret = netup_dma_interrupt(&ndev->dma[0]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_DMA2) {
			iret = netup_dma_interrupt(&ndev->dma[1]);
		} else if (reg_isr & NETUP_UNIDVB_IRQ_CI) {
			iret = netup_ci_interrupt(ndev);
		} else {
			dev_err(&pci_dev->dev,
				"%s(): unknown interrupt 0x%x\n",
				__func__, reg_isr);
		}
	}
	/* re-enable interrupts */
	writel(AVL_IRQ_ENABLE, ndev->bmmio0 + AVL_PCIE_IENR);
	return iret;
}

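/* vb2 queue operations: each buffer holds one DMA block of 128 TS packets. */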
static int netup_unidvb_queue_setup(struct vb2_queue *vq,
				    unsigned int *nbuffers,
				    unsigned int *nplanes,
				    unsigned int sizes[],
				    struct device *alloc_devs[])
{
	struct netup_dma *dma = vb2_get_drv_priv(vq);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);

	*nplanes = 1;
	if (vq->num_buffers + *nbuffers < VIDEO_MAX_FRAME)
		*nbuffers = VIDEO_MAX_FRAME - vq->num_buffers;
	sizes[0] = PAGE_ALIGN(NETUP_DMA_PACKETS_COUNT * 188);
	dev_dbg(&dma->ndev->pci_dev->dev, "%s() nbuffers=%d sizes[0]=%d\n",
		__func__, *nbuffers, sizes[0]);
	return 0;
}

static int netup_unidvb_buf_prepare(struct vb2_buffer *vb)
{
	struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct netup_unidvb_buffer *buf = container_of(vbuf,
				struct netup_unidvb_buffer, vb);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s(): buf 0x%p\n", __func__, buf);
	buf->size = 0;
	return 0;
}

static void netup_unidvb_buf_queue(struct vb2_buffer *vb)
{
	unsigned long flags;
	struct netup_dma *dma = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct netup_unidvb_buffer *buf = container_of(vbuf,
				struct netup_unidvb_buffer, vb);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s(): %p\n", __func__, buf);
	spin_lock_irqsave(&dma->lock, flags);
	list_add_tail(&buf->list, &dma->free_buffers);
	spin_unlock_irqrestore(&dma->lock, flags);
	mod_timer(&dma->timeout, jiffies + msecs_to_jiffies(1000));
}

static int netup_unidvb_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct netup_dma *dma = vb2_get_drv_priv(q);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_dma_enable(dma, 1);
	return 0;
}

static void netup_unidvb_stop_streaming(struct vb2_queue *q)
{
	struct netup_dma *dma = vb2_get_drv_priv(q);

	dev_dbg(&dma->ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_dma_enable(dma, 0);
	netup_unidvb_queue_cleanup(dma);
}

static const struct vb2_ops dvb_qops = {
	.queue_setup		= netup_unidvb_queue_setup,
	.buf_prepare		= netup_unidvb_buf_prepare,
	.buf_queue		= netup_unidvb_buf_queue,
	.start_streaming	= netup_unidvb_start_streaming,
	.stop_streaming		= netup_unidvb_stop_streaming,
};

static int netup_unidvb_queue_init(struct netup_dma *dma,
				   struct vb2_queue *vb_queue)
{
	int res;

	/* Init videobuf2 queue structure */
	vb_queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	vb_queue->io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
	vb_queue->drv_priv = dma;
	vb_queue->buf_struct_size = sizeof(struct netup_unidvb_buffer);
	vb_queue->ops = &dvb_qops;
	vb_queue->mem_ops = &vb2_vmalloc_memops;
	vb_queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	res = vb2_queue_init(vb_queue);
	if (res != 0) {
		dev_err(&dma->ndev->pci_dev->dev,
			"%s(): vb2_queue_init failed (%d)\n", __func__, res);
	}
	return res;
}

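/*
 * Register one DVB adapter with two frontends that share a single
 * CXD2841ER demodulator: frontend 0 serves DVB-S/S2, frontend 1 serves
 * the terrestrial/cable standards. The attached tuner depends on the
 * hardware revision: HORUS3A/ASCOT2E on rev 1.3, HELENE on rev 1.4.
 */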
static int netup_unidvb_dvb_init(struct netup_unidvb_dev *ndev,
				 int num)
{
	static const char * const fe_names[] = { "netup_fe0", "netup_fe1" };
	int fe_count = 2;
	int i = 0;
	struct vb2_dvb_frontend *fes[2];

	if (ndev->rev == NETUP_HW_REV_1_3)
		demod_config.xtal = SONY_XTAL_20500;
	else
		demod_config.xtal = SONY_XTAL_24000;

	if (num < 0 || num > 1) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to init DVB bus %d\n", __func__, num);
		return -ENODEV;
	}
	mutex_init(&ndev->frontends[num].lock);
	INIT_LIST_HEAD(&ndev->frontends[num].felist);

	for (i = 0; i < fe_count; i++) {
		if (vb2_dvb_alloc_frontend(&ndev->frontends[num], i+1)
				== NULL) {
			dev_err(&ndev->pci_dev->dev,
					"%s(): unable to allocate vb2_dvb_frontend\n",
					__func__);
			return -ENOMEM;
		}
	}

	for (i = 0; i < fe_count; i++) {
		fes[i] = vb2_dvb_get_frontend(&ndev->frontends[num], i+1);
		if (fes[i] == NULL) {
			dev_err(&ndev->pci_dev->dev,
				"%s(): frontend has not been allocated\n",
				__func__);
			return -EINVAL;
		}
	}

	for (i = 0; i < fe_count; i++) {
		netup_unidvb_queue_init(&ndev->dma[num], &fes[i]->dvb.dvbq);
		/*
		 * dvb.name keeps the pointer it is given, so it must not
		 * point into stack storage; use static string literals.
		 */
		fes[i]->dvb.name = (char *)fe_names[i];
	}

	fes[0]->dvb.frontend = dvb_attach(cxd2841er_attach_s,
		&demod_config, &ndev->i2c[num].adap);
	if (fes[0]->dvb.frontend == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach DVB-S/S2 frontend\n",
			__func__);
		goto frontend_detach;
	}

	if (ndev->rev == NETUP_HW_REV_1_3) {
		horus3a_conf.set_tuner_priv = &ndev->dma[num];
		if (!dvb_attach(horus3a_attach, fes[0]->dvb.frontend,
					&horus3a_conf, &ndev->i2c[num].adap)) {
			dev_dbg(&ndev->pci_dev->dev,
					"%s(): unable to attach HORUS3A DVB-S/S2 tuner frontend\n",
					__func__);
			goto frontend_detach;
		}
	} else {
		helene_conf.set_tuner_priv = &ndev->dma[num];
		if (!dvb_attach(helene_attach_s, fes[0]->dvb.frontend,
					&helene_conf, &ndev->i2c[num].adap)) {
			dev_err(&ndev->pci_dev->dev,
					"%s(): unable to attach HELENE DVB-S/S2 tuner frontend\n",
					__func__);
			goto frontend_detach;
		}
	}

	if (!dvb_attach(lnbh25_attach, fes[0]->dvb.frontend,
			&lnbh25_conf, &ndev->i2c[num].adap)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach SEC frontend\n", __func__);
		goto frontend_detach;
	}

	/* DVB-T/T2/C frontend */
	fes[1]->dvb.frontend = dvb_attach(cxd2841er_attach_t_c,
		&demod_config, &ndev->i2c[num].adap);
	if (fes[1]->dvb.frontend == NULL) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to attach Ter frontend\n", __func__);
		goto frontend_detach;
	}
	fes[1]->dvb.frontend->id = 1;
	if (ndev->rev == NETUP_HW_REV_1_3) {
		ascot2e_conf.set_tuner_priv = &ndev->dma[num];
		if (!dvb_attach(ascot2e_attach, fes[1]->dvb.frontend,
					&ascot2e_conf, &ndev->i2c[num].adap)) {
			dev_dbg(&ndev->pci_dev->dev,
					"%s(): unable to attach Ter tuner frontend\n",
					__func__);
			goto frontend_detach;
		}
	} else {
		helene_conf.set_tuner_priv = &ndev->dma[num];
		if (!dvb_attach(helene_attach, fes[1]->dvb.frontend,
					&helene_conf, &ndev->i2c[num].adap)) {
			dev_err(&ndev->pci_dev->dev,
					"%s(): unable to attach HELENE Ter tuner frontend\n",
					__func__);
			goto frontend_detach;
		}
	}

	if (vb2_dvb_register_bus(&ndev->frontends[num],
				 THIS_MODULE, NULL,
				 &ndev->pci_dev->dev, NULL, adapter_nr, 1)) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): unable to register DVB bus %d\n",
			__func__, num);
		goto frontend_detach;
	}
	dev_info(&ndev->pci_dev->dev, "DVB init done, num=%d\n", num);
	return 0;
frontend_detach:
	vb2_dvb_dealloc_frontends(&ndev->frontends[num]);
	return -EINVAL;
}

static void netup_unidvb_dvb_fini(struct netup_unidvb_dev *ndev, int num)
{
	if (num < 0 || num > 1) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): unable to unregister DVB bus %d\n",
			__func__, num);
		return;
	}
	vb2_dvb_unregister_bus(&ndev->frontends[num]);
	dev_info(&ndev->pci_dev->dev,
		"%s(): DVB bus %d unregistered\n", __func__, num);
}

static int netup_unidvb_dvb_setup(struct netup_unidvb_dev *ndev)
{
	int res;

	res = netup_unidvb_dvb_init(ndev, 0);
	if (res)
		return res;
	res = netup_unidvb_dvb_init(ndev, 1);
	if (res) {
		netup_unidvb_dvb_fini(ndev, 0);
		return res;
	}
	return 0;
}

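/*
 * Copy received TS bytes from the DMA ring into the current vb2
 * buffer. When the valid region wraps past the end of the ring the
 * copy is done in two steps: tail of the ring first, then the head.
 */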
static int netup_unidvb_ring_copy(struct netup_dma *dma,
				  struct netup_unidvb_buffer *buf)
{
	u32 copy_bytes, ring_bytes;
	u32 buff_bytes = NETUP_DMA_PACKETS_COUNT * 188 - buf->size;
	u8 *p = vb2_plane_vaddr(&buf->vb.vb2_buf, 0);
	struct netup_unidvb_dev *ndev = dma->ndev;

	if (p == NULL) {
		dev_err(&ndev->pci_dev->dev,
			"%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	p += buf->size;
	if (dma->data_offset + dma->data_size > dma->ring_buffer_size) {
		ring_bytes = dma->ring_buffer_size - dma->data_offset;
		copy_bytes = (ring_bytes > buff_bytes) ?
			buff_bytes : ring_bytes;
		memcpy_fromio(p, (u8 __iomem *)(dma->addr_virt +
			dma->data_offset), copy_bytes);
		p += copy_bytes;
		buf->size += copy_bytes;
		buff_bytes -= copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	if (buff_bytes > 0) {
		ring_bytes = dma->data_size;
		copy_bytes = (ring_bytes > buff_bytes) ?
				buff_bytes : ring_bytes;
		memcpy_fromio(p, (u8 __iomem *)(dma->addr_virt +
			dma->data_offset), copy_bytes);
		buf->size += copy_bytes;
		dma->data_size -= copy_bytes;
		dma->data_offset += copy_bytes;
		if (dma->data_offset == dma->ring_buffer_size)
			dma->data_offset = 0;
	}
	return 0;
}

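/*
 * Deferred-work path: drain pending ring data into queued vb2 buffers,
 * completing each buffer once it holds a full 128-packet block.
 */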
static void netup_unidvb_dma_worker(struct work_struct *work)
{
	struct netup_dma *dma = container_of(work, struct netup_dma, work);
	struct netup_unidvb_dev *ndev = dma->ndev;
	struct netup_unidvb_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	if (dma->data_size == 0) {
		dev_dbg(&ndev->pci_dev->dev,
			"%s(): data_size == 0\n", __func__);
		goto work_done;
	}
	while (dma->data_size > 0) {
		if (list_empty(&dma->free_buffers)) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): no free buffers\n", __func__);
			goto work_done;
		}
		buf = list_first_entry(&dma->free_buffers,
			struct netup_unidvb_buffer, list);
		if (buf->size >= NETUP_DMA_PACKETS_COUNT * 188) {
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer overflow, size %d\n",
				__func__, buf->size);
			goto work_done;
		}
		if (netup_unidvb_ring_copy(dma, buf))
			goto work_done;
		if (buf->size == NETUP_DMA_PACKETS_COUNT * 188) {
			list_del(&buf->list);
			dev_dbg(&ndev->pci_dev->dev,
				"%s(): buffer %p done, size %d\n",
				__func__, buf, buf->size);
			buf->vb.vb2_buf.timestamp = ktime_get_ns();
			vb2_set_plane_payload(&buf->vb.vb2_buf, 0, buf->size);
			vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE);
		}
	}
work_done:
	dma->data_size = 0;
	spin_unlock_irqrestore(&dma->lock, flags);
}

static void netup_unidvb_queue_cleanup(struct netup_dma *dma)
{
	struct netup_unidvb_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&dma->lock, flags);
	while (!list_empty(&dma->free_buffers)) {
		buf = list_first_entry(&dma->free_buffers,
			struct netup_unidvb_buffer, list);
		list_del(&buf->list);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&dma->lock, flags);
}

static void netup_unidvb_dma_timeout(struct timer_list *t)
{
	struct netup_dma *dma = from_timer(dma, t, timeout);
	struct netup_unidvb_dev *ndev = dma->ndev;

	dev_dbg(&ndev->pci_dev->dev, "%s()\n", __func__);
	netup_unidvb_queue_cleanup(dma);
}

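/*
 * Program one DMA engine. Each engine owns half of the coherent DMA
 * area as a ring of 8 blocks * 128 TS packets * 188 bytes. The size
 * register encodes (blocks << 24) | (packets << 8) | packet_size, the
 * timeout register counts 8 ns units (375000000 * 8 ns = 3 s), and the
 * top address bits (mask 0xC0000000) are programmed separately at
 * BAR0 offset 0x1000.
 */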
static int netup_unidvb_dma_init(struct netup_unidvb_dev *ndev, int num)
{
	struct netup_dma *dma;
	struct device *dev = &ndev->pci_dev->dev;

	if (num < 0 || num > 1) {
		dev_err(dev, "%s(): unable to register DMA%d\n",
			__func__, num);
		return -ENODEV;
	}
	dma = &ndev->dma[num];
	dev_info(dev, "%s(): starting DMA%d\n", __func__, num);
	dma->num = num;
	dma->ndev = ndev;
	spin_lock_init(&dma->lock);
	INIT_WORK(&dma->work, netup_unidvb_dma_worker);
	INIT_LIST_HEAD(&dma->free_buffers);
	timer_setup(&dma->timeout, netup_unidvb_dma_timeout, 0);
	dma->ring_buffer_size = ndev->dma_size / 2;
	dma->addr_virt = ndev->dma_virt + dma->ring_buffer_size * num;
	dma->addr_phys = (dma_addr_t)((u64)ndev->dma_phys +
		dma->ring_buffer_size * num);
	dev_info(dev, "%s(): DMA%d buffer virt/phys 0x%p/0x%llx size %d\n",
		__func__, num, dma->addr_virt,
		(unsigned long long)dma->addr_phys,
		dma->ring_buffer_size);
	memset_io((u8 __iomem *)dma->addr_virt, 0, dma->ring_buffer_size);
	dma->addr_last = dma->addr_phys;
	dma->high_addr = (u32)(dma->addr_phys & 0xC0000000);
	dma->regs = (struct netup_dma_regs __iomem *)(num == 0 ?
		ndev->bmmio0 + NETUP_DMA0_ADDR :
		ndev->bmmio0 + NETUP_DMA1_ADDR);
	writel((NETUP_DMA_BLOCKS_COUNT << 24) |
		(NETUP_DMA_PACKETS_COUNT << 8) | 188, &dma->regs->size);
	writel((u32)(dma->addr_phys & 0x3FFFFFFF), &dma->regs->start_addr_lo);
	writel(0, &dma->regs->start_addr_hi);
	writel(dma->high_addr, ndev->bmmio0 + 0x1000);
	writel(375000000, &dma->regs->timeout);
	msleep(1000);
	writel(BIT_DMA_IRQ, &dma->regs->ctrlstat_clear);
	return 0;
}

static void netup_unidvb_dma_fini(struct netup_unidvb_dev *ndev, int num)
{
	struct netup_dma *dma;

	if (num < 0 || num > 1)
		return;
	dev_dbg(&ndev->pci_dev->dev, "%s(): num %d\n", __func__, num);
	dma = &ndev->dma[num];
	netup_unidvb_dma_enable(dma, 0);
	msleep(50);
	cancel_work_sync(&dma->work);
	del_timer(&dma->timeout);
}

static int netup_unidvb_dma_setup(struct netup_unidvb_dev *ndev)
{
	int res;

	res = netup_unidvb_dma_init(ndev, 0);
	if (res)
		return res;
	res = netup_unidvb_dma_init(ndev, 1);
	if (res) {
		netup_unidvb_dma_fini(ndev, 0);
		return res;
	}
	netup_unidvb_dma_enable(&ndev->dma[0], 0);
	netup_unidvb_dma_enable(&ndev->dma[1], 0);
	return 0;
}

static int netup_unidvb_ci_setup(struct netup_unidvb_dev *ndev,
				 struct pci_dev *pci_dev)
{
	int res;

	writew(NETUP_UNIDVB_IRQ_CI, ndev->bmmio0 + REG_IMASK_SET);
	res = netup_unidvb_ci_register(ndev, 0, pci_dev);
	if (res)
		return res;
	res = netup_unidvb_ci_register(ndev, 1, pci_dev);
	if (res)
		netup_unidvb_ci_unregister(ndev, 0);
	return res;
}

static int netup_unidvb_request_mmio(struct pci_dev *pci_dev)
{
	if (!request_mem_region(pci_resource_start(pci_dev, 0),
			pci_resource_len(pci_dev, 0), NETUP_UNIDVB_NAME)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO bar 0 at 0x%llx\n",
			__func__,
			(unsigned long long)pci_resource_start(pci_dev, 0));
		return -EBUSY;
	}
	if (!request_mem_region(pci_resource_start(pci_dev, 1),
			pci_resource_len(pci_dev, 1), NETUP_UNIDVB_NAME)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO bar 1 at 0x%llx\n",
			__func__,
			(unsigned long long)pci_resource_start(pci_dev, 1));
		release_mem_region(pci_resource_start(pci_dev, 0),
			pci_resource_len(pci_dev, 0));
		return -EBUSY;
	}
	return 0;
}

static int netup_unidvb_request_modules(struct device *dev)
{
	static const char * const modules[] = {
		"lnbh25", "ascot2e", "horus3a", "cxd2841er", "helene", NULL
	};
	const char * const *curr_mod = modules;
	int err;

	while (*curr_mod != NULL) {
		err = request_module(*curr_mod);
		if (err) {
			dev_warn(dev, "request_module(%s) failed: %d\n",
				*curr_mod, err);
		}
		++curr_mod;
	}
	return 0;
}

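/*
 * PCI probe: detect the hardware revision, map both BARs, allocate the
 * shared DMA area and bring up SPI, I2C, DVB, CI and the DMA engines
 * in order, unwinding in reverse on any failure.
 */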
static int netup_unidvb_initdev(struct pci_dev *pci_dev,
				const struct pci_device_id *pci_id)
{
	u8 board_revision;
	u16 board_vendor;
	struct netup_unidvb_dev *ndev;
	int old_firmware = 0;

	netup_unidvb_request_modules(&pci_dev->dev);

	/* Check card revision */
	if (pci_dev->revision != NETUP_PCI_DEV_REVISION) {
		dev_err(&pci_dev->dev,
			"netup_unidvb: expected card revision %d, got %d\n",
			NETUP_PCI_DEV_REVISION, pci_dev->revision);
		dev_err(&pci_dev->dev,
			"Please upgrade firmware!\n");
		dev_err(&pci_dev->dev,
			"Instructions on http://www.netup.tv\n");
		old_firmware = 1;
		spi_enable = 1;
	}

	/* allocate device context */
	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev)
		goto dev_alloc_err;

	/* detect hardware revision */
	if (pci_dev->device == NETUP_HW_REV_1_3)
		ndev->rev = NETUP_HW_REV_1_3;
	else
		ndev->rev = NETUP_HW_REV_1_4;

	dev_info(&pci_dev->dev,
		"%s(): board (0x%x) hardware revision 0x%x\n",
		__func__, pci_dev->device, ndev->rev);

	ndev->old_fw = old_firmware;
	ndev->wq = create_singlethread_workqueue(NETUP_UNIDVB_NAME);
	if (!ndev->wq) {
		dev_err(&pci_dev->dev,
			"%s(): unable to create workqueue\n", __func__);
		goto wq_create_err;
	}
	ndev->pci_dev = pci_dev;
	ndev->pci_bus = pci_dev->bus->number;
	ndev->pci_slot = PCI_SLOT(pci_dev->devfn);
	ndev->pci_func = PCI_FUNC(pci_dev->devfn);
	ndev->board_num = ndev->pci_bus * 10 + ndev->pci_slot;
	pci_set_drvdata(pci_dev, ndev);
	/* PCI init */
	dev_info(&pci_dev->dev, "%s(): PCI device (%d). Bus:0x%x Slot:0x%x\n",
		__func__, ndev->board_num, ndev->pci_bus, ndev->pci_slot);

	if (pci_enable_device(pci_dev)) {
		dev_err(&pci_dev->dev, "%s(): pci_enable_device failed\n",
			__func__);
		goto pci_enable_err;
	}
	/* read PCI info */
	pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &board_revision);
	pci_read_config_word(pci_dev, PCI_VENDOR_ID, &board_vendor);
	if (board_vendor != NETUP_VENDOR_ID) {
		dev_err(&pci_dev->dev, "%s(): unknown board vendor 0x%x\n",
			__func__, board_vendor);
		goto pci_detect_err;
	}
	dev_info(&pci_dev->dev,
		"%s(): board vendor 0x%x, revision 0x%x\n",
		__func__, board_vendor, board_revision);
	pci_set_master(pci_dev);
	if (pci_set_dma_mask(pci_dev, 0xffffffff) < 0) {
		dev_err(&pci_dev->dev,
			"%s(): 32bit PCI DMA is not supported\n", __func__);
		goto pci_detect_err;
	}
	dev_info(&pci_dev->dev, "%s(): using 32bit PCI DMA\n", __func__);
	/* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
	pcie_capability_clear_and_set_word(pci_dev, PCI_EXP_DEVCTL,
		PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
		PCI_EXP_DEVCTL_NOSNOOP_EN, 0);
	/* Adjust PCIe completion timeout. */
	pcie_capability_clear_and_set_word(pci_dev,
		PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0x2);

	if (netup_unidvb_request_mmio(pci_dev)) {
		dev_err(&pci_dev->dev,
			"%s(): unable to request MMIO regions\n", __func__);
		goto pci_detect_err;
	}
	ndev->lmmio0 = ioremap(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	if (!ndev->lmmio0) {
		dev_err(&pci_dev->dev,
			"%s(): unable to remap MMIO bar 0\n", __func__);
		goto pci_bar0_error;
	}
	ndev->lmmio1 = ioremap(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	if (!ndev->lmmio1) {
		dev_err(&pci_dev->dev,
			"%s(): unable to remap MMIO bar 1\n", __func__);
		goto pci_bar1_error;
	}
	ndev->bmmio0 = (u8 __iomem *)ndev->lmmio0;
	ndev->bmmio1 = (u8 __iomem *)ndev->lmmio1;
	dev_info(&pci_dev->dev,
		"%s(): PCI MMIO at 0x%p (%d); 0x%p (%d); IRQ %d\n",
		__func__,
		ndev->lmmio0, (u32)pci_resource_len(pci_dev, 0),
		ndev->lmmio1, (u32)pci_resource_len(pci_dev, 1),
		pci_dev->irq);
	if (request_irq(pci_dev->irq, netup_unidvb_isr, IRQF_SHARED,
			"netup_unidvb", pci_dev) < 0) {
		dev_err(&pci_dev->dev,
			"%s(): can't get IRQ %d\n", __func__, pci_dev->irq);
		goto irq_request_err;
	}
	ndev->dma_size = 2 * 188 *
		NETUP_DMA_BLOCKS_COUNT * NETUP_DMA_PACKETS_COUNT;
	ndev->dma_virt = dma_alloc_coherent(&pci_dev->dev,
		ndev->dma_size, &ndev->dma_phys, GFP_KERNEL);
	if (!ndev->dma_virt) {
		dev_err(&pci_dev->dev, "%s(): unable to allocate DMA buffer\n",
			__func__);
		goto dma_alloc_err;
	}
	netup_unidvb_dev_enable(ndev);
	if (spi_enable && netup_spi_init(ndev)) {
		dev_warn(&pci_dev->dev,
			"netup_unidvb: SPI flash setup failed\n");
		goto spi_setup_err;
	}
	if (old_firmware) {
		dev_err(&pci_dev->dev,
			"netup_unidvb: card initialization was incomplete\n");
		return 0;
	}
	if (netup_i2c_register(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: I2C setup failed\n");
		goto i2c_setup_err;
	}
	/* enable I2C IRQs */
	writew(NETUP_UNIDVB_IRQ_I2C0 | NETUP_UNIDVB_IRQ_I2C1,
		ndev->bmmio0 + REG_IMASK_SET);
	usleep_range(5000, 10000);
	if (netup_unidvb_dvb_setup(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: DVB setup failed\n");
		goto dvb_setup_err;
	}
	if (netup_unidvb_ci_setup(ndev, pci_dev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: CI setup failed\n");
		goto ci_setup_err;
	}
	if (netup_unidvb_dma_setup(ndev)) {
		dev_err(&pci_dev->dev, "netup_unidvb: DMA setup failed\n");
		goto dma_setup_err;
	}
	dev_info(&pci_dev->dev,
		"netup_unidvb: device has been initialized\n");
	return 0;
dma_setup_err:
	netup_unidvb_ci_unregister(ndev, 0);
	netup_unidvb_ci_unregister(ndev, 1);
ci_setup_err:
	netup_unidvb_dvb_fini(ndev, 0);
	netup_unidvb_dvb_fini(ndev, 1);
dvb_setup_err:
	netup_i2c_unregister(ndev);
i2c_setup_err:
	if (ndev->spi)
		netup_spi_release(ndev);
spi_setup_err:
	dma_free_coherent(&pci_dev->dev, ndev->dma_size,
			ndev->dma_virt, ndev->dma_phys);
dma_alloc_err:
	free_irq(pci_dev->irq, pci_dev);
irq_request_err:
	iounmap(ndev->lmmio1);
pci_bar1_error:
	iounmap(ndev->lmmio0);
pci_bar0_error:
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
pci_detect_err:
	pci_disable_device(pci_dev);
pci_enable_err:
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
wq_create_err:
	kfree(ndev);
dev_alloc_err:
	dev_err(&pci_dev->dev,
		"%s(): failed to initialize device\n", __func__);
	return -EIO;
}

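/* PCI remove: stop DMA and tear everything down in reverse probe order. */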
static void netup_unidvb_finidev(struct pci_dev *pci_dev)
{
	struct netup_unidvb_dev *ndev = pci_get_drvdata(pci_dev);

	dev_info(&pci_dev->dev, "%s(): trying to stop device\n", __func__);
	if (!ndev->old_fw) {
		netup_unidvb_dma_fini(ndev, 0);
		netup_unidvb_dma_fini(ndev, 1);
		netup_unidvb_ci_unregister(ndev, 0);
		netup_unidvb_ci_unregister(ndev, 1);
		netup_unidvb_dvb_fini(ndev, 0);
		netup_unidvb_dvb_fini(ndev, 1);
		netup_i2c_unregister(ndev);
	}
	if (ndev->spi)
		netup_spi_release(ndev);
	writew(0xffff, ndev->bmmio0 + REG_IMASK_CLEAR);
	dma_free_coherent(&ndev->pci_dev->dev, ndev->dma_size,
			ndev->dma_virt, ndev->dma_phys);
	free_irq(pci_dev->irq, pci_dev);
	iounmap(ndev->lmmio0);
	iounmap(ndev->lmmio1);
	release_mem_region(pci_resource_start(pci_dev, 0),
		pci_resource_len(pci_dev, 0));
	release_mem_region(pci_resource_start(pci_dev, 1),
		pci_resource_len(pci_dev, 1));
	pci_disable_device(pci_dev);
	pci_set_drvdata(pci_dev, NULL);
	destroy_workqueue(ndev->wq);
	kfree(ndev);
	dev_info(&pci_dev->dev,
		"%s(): device has been successfully stopped\n", __func__);
}

static const struct pci_device_id netup_unidvb_pci_tbl[] = {
	{ PCI_DEVICE(0x1b55, 0x18f6) }, /* hw rev. 1.3 */
	{ PCI_DEVICE(0x1b55, 0x18f7) }, /* hw rev. 1.4 */
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, netup_unidvb_pci_tbl);

static struct pci_driver netup_unidvb_pci_driver = {
	.name     = "netup_unidvb",
	.id_table = netup_unidvb_pci_tbl,
	.probe    = netup_unidvb_initdev,
	.remove   = netup_unidvb_finidev,
	.suspend  = NULL,
	.resume   = NULL,
};

module_pci_driver(netup_unidvb_pci_driver);