/*
 * driver for Earthsoft PT1/PT2
 *
 * Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
 *
 * based on pt1dvr - http://pt1dvr.sourceforge.jp/
 * 	by Tomoaki Ishikawa <tomy@users.sourceforge.jp>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/ratelimit.h>

#include "dvbdev.h"
#include "dvb_demux.h"
#include "dmxdev.h"
#include "dvb_net.h"
#include "dvb_frontend.h"

#include "va1j5jf8007t.h"
#include "va1j5jf8007s.h"

#define DRIVER_NAME "earth-pt1"

#define PT1_PAGE_SHIFT 12
#define PT1_PAGE_SIZE (1 << PT1_PAGE_SHIFT)
#define PT1_NR_UPACKETS 1024
#define PT1_NR_BUFS 511

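/*
 * DMA layout: the device writes the transport streams into 4 KiB buffer
 * pages, each holding PT1_NR_UPACKETS 32-bit "micro packets".  Buffer
 * pages are tracked by table pages, each listing PT1_NR_BUFS buffer
 * page-frame numbers plus the PFN of the next table page, so the tables
 * form a ring that is handed to the hardware and walked by pt1_thread.
 */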
struct pt1_buffer_page {
	__le32 upackets[PT1_NR_UPACKETS];
};

struct pt1_table_page {
	__le32 next_pfn;
	__le32 buf_pfns[PT1_NR_BUFS];
};

struct pt1_buffer {
	struct pt1_buffer_page *page;
	dma_addr_t addr;
};

struct pt1_table {
	struct pt1_table_page *page;
	dma_addr_t addr;
	struct pt1_buffer bufs[PT1_NR_BUFS];
};

#define PT1_NR_ADAPS 4

struct pt1_adapter;

struct pt1 {
	struct pci_dev *pdev;
	void __iomem *regs;
	struct i2c_adapter i2c_adap;
	int i2c_running;
	struct pt1_adapter *adaps[PT1_NR_ADAPS];
	struct pt1_table *tables;
	struct task_struct *kthread;
	int table_index;
	int buf_index;

	struct mutex lock;
	int power;
	int reset;
};

struct pt1_adapter {
	struct pt1 *pt1;
	int index;

	u8 *buf;
	int upacket_count;
	int packet_count;
	int st_count;

	struct dvb_adapter adap;
	struct dvb_demux demux;
	int users;
	struct dmxdev dmxdev;
	struct dvb_frontend *fe;
	int (*orig_set_voltage)(struct dvb_frontend *fe,
				enum fe_sec_voltage voltage);
	int (*orig_sleep)(struct dvb_frontend *fe);
	int (*orig_init)(struct dvb_frontend *fe);

	enum fe_sec_voltage voltage;
	int sleep;
};

static void pt1_write_reg(struct pt1 *pt1, int reg, u32 data)
{
	writel(data, pt1->regs + reg * 4);
}

static u32 pt1_read_reg(struct pt1 *pt1, int reg)
{
	return readl(pt1->regs + reg * 4);
}

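/*
 * Number of table pages in the DMA ring.  With the default of 8 tables
 * of 511 buffers each, about 16 MiB of coherent DMA memory is allocated.
 */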
static int pt1_nr_tables = 8;
module_param_named(nr_tables, pt1_nr_tables, int, 0);

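/*
 * Register map, inferred from the accesses below: reg 0 appears to be a
 * command/status word, reg 1 drives power, reset and the LNB supply,
 * reg 2 carries the per-stream enables and the I2C read-back data,
 * reg 4 is the I2C sequencer program memory and reg 5 takes the PFN of
 * the first table page.
 */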
static void pt1_increment_table_count(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x00000020);
}

static void pt1_init_table_count(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x00000010);
}

static void pt1_register_tables(struct pt1 *pt1, u32 first_pfn)
{
	pt1_write_reg(pt1, 5, first_pfn);
	pt1_write_reg(pt1, 0, 0x0c000040);
}

static void pt1_unregister_tables(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x08080000);
}

static int pt1_sync(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 57; i++) {
		if (pt1_read_reg(pt1, 0) & 0x20000000)
			return 0;
		pt1_write_reg(pt1, 0, 0x00000008);
	}
	dev_err(&pt1->pdev->dev, "could not sync\n");
	return -EIO;
}

static u64 pt1_identify(struct pt1 *pt1)
{
	int i;
	u64 id;
	id = 0;
	for (i = 0; i < 57; i++) {
		id |= (u64)(pt1_read_reg(pt1, 0) >> 30 & 1) << i;
		pt1_write_reg(pt1, 0, 0x00000008);
	}
	return id;
}

static int pt1_unlock(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x00000008);
	for (i = 0; i < 3; i++) {
		if (pt1_read_reg(pt1, 0) & 0x80000000)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	dev_err(&pt1->pdev->dev, "could not unlock\n");
	return -EIO;
}

static int pt1_reset_pci(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x01010000);
	pt1_write_reg(pt1, 0, 0x01000000);
	for (i = 0; i < 10; i++) {
		if (pt1_read_reg(pt1, 0) & 0x00000001)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	dev_err(&pt1->pdev->dev, "could not reset PCI\n");
	return -EIO;
}

static int pt1_reset_ram(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x02020000);
	pt1_write_reg(pt1, 0, 0x02000000);
	for (i = 0; i < 10; i++) {
		if (pt1_read_reg(pt1, 0) & 0x00000002)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	dev_err(&pt1->pdev->dev, "could not reset RAM\n");
	return -EIO;
}

static int pt1_do_enable_ram(struct pt1 *pt1)
{
	int i, j;
	u32 status;
	status = pt1_read_reg(pt1, 0) & 0x00000004;
	pt1_write_reg(pt1, 0, 0x00000002);
	for (i = 0; i < 10; i++) {
		for (j = 0; j < 1024; j++) {
			if ((pt1_read_reg(pt1, 0) & 0x00000004) != status)
				return 0;
		}
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	dev_err(&pt1->pdev->dev, "could not enable RAM\n");
	return -EIO;
}

static int pt1_enable_ram(struct pt1 *pt1)
{
	int i, ret;
	int phase;
	schedule_timeout_uninterruptible((HZ + 999) / 1000);
	phase = pt1->pdev->device == 0x211a ? 128 : 166;
	for (i = 0; i < phase; i++) {
		ret = pt1_do_enable_ram(pt1);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static void pt1_disable_ram(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x0b0b0000);
}

static void pt1_set_stream(struct pt1 *pt1, int index, int enabled)
{
	pt1_write_reg(pt1, 2, 1 << (index + 8) | enabled << index);
}

static void pt1_init_streams(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_set_stream(pt1, i, 0);
}

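/*
 * Each 32-bit micro packet carries up to three TS bytes in bits 23-16,
 * 15-8 and 7-0, the stream number plus one in bits 31-29, a 3-bit
 * sequence counter in bits 28-26, a packet-start flag in bit 25 and an
 * overflow flag in bit 24.  63 micro packets (62 * 3 + 2 bytes)
 * reassemble one 188-byte TS packet, and 21 packets are collected per
 * adapter before being fed to the demux.
 */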
static int pt1_filter(struct pt1 *pt1, struct pt1_buffer_page *page)
{
	u32 upacket;
	int i;
	int index;
	struct pt1_adapter *adap;
	int offset;
	u8 *buf;
	int sc;

	if (!page->upackets[PT1_NR_UPACKETS - 1])
		return 0;

	for (i = 0; i < PT1_NR_UPACKETS; i++) {
		upacket = le32_to_cpu(page->upackets[i]);
		index = (upacket >> 29) - 1;
		if (index < 0 || index >= PT1_NR_ADAPS)
			continue;

		adap = pt1->adaps[index];
		if (upacket >> 25 & 1)
			adap->upacket_count = 0;
		else if (!adap->upacket_count)
			continue;

		if (upacket >> 24 & 1)
			printk_ratelimited(KERN_INFO "earth-pt1: device buffer overflowing. table[%d] buf[%d]\n",
				pt1->table_index, pt1->buf_index);
		sc = upacket >> 26 & 0x7;
		if (adap->st_count != -1 && sc != ((adap->st_count + 1) & 0x7))
			printk_ratelimited(KERN_INFO "earth-pt1: data loss in streamID(adapter)[%d]\n",
					   index);
		adap->st_count = sc;

		buf = adap->buf;
		offset = adap->packet_count * 188 + adap->upacket_count * 3;
		buf[offset] = upacket >> 16;
		buf[offset + 1] = upacket >> 8;
		if (adap->upacket_count != 62)
			buf[offset + 2] = upacket;

		if (++adap->upacket_count >= 63) {
			adap->upacket_count = 0;
			if (++adap->packet_count >= 21) {
				dvb_dmx_swfilter_packets(&adap->demux, buf, 21);
				adap->packet_count = 0;
			}
		}
	}

	page->upackets[PT1_NR_UPACKETS - 1] = 0;
	return 1;
}

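/*
 * Polling thread: consume one buffer page per pass, advancing through
 * the ring and telling the hardware whenever a whole table has been
 * drained.  pt1_filter() treats a non-zero last micro packet as "page
 * complete" and clears it once consumed, so an all-zero tail means
 * there is nothing new to read yet.
 */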
static int pt1_thread(void *data)
{
	struct pt1 *pt1;
	struct pt1_buffer_page *page;

	pt1 = data;
	set_freezable();

	while (!kthread_should_stop()) {
		try_to_freeze();

		page = pt1->tables[pt1->table_index].bufs[pt1->buf_index].page;
		if (!pt1_filter(pt1, page)) {
			schedule_timeout_interruptible((HZ + 999) / 1000);
			continue;
		}

		if (++pt1->buf_index >= PT1_NR_BUFS) {
			pt1_increment_table_count(pt1);
			pt1->buf_index = 0;
			if (++pt1->table_index >= pt1_nr_tables)
				pt1->table_index = 0;
		}
	}

	return 0;
}

static void pt1_free_page(struct pt1 *pt1, void *page, dma_addr_t addr)
{
	dma_free_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, page, addr);
}

static void *pt1_alloc_page(struct pt1 *pt1, dma_addr_t *addrp, u32 *pfnp)
{
	void *page;
	dma_addr_t addr;

	page = dma_alloc_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, &addr,
				  GFP_KERNEL);
	if (page == NULL)
		return NULL;

	BUG_ON(addr & (PT1_PAGE_SIZE - 1));
	BUG_ON(addr >> PT1_PAGE_SHIFT >> 31 >> 1);

	*addrp = addr;
	*pfnp = addr >> PT1_PAGE_SHIFT;
	return page;
}

static void pt1_cleanup_buffer(struct pt1 *pt1, struct pt1_buffer *buf)
{
	pt1_free_page(pt1, buf->page, buf->addr);
}

static int
pt1_init_buffer(struct pt1 *pt1, struct pt1_buffer *buf, u32 *pfnp)
{
	struct pt1_buffer_page *page;
	dma_addr_t addr;

	page = pt1_alloc_page(pt1, &addr, pfnp);
	if (page == NULL)
		return -ENOMEM;

	page->upackets[PT1_NR_UPACKETS - 1] = 0;

	buf->page = page;
	buf->addr = addr;
	return 0;
}

static void pt1_cleanup_table(struct pt1 *pt1, struct pt1_table *table)
{
	int i;

	for (i = 0; i < PT1_NR_BUFS; i++)
		pt1_cleanup_buffer(pt1, &table->bufs[i]);

	pt1_free_page(pt1, table->page, table->addr);
}

static int
pt1_init_table(struct pt1 *pt1, struct pt1_table *table, u32 *pfnp)
{
	struct pt1_table_page *page;
	dma_addr_t addr;
	int i, ret;
	u32 buf_pfn;

	page = pt1_alloc_page(pt1, &addr, pfnp);
	if (page == NULL)
		return -ENOMEM;

	for (i = 0; i < PT1_NR_BUFS; i++) {
		ret = pt1_init_buffer(pt1, &table->bufs[i], &buf_pfn);
		if (ret < 0)
			goto err;

		page->buf_pfns[i] = cpu_to_le32(buf_pfn);
	}

	pt1_increment_table_count(pt1);
	table->page = page;
	table->addr = addr;
	return 0;

err:
	while (i--)
		pt1_cleanup_buffer(pt1, &table->bufs[i]);

	pt1_free_page(pt1, page, addr);
	return ret;
}

static void pt1_cleanup_tables(struct pt1 *pt1)
{
	struct pt1_table *tables;
	int i;

	tables = pt1->tables;
	pt1_unregister_tables(pt1);

	for (i = 0; i < pt1_nr_tables; i++)
		pt1_cleanup_table(pt1, &tables[i]);

	vfree(tables);
}

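/*
 * Allocate pt1_nr_tables table pages, chain them into a circular list
 * via their next_pfn fields and hand the first PFN to the hardware.
 */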
static int pt1_init_tables(struct pt1 *pt1)
{
	struct pt1_table *tables;
	int i, ret;
	u32 first_pfn, pfn;

	tables = vmalloc(sizeof(struct pt1_table) * pt1_nr_tables);
	if (tables == NULL)
		return -ENOMEM;

	pt1_init_table_count(pt1);

	i = 0;
	if (pt1_nr_tables) {
		ret = pt1_init_table(pt1, &tables[0], &first_pfn);
		if (ret)
			goto err;
		i++;
	}

	while (i < pt1_nr_tables) {
		ret = pt1_init_table(pt1, &tables[i], &pfn);
		if (ret)
			goto err;
		tables[i - 1].page->next_pfn = cpu_to_le32(pfn);
		i++;
	}

	tables[pt1_nr_tables - 1].page->next_pfn = cpu_to_le32(first_pfn);

	pt1_register_tables(pt1, first_pfn);
	pt1->tables = tables;
	return 0;

err:
	while (i--)
		pt1_cleanup_table(pt1, &tables[i]);

	vfree(tables);
	return ret;
}

static int pt1_start_polling(struct pt1 *pt1)
{
	int ret = 0;

	mutex_lock(&pt1->lock);
	if (!pt1->kthread) {
		pt1->kthread = kthread_run(pt1_thread, pt1, "earth-pt1");
		if (IS_ERR(pt1->kthread)) {
			ret = PTR_ERR(pt1->kthread);
			pt1->kthread = NULL;
		}
	}
	mutex_unlock(&pt1->lock);
	return ret;
}

static int pt1_start_feed(struct dvb_demux_feed *feed)
{
	struct pt1_adapter *adap;
	adap = container_of(feed->demux, struct pt1_adapter, demux);
	if (!adap->users++) {
		int ret;

		ret = pt1_start_polling(adap->pt1);
		if (ret)
			return ret;
		pt1_set_stream(adap->pt1, adap->index, 1);
	}
	return 0;
}

static void pt1_stop_polling(struct pt1 *pt1)
{
	int i, count;

	mutex_lock(&pt1->lock);
	for (i = 0, count = 0; i < PT1_NR_ADAPS; i++)
		count += pt1->adaps[i]->users;

	if (count == 0 && pt1->kthread) {
		kthread_stop(pt1->kthread);
		pt1->kthread = NULL;
	}
	mutex_unlock(&pt1->lock);
}

static int pt1_stop_feed(struct dvb_demux_feed *feed)
{
	struct pt1_adapter *adap;
	adap = container_of(feed->demux, struct pt1_adapter, demux);
	if (!--adap->users) {
		pt1_set_stream(adap->pt1, adap->index, 0);
		pt1_stop_polling(adap->pt1);
	}
	return 0;
}

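/*
 * Register 1 gates the board power state: bit 0 is the main power bit,
 * bit 3 appears to release the reset line, bits 1-2 select the LNB
 * supply (the SEC_VOLTAGE_13/18 settings actually drive 11 V and 15 V
 * on this hardware) and bits 4-8 presumably control the per-tuner
 * sleep state.
 */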
static void
pt1_update_power(struct pt1 *pt1)
{
	int bits;
	int i;
	struct pt1_adapter *adap;
	static const int sleep_bits[] = {
		1 << 4,
		1 << 6 | 1 << 7,
		1 << 5,
		1 << 6 | 1 << 8,
	};

	bits = pt1->power | !pt1->reset << 3;
	mutex_lock(&pt1->lock);
	for (i = 0; i < PT1_NR_ADAPS; i++) {
		adap = pt1->adaps[i];
		switch (adap->voltage) {
		case SEC_VOLTAGE_13: /* actually 11V */
			bits |= 1 << 1;
			break;
		case SEC_VOLTAGE_18: /* actually 15V */
			bits |= 1 << 1 | 1 << 2;
			break;
		default:
			break;
		}

		/* XXX: The bits should be changed depending on adap->sleep. */
		bits |= sleep_bits[i];
	}
	pt1_write_reg(pt1, 1, bits);
	mutex_unlock(&pt1->lock);
}

static int pt1_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage)
{
	struct pt1_adapter *adap;

	adap = container_of(fe->dvb, struct pt1_adapter, adap);
	adap->voltage = voltage;
	pt1_update_power(adap->pt1);

	if (adap->orig_set_voltage)
		return adap->orig_set_voltage(fe, voltage);
	else
		return 0;
}

static int pt1_sleep(struct dvb_frontend *fe)
{
	struct pt1_adapter *adap;

	adap = container_of(fe->dvb, struct pt1_adapter, adap);
	adap->sleep = 1;
	pt1_update_power(adap->pt1);

	if (adap->orig_sleep)
		return adap->orig_sleep(fe);
	else
		return 0;
}

static int pt1_wakeup(struct dvb_frontend *fe)
{
	struct pt1_adapter *adap;

	adap = container_of(fe->dvb, struct pt1_adapter, adap);
	adap->sleep = 0;
	pt1_update_power(adap->pt1);
	schedule_timeout_uninterruptible((HZ + 999) / 1000);

	if (adap->orig_init)
		return adap->orig_init(fe);
	else
		return 0;
}

static void pt1_free_adapter(struct pt1_adapter *adap)
{
	adap->demux.dmx.close(&adap->demux.dmx);
	dvb_dmxdev_release(&adap->dmxdev);
	dvb_dmx_release(&adap->demux);
	dvb_unregister_adapter(&adap->adap);
	free_page((unsigned long)adap->buf);
	kfree(adap);
}

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

static struct pt1_adapter *
pt1_alloc_adapter(struct pt1 *pt1)
{
	struct pt1_adapter *adap;
	void *buf;
	struct dvb_adapter *dvb_adap;
	struct dvb_demux *demux;
	struct dmxdev *dmxdev;
	int ret;

	adap = kzalloc(sizeof(struct pt1_adapter), GFP_KERNEL);
	if (!adap) {
		ret = -ENOMEM;
		goto err;
	}

	adap->pt1 = pt1;

	adap->voltage = SEC_VOLTAGE_OFF;
	adap->sleep = 1;

	buf = (u8 *)__get_free_page(GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	adap->buf = buf;
	adap->upacket_count = 0;
	adap->packet_count = 0;
	adap->st_count = -1;

	dvb_adap = &adap->adap;
	dvb_adap->priv = adap;
	ret = dvb_register_adapter(dvb_adap, DRIVER_NAME, THIS_MODULE,
				   &pt1->pdev->dev, adapter_nr);
	if (ret < 0)
		goto err_free_page;

	demux = &adap->demux;
	demux->dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING;
	demux->priv = adap;
	demux->feednum = 256;
	demux->filternum = 256;
	demux->start_feed = pt1_start_feed;
	demux->stop_feed = pt1_stop_feed;
	demux->write_to_decoder = NULL;
	ret = dvb_dmx_init(demux);
	if (ret < 0)
		goto err_unregister_adapter;

	dmxdev = &adap->dmxdev;
	dmxdev->filternum = 256;
	dmxdev->demux = &demux->dmx;
	dmxdev->capabilities = 0;
	ret = dvb_dmxdev_init(dmxdev, dvb_adap);
	if (ret < 0)
		goto err_dmx_release;

	return adap;

err_dmx_release:
	dvb_dmx_release(demux);
err_unregister_adapter:
	dvb_unregister_adapter(dvb_adap);
err_free_page:
	free_page((unsigned long)buf);
err_kfree:
	kfree(adap);
err:
	return ERR_PTR(ret);
}

static void pt1_cleanup_adapters(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_free_adapter(pt1->adaps[i]);
}

static int pt1_init_adapters(struct pt1 *pt1)
{
	int i;
	struct pt1_adapter *adap;
	int ret;

	for (i = 0; i < PT1_NR_ADAPS; i++) {
		adap = pt1_alloc_adapter(pt1);
		if (IS_ERR(adap)) {
			ret = PTR_ERR(adap);
			goto err;
		}

		adap->index = i;
		pt1->adaps[i] = adap;
	}
	return 0;

err:
	while (i--)
		pt1_free_adapter(pt1->adaps[i]);

	return ret;
}

static void pt1_cleanup_frontend(struct pt1_adapter *adap)
{
	dvb_unregister_frontend(adap->fe);
}

static int pt1_init_frontend(struct pt1_adapter *adap, struct dvb_frontend *fe)
{
	int ret;

	adap->orig_set_voltage = fe->ops.set_voltage;
	adap->orig_sleep = fe->ops.sleep;
	adap->orig_init = fe->ops.init;
	fe->ops.set_voltage = pt1_set_voltage;
	fe->ops.sleep = pt1_sleep;
	fe->ops.init = pt1_wakeup;

	ret = dvb_register_frontend(&adap->adap, fe);
	if (ret < 0)
		return ret;

	adap->fe = fe;
	return 0;
}

static void pt1_cleanup_frontends(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_cleanup_frontend(pt1->adaps[i]);
}

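/*
 * Demodulator configurations.  The PT1 (PCI device 0x211a) is configured
 * for the 20 MHz variants of the VA1J5JF8007 ISDB-S/ISDB-T demodulators,
 * the PT2 (0x222a) for the 25 MHz ones; each board carries two such
 * pairs at the I2C addresses listed below.
 */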
struct pt1_config {
	struct va1j5jf8007s_config va1j5jf8007s_config;
	struct va1j5jf8007t_config va1j5jf8007t_config;
};

static const struct pt1_config pt1_configs[2] = {
	{
		{
			.demod_address = 0x1b,
			.frequency = VA1J5JF8007S_20MHZ,
		},
		{
			.demod_address = 0x1a,
			.frequency = VA1J5JF8007T_20MHZ,
		},
	}, {
		{
			.demod_address = 0x19,
			.frequency = VA1J5JF8007S_20MHZ,
		},
		{
			.demod_address = 0x18,
			.frequency = VA1J5JF8007T_20MHZ,
		},
	},
};

static const struct pt1_config pt2_configs[2] = {
	{
		{
			.demod_address = 0x1b,
			.frequency = VA1J5JF8007S_25MHZ,
		},
		{
			.demod_address = 0x1a,
			.frequency = VA1J5JF8007T_25MHZ,
		},
	}, {
		{
			.demod_address = 0x19,
			.frequency = VA1J5JF8007S_25MHZ,
		},
		{
			.demod_address = 0x18,
			.frequency = VA1J5JF8007T_25MHZ,
		},
	},
};

static int pt1_init_frontends(struct pt1 *pt1)
{
	int i, j;
	struct i2c_adapter *i2c_adap;
	const struct pt1_config *configs, *config;
	struct dvb_frontend *fe[4];
	int ret;

	i = 0;
	j = 0;

	i2c_adap = &pt1->i2c_adap;
	configs = pt1->pdev->device == 0x211a ? pt1_configs : pt2_configs;
	do {
		config = &configs[i / 2];

		fe[i] = va1j5jf8007s_attach(&config->va1j5jf8007s_config,
					    i2c_adap);
		if (!fe[i]) {
			ret = -ENODEV; /* This does not sound nice... */
			goto err;
		}
		i++;

		fe[i] = va1j5jf8007t_attach(&config->va1j5jf8007t_config,
					    i2c_adap);
		if (!fe[i]) {
			ret = -ENODEV;
			goto err;
		}
		i++;

		ret = va1j5jf8007s_prepare(fe[i - 2]);
		if (ret < 0)
			goto err;

		ret = va1j5jf8007t_prepare(fe[i - 1]);
		if (ret < 0)
			goto err;

	} while (i < 4);

	do {
		ret = pt1_init_frontend(pt1->adaps[j], fe[j]);
		if (ret < 0)
			goto err;
	} while (++j < 4);

	return 0;

err:
	while (i-- > j)
		fe[i]->ops.release(fe[i]);

	while (j--)
		dvb_unregister_frontend(fe[j]);

	return ret;
}

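/*
 * I2C is bit-banged through a small sequencer in the FPGA: each write to
 * register 4 stores one instruction (SCL/SDA levels, a read-enable flag
 * and the address of the next instruction), and pt1_i2c_end() later
 * kicks the engine and waits for it to finish.
 */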
static void pt1_i2c_emit(struct pt1 *pt1, int addr, int busy, int read_enable,
			 int clock, int data, int next_addr)
{
	pt1_write_reg(pt1, 4, addr << 18 | busy << 13 | read_enable << 12 |
		      !clock << 11 | !data << 10 | next_addr);
}

static void pt1_i2c_write_bit(struct pt1 *pt1, int addr, int *addrp, int data)
{
	pt1_i2c_emit(pt1, addr,     1, 0, 0, data, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, data, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, data, addr + 3);
	*addrp = addr + 3;
}

static void pt1_i2c_read_bit(struct pt1 *pt1, int addr, int *addrp)
{
	pt1_i2c_emit(pt1, addr,     1, 0, 0, 1, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 1, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 1, 1, 1, addr + 3);
	pt1_i2c_emit(pt1, addr + 3, 1, 0, 0, 1, addr + 4);
	*addrp = addr + 4;
}

static void pt1_i2c_write_byte(struct pt1 *pt1, int addr, int *addrp, int data)
{
	int i;
	for (i = 0; i < 8; i++)
		pt1_i2c_write_bit(pt1, addr, &addr, data >> (7 - i) & 1);
	pt1_i2c_write_bit(pt1, addr, &addr, 1);
	*addrp = addr;
}

static void pt1_i2c_read_byte(struct pt1 *pt1, int addr, int *addrp, int last)
{
	int i;
	for (i = 0; i < 8; i++)
		pt1_i2c_read_bit(pt1, addr, &addr);
	pt1_i2c_write_bit(pt1, addr, &addr, last);
	*addrp = addr;
}

static void pt1_i2c_prepare(struct pt1 *pt1, int addr, int *addrp)
{
	pt1_i2c_emit(pt1, addr,     1, 0, 1, 1, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, 0, addr + 3);
	*addrp = addr + 3;
}

static void
pt1_i2c_write_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
	int i;
	pt1_i2c_prepare(pt1, addr, &addr);
	pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1);
	for (i = 0; i < msg->len; i++)
		pt1_i2c_write_byte(pt1, addr, &addr, msg->buf[i]);
	*addrp = addr;
}

static void
pt1_i2c_read_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
	int i;
	pt1_i2c_prepare(pt1, addr, &addr);
	pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1 | 1);
	for (i = 0; i < msg->len; i++)
		pt1_i2c_read_byte(pt1, addr, &addr, i == msg->len - 1);
	*addrp = addr;
}

static int pt1_i2c_end(struct pt1 *pt1, int addr)
{
	pt1_i2c_emit(pt1, addr,     1, 0, 0, 0, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 1, 1, 0);

	pt1_write_reg(pt1, 0, 0x00000004);
	do {
		if (signal_pending(current))
			return -EINTR;
		schedule_timeout_interruptible((HZ + 999) / 1000);
	} while (pt1_read_reg(pt1, 0) & 0x00000080);
	return 0;
}

static void pt1_i2c_begin(struct pt1 *pt1, int *addrp)
{
	int addr;
	addr = 0;

	pt1_i2c_emit(pt1, addr,     0, 0, 1, 1, addr /* itself */);
	addr = addr + 1;

	if (!pt1->i2c_running) {
		pt1_i2c_emit(pt1, addr,     1, 0, 1, 1, addr + 1);
		pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
		addr = addr + 2;
		pt1->i2c_running = 1;
	}
	*addrp = addr;
}

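/*
 * The sequencer can only run programs built by the helpers above, so the
 * adapter limits itself to plain writes and to write-then-read
 * transactions whose read part fits in the 32-bit read-back register
 * (at most four bytes).
 */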
static int pt1_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct pt1 *pt1;
	int i;
	struct i2c_msg *msg, *next_msg;
	int addr, ret;
	u16 len;
	u32 word;

	pt1 = i2c_get_adapdata(adap);

	for (i = 0; i < num; i++) {
		msg = &msgs[i];
		if (msg->flags & I2C_M_RD)
			return -ENOTSUPP;

		if (i + 1 < num)
			next_msg = &msgs[i + 1];
		else
			next_msg = NULL;

		if (next_msg && next_msg->flags & I2C_M_RD) {
			i++;

			len = next_msg->len;
			if (len > 4)
				return -ENOTSUPP;

			pt1_i2c_begin(pt1, &addr);
			pt1_i2c_write_msg(pt1, addr, &addr, msg);
			pt1_i2c_read_msg(pt1, addr, &addr, next_msg);
			ret = pt1_i2c_end(pt1, addr);
			if (ret < 0)
				return ret;

			word = pt1_read_reg(pt1, 2);
			while (len--) {
				next_msg->buf[len] = word;
				word >>= 8;
			}
		} else {
			pt1_i2c_begin(pt1, &addr);
			pt1_i2c_write_msg(pt1, addr, &addr, msg);
			ret = pt1_i2c_end(pt1, addr);
			if (ret < 0)
				return ret;
		}
	}

	return num;
}

static u32 pt1_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C;
}

static const struct i2c_algorithm pt1_i2c_algo = {
	.master_xfer = pt1_i2c_xfer,
	.functionality = pt1_i2c_func,
};

static void pt1_i2c_wait(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 128; i++)
		pt1_i2c_emit(pt1, 0, 0, 0, 1, 1, 0);
}

static void pt1_i2c_init(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 1024; i++)
		pt1_i2c_emit(pt1, i, 0, 0, 1, 1, 0);
}

static void pt1_remove(struct pci_dev *pdev)
{
	struct pt1 *pt1;
	void __iomem *regs;

	pt1 = pci_get_drvdata(pdev);
	regs = pt1->regs;

	if (pt1->kthread)
		kthread_stop(pt1->kthread);
	pt1_cleanup_tables(pt1);
	pt1_cleanup_frontends(pt1);
	pt1_disable_ram(pt1);
	pt1->power = 0;
	pt1->reset = 1;
	pt1_update_power(pt1);
	pt1_cleanup_adapters(pt1);
	i2c_del_adapter(&pt1->i2c_adap);
	kfree(pt1);
	pci_iounmap(pdev, regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

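/*
 * Probe brings the board up in the order the hardware seems to expect:
 * map the BAR, register the four DVB adapters and the I2C bus, sync and
 * unlock the FPGA, reset and initialise the on-board RAM, power the
 * tuners up, attach the frontends and finally install the DMA tables.
 */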
static int pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;
	void __iomem *regs;
	struct pt1 *pt1;
	struct i2c_adapter *i2c_adap;

	ret = pci_enable_device(pdev);
	if (ret < 0)
		goto err;

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret < 0)
		goto err_pci_disable_device;

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRIVER_NAME);
	if (ret < 0)
		goto err_pci_disable_device;

	regs = pci_iomap(pdev, 0, 0);
	if (!regs) {
		ret = -EIO;
		goto err_pci_release_regions;
	}

	pt1 = kzalloc(sizeof(struct pt1), GFP_KERNEL);
	if (!pt1) {
		ret = -ENOMEM;
		goto err_pci_iounmap;
	}

	pt1->pdev = pdev;
	pt1->regs = regs;
	pci_set_drvdata(pdev, pt1);

	ret = pt1_init_adapters(pt1);
	if (ret < 0)
		goto err_kfree;

	mutex_init(&pt1->lock);

	pt1->power = 0;
	pt1->reset = 1;
	pt1_update_power(pt1);

	i2c_adap = &pt1->i2c_adap;
	i2c_adap->algo = &pt1_i2c_algo;
	i2c_adap->algo_data = NULL;
	i2c_adap->dev.parent = &pdev->dev;
	strcpy(i2c_adap->name, DRIVER_NAME);
	i2c_set_adapdata(i2c_adap, pt1);
	ret = i2c_add_adapter(i2c_adap);
	if (ret < 0)
		goto err_pt1_cleanup_adapters;

	pt1_i2c_init(pt1);
	pt1_i2c_wait(pt1);

	ret = pt1_sync(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	pt1_identify(pt1);

	ret = pt1_unlock(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_reset_pci(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_reset_ram(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_enable_ram(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	pt1_init_streams(pt1);

	pt1->power = 1;
	pt1_update_power(pt1);
	schedule_timeout_uninterruptible((HZ + 49) / 50);

	pt1->reset = 0;
	pt1_update_power(pt1);
	schedule_timeout_uninterruptible((HZ + 999) / 1000);

	ret = pt1_init_frontends(pt1);
	if (ret < 0)
		goto err_pt1_disable_ram;

	ret = pt1_init_tables(pt1);
	if (ret < 0)
		goto err_pt1_cleanup_frontends;

	return 0;

err_pt1_cleanup_frontends:
	pt1_cleanup_frontends(pt1);
err_pt1_disable_ram:
	pt1_disable_ram(pt1);
	pt1->power = 0;
	pt1->reset = 1;
	pt1_update_power(pt1);
err_i2c_del_adapter:
	i2c_del_adapter(i2c_adap);
err_pt1_cleanup_adapters:
	pt1_cleanup_adapters(pt1);
err_kfree:
	kfree(pt1);
err_pci_iounmap:
	pci_iounmap(pdev, regs);
err_pci_release_regions:
	pci_release_regions(pdev);
err_pci_disable_device:
	pci_disable_device(pdev);
err:
	return ret;
}

static struct pci_device_id pt1_id_table[] = {
	{ PCI_DEVICE(0x10ee, 0x211a) },
	{ PCI_DEVICE(0x10ee, 0x222a) },
	{ },
};
MODULE_DEVICE_TABLE(pci, pt1_id_table);

static struct pci_driver pt1_driver = {
	.name		= DRIVER_NAME,
	.probe		= pt1_probe,
	.remove		= pt1_remove,
	.id_table	= pt1_id_table,
};

module_pci_driver(pt1_driver);

MODULE_AUTHOR("Takahito HIRANO <hiranotaka@zng.info>");
MODULE_DESCRIPTION("Earthsoft PT1/PT2 Driver");
MODULE_LICENSE("GPL");