/*
 * driver for Earthsoft PT1/PT2
 *
 * Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
 *
 * based on pt1dvr - http://pt1dvr.sourceforge.jp/
 * 	by Tomoaki Ishikawa <tomy@users.sourceforge.jp>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/ratelimit.h>

#include "dvbdev.h"
#include "dvb_demux.h"
#include "dmxdev.h"
#include "dvb_net.h"
#include "dvb_frontend.h"

#include "va1j5jf8007t.h"
#include "va1j5jf8007s.h"

#define DRIVER_NAME "earth-pt1"

#define PT1_PAGE_SHIFT 12
#define PT1_PAGE_SIZE (1 << PT1_PAGE_SHIFT)
#define PT1_NR_UPACKETS 1024
#define PT1_NR_BUFS 511

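/*
 * DMA ring layout: the card fills 4 KiB "buffer pages" with 1024 32-bit
 * micro-packets.  Each 4 KiB "table page" lists the page frame numbers of
 * PT1_NR_BUFS buffer pages plus the PFN of the next table page, so the
 * table pages form a circular list that the hardware walks on its own.
 */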
struct pt1_buffer_page {
	__le32 upackets[PT1_NR_UPACKETS];
};

struct pt1_table_page {
	__le32 next_pfn;
	__le32 buf_pfns[PT1_NR_BUFS];
};

struct pt1_buffer {
	struct pt1_buffer_page *page;
	dma_addr_t addr;
};

struct pt1_table {
	struct pt1_table_page *page;
	dma_addr_t addr;
	struct pt1_buffer bufs[PT1_NR_BUFS];
};

#define PT1_NR_ADAPS 4

struct pt1_adapter;

struct pt1 {
	struct pci_dev *pdev;
	void __iomem *regs;
	struct i2c_adapter i2c_adap;
	int i2c_running;
	struct pt1_adapter *adaps[PT1_NR_ADAPS];
	struct pt1_table *tables;
	struct task_struct *kthread;
	int table_index;
	int buf_index;

	struct mutex lock;
	int power;
	int reset;
};

struct pt1_adapter {
	struct pt1 *pt1;
	int index;

	u8 *buf;
	int upacket_count;
	int packet_count;
	int st_count;

	struct dvb_adapter adap;
	struct dvb_demux demux;
	int users;
	struct dmxdev dmxdev;
	struct dvb_frontend *fe;
	int (*orig_set_voltage)(struct dvb_frontend *fe,
				enum fe_sec_voltage voltage);
	int (*orig_sleep)(struct dvb_frontend *fe);
	int (*orig_init)(struct dvb_frontend *fe);

	enum fe_sec_voltage voltage;
	int sleep;
};

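/* The registers are 32-bit words; 'reg' is a word index into BAR 0. */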
static void pt1_write_reg(struct pt1 *pt1, int reg, u32 data)
{
	writel(data, pt1->regs + reg * 4);
}

static u32 pt1_read_reg(struct pt1 *pt1, int reg)
{
	return readl(pt1->regs + reg * 4);
}

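/* Number of DMA table pages (PT1_NR_BUFS buffers each) to allocate; default 8. */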
static int pt1_nr_tables = 8;
module_param_named(nr_tables, pt1_nr_tables, int, 0);

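/*
 * Low-level card control: commands are issued by writing magic words to
 * register 0, and completion is detected by polling status bits in the
 * same register.
 */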
static void pt1_increment_table_count(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x00000020);
}

static void pt1_init_table_count(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x00000010);
}

static void pt1_register_tables(struct pt1 *pt1, u32 first_pfn)
{
	pt1_write_reg(pt1, 5, first_pfn);
	pt1_write_reg(pt1, 0, 0x0c000040);
}

static void pt1_unregister_tables(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x08080000);
}

static int pt1_sync(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 57; i++) {
		if (pt1_read_reg(pt1, 0) & 0x20000000)
			return 0;
		pt1_write_reg(pt1, 0, 0x00000008);
	}
	dev_err(&pt1->pdev->dev, "could not sync\n");
	return -EIO;
}

static u64 pt1_identify(struct pt1 *pt1)
{
	int i;
	u64 id;
	id = 0;
	for (i = 0; i < 57; i++) {
		id |= (u64)(pt1_read_reg(pt1, 0) >> 30 & 1) << i;
		pt1_write_reg(pt1, 0, 0x00000008);
	}
	return id;
}

static int pt1_unlock(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x00000008);
	for (i = 0; i < 3; i++) {
		if (pt1_read_reg(pt1, 0) & 0x80000000)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	dev_err(&pt1->pdev->dev, "could not unlock\n");
	return -EIO;
}

static int pt1_reset_pci(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x01010000);
	pt1_write_reg(pt1, 0, 0x01000000);
	for (i = 0; i < 10; i++) {
		if (pt1_read_reg(pt1, 0) & 0x00000001)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	dev_err(&pt1->pdev->dev, "could not reset PCI\n");
	return -EIO;
}

static int pt1_reset_ram(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x02020000);
	pt1_write_reg(pt1, 0, 0x02000000);
	for (i = 0; i < 10; i++) {
		if (pt1_read_reg(pt1, 0) & 0x00000002)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	dev_err(&pt1->pdev->dev, "could not reset RAM\n");
	return -EIO;
}

static int pt1_do_enable_ram(struct pt1 *pt1)
{
	int i, j;
	u32 status;
	status = pt1_read_reg(pt1, 0) & 0x00000004;
	pt1_write_reg(pt1, 0, 0x00000002);
	for (i = 0; i < 10; i++) {
		for (j = 0; j < 1024; j++) {
			if ((pt1_read_reg(pt1, 0) & 0x00000004) != status)
				return 0;
		}
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	dev_err(&pt1->pdev->dev, "could not enable RAM\n");
	return -EIO;
}

static int pt1_enable_ram(struct pt1 *pt1)
{
	int i, ret;
	int phase;
	schedule_timeout_uninterruptible((HZ + 999) / 1000);
	phase = pt1->pdev->device == 0x211a ? 128 : 166;
	for (i = 0; i < phase; i++) {
		ret = pt1_do_enable_ram(pt1);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static void pt1_disable_ram(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x0b0b0000);
}

static void pt1_set_stream(struct pt1 *pt1, int index, int enabled)
{
	pt1_write_reg(pt1, 2, 1 << (index + 8) | enabled << index);
}

static void pt1_init_streams(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_set_stream(pt1, i, 0);
}

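/*
 * Demultiplex one buffer page.  Each 32-bit micro-packet carries three
 * payload bytes plus the stream index, a start flag, an overflow flag and
 * a 3-bit sequence counter; 62 full micro-packets and one two-byte tail
 * rebuild a 188-byte TS packet, and 21 packets are handed to the demux at
 * a time.  Returns 0 if the page has not been completely written yet.
 */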
static int pt1_filter(struct pt1 *pt1, struct pt1_buffer_page *page)
{
	u32 upacket;
	int i;
	int index;
	struct pt1_adapter *adap;
	int offset;
	u8 *buf;
	int sc;

	if (!page->upackets[PT1_NR_UPACKETS - 1])
		return 0;

	for (i = 0; i < PT1_NR_UPACKETS; i++) {
		upacket = le32_to_cpu(page->upackets[i]);
		index = (upacket >> 29) - 1;
		if (index < 0 || index >= PT1_NR_ADAPS)
			continue;

		adap = pt1->adaps[index];
		if (upacket >> 25 & 1)
			adap->upacket_count = 0;
		else if (!adap->upacket_count)
			continue;

		if (upacket >> 24 & 1)
			printk_ratelimited(KERN_INFO "earth-pt1: device "
				"buffer overflowing. table[%d] buf[%d]\n",
				pt1->table_index, pt1->buf_index);
		sc = upacket >> 26 & 0x7;
		if (adap->st_count != -1 && sc != ((adap->st_count + 1) & 0x7))
			printk_ratelimited(KERN_INFO "earth-pt1: data loss"
				" in streamID(adapter)[%d]\n", index);
		adap->st_count = sc;

		buf = adap->buf;
		offset = adap->packet_count * 188 + adap->upacket_count * 3;
		buf[offset] = upacket >> 16;
		buf[offset + 1] = upacket >> 8;
		if (adap->upacket_count != 62)
			buf[offset + 2] = upacket;

		if (++adap->upacket_count >= 63) {
			adap->upacket_count = 0;
			if (++adap->packet_count >= 21) {
				dvb_dmx_swfilter_packets(&adap->demux, buf, 21);
				adap->packet_count = 0;
			}
		}
	}

	page->upackets[PT1_NR_UPACKETS - 1] = 0;
	return 1;
}

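/*
 * Polling thread: walk the buffer ring, feed completed pages to the demux
 * and tell the hardware when a whole table has been consumed.
 */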
static int pt1_thread(void *data)
{
	struct pt1 *pt1;
	struct pt1_buffer_page *page;

	pt1 = data;
	set_freezable();

	while (!kthread_should_stop()) {
		try_to_freeze();

		page = pt1->tables[pt1->table_index].bufs[pt1->buf_index].page;
		if (!pt1_filter(pt1, page)) {
			schedule_timeout_interruptible((HZ + 999) / 1000);
			continue;
		}

		if (++pt1->buf_index >= PT1_NR_BUFS) {
			pt1_increment_table_count(pt1);
			pt1->buf_index = 0;
			if (++pt1->table_index >= pt1_nr_tables)
				pt1->table_index = 0;
		}
	}

	return 0;
}

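/*
 * Buffer and table pages are single coherently-mapped pages; the hardware
 * addresses them by 32-bit page frame number, hence the PFN checks in
 * pt1_alloc_page() and the 32-bit DMA mask set in pt1_probe().
 */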
static void pt1_free_page(struct pt1 *pt1, void *page, dma_addr_t addr)
{
	dma_free_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, page, addr);
}

static void *pt1_alloc_page(struct pt1 *pt1, dma_addr_t *addrp, u32 *pfnp)
{
	void *page;
	dma_addr_t addr;

	page = dma_alloc_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, &addr,
				  GFP_KERNEL);
	if (page == NULL)
		return NULL;

	BUG_ON(addr & (PT1_PAGE_SIZE - 1));
	BUG_ON(addr >> PT1_PAGE_SHIFT >> 31 >> 1);

	*addrp = addr;
	*pfnp = addr >> PT1_PAGE_SHIFT;
	return page;
}

static void pt1_cleanup_buffer(struct pt1 *pt1, struct pt1_buffer *buf)
{
	pt1_free_page(pt1, buf->page, buf->addr);
}

static int
pt1_init_buffer(struct pt1 *pt1, struct pt1_buffer *buf, u32 *pfnp)
{
	struct pt1_buffer_page *page;
	dma_addr_t addr;

	page = pt1_alloc_page(pt1, &addr, pfnp);
	if (page == NULL)
		return -ENOMEM;

	page->upackets[PT1_NR_UPACKETS - 1] = 0;

	buf->page = page;
	buf->addr = addr;
	return 0;
}

static void pt1_cleanup_table(struct pt1 *pt1, struct pt1_table *table)
{
	int i;

	for (i = 0; i < PT1_NR_BUFS; i++)
		pt1_cleanup_buffer(pt1, &table->bufs[i]);

	pt1_free_page(pt1, table->page, table->addr);
}

static int
pt1_init_table(struct pt1 *pt1, struct pt1_table *table, u32 *pfnp)
{
	struct pt1_table_page *page;
	dma_addr_t addr;
	int i, ret;
	u32 buf_pfn;

	page = pt1_alloc_page(pt1, &addr, pfnp);
	if (page == NULL)
		return -ENOMEM;

	for (i = 0; i < PT1_NR_BUFS; i++) {
		ret = pt1_init_buffer(pt1, &table->bufs[i], &buf_pfn);
		if (ret < 0)
			goto err;

		page->buf_pfns[i] = cpu_to_le32(buf_pfn);
	}

	pt1_increment_table_count(pt1);
	table->page = page;
	table->addr = addr;
	return 0;

err:
	while (i--)
		pt1_cleanup_buffer(pt1, &table->bufs[i]);

	pt1_free_page(pt1, page, addr);
	return ret;
}

static void pt1_cleanup_tables(struct pt1 *pt1)
{
	struct pt1_table *tables;
	int i;

	tables = pt1->tables;
	pt1_unregister_tables(pt1);

	for (i = 0; i < pt1_nr_tables; i++)
		pt1_cleanup_table(pt1, &tables[i]);

	vfree(tables);
}

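/*
 * Allocate pt1_nr_tables table pages, chain them into a ring and hand the
 * PFN of the first one to the hardware.
 */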
static int pt1_init_tables(struct pt1 *pt1)
{
	struct pt1_table *tables;
	int i, ret;
	u32 first_pfn, pfn;

	tables = vmalloc(sizeof(struct pt1_table) * pt1_nr_tables);
	if (tables == NULL)
		return -ENOMEM;

	pt1_init_table_count(pt1);

	i = 0;
	if (pt1_nr_tables) {
		ret = pt1_init_table(pt1, &tables[0], &first_pfn);
		if (ret)
			goto err;
		i++;
	}

	while (i < pt1_nr_tables) {
		ret = pt1_init_table(pt1, &tables[i], &pfn);
		if (ret)
			goto err;
		tables[i - 1].page->next_pfn = cpu_to_le32(pfn);
		i++;
	}

	tables[pt1_nr_tables - 1].page->next_pfn = cpu_to_le32(first_pfn);

	pt1_register_tables(pt1, first_pfn);
	pt1->tables = tables;
	return 0;

err:
	while (i--)
		pt1_cleanup_table(pt1, &tables[i]);

	vfree(tables);
	return ret;
}

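/*
 * The polling thread is started when the first feed on any adapter is
 * opened and stopped again once no adapter has users left; the per-adapter
 * streams are switched on and off individually.
 */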
static int pt1_start_polling(struct pt1 *pt1)
{
	int ret = 0;

	mutex_lock(&pt1->lock);
	if (!pt1->kthread) {
		pt1->kthread = kthread_run(pt1_thread, pt1, "earth-pt1");
		if (IS_ERR(pt1->kthread)) {
			ret = PTR_ERR(pt1->kthread);
			pt1->kthread = NULL;
		}
	}
	mutex_unlock(&pt1->lock);
	return ret;
}

static int pt1_start_feed(struct dvb_demux_feed *feed)
{
	struct pt1_adapter *adap;
	adap = container_of(feed->demux, struct pt1_adapter, demux);
	if (!adap->users++) {
		int ret;

		ret = pt1_start_polling(adap->pt1);
		if (ret)
			return ret;
		pt1_set_stream(adap->pt1, adap->index, 1);
	}
	return 0;
}

static void pt1_stop_polling(struct pt1 *pt1)
{
	int i, count;

	mutex_lock(&pt1->lock);
	for (i = 0, count = 0; i < PT1_NR_ADAPS; i++)
		count += pt1->adaps[i]->users;

	if (count == 0 && pt1->kthread) {
		kthread_stop(pt1->kthread);
		pt1->kthread = NULL;
	}
	mutex_unlock(&pt1->lock);
}

static int pt1_stop_feed(struct dvb_demux_feed *feed)
{
	struct pt1_adapter *adap;
	adap = container_of(feed->demux, struct pt1_adapter, demux);
	if (!--adap->users) {
		pt1_set_stream(adap->pt1, adap->index, 0);
		pt1_stop_polling(adap->pt1);
	}
	return 0;
}

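/*
 * Register 1 collects the board state: bit 0 is the power flag, bit 3 the
 * inverted reset flag, bits 1-2 select the LNB supply voltage, and the
 * per-tuner sleep_bits[] are meant to reflect each adapter's sleep state
 * (see the XXX note below).
 */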
static void
pt1_update_power(struct pt1 *pt1)
{
	int bits;
	int i;
	struct pt1_adapter *adap;
	static const int sleep_bits[] = {
		1 << 4,
		1 << 6 | 1 << 7,
		1 << 5,
		1 << 6 | 1 << 8,
	};

	bits = pt1->power | !pt1->reset << 3;
	mutex_lock(&pt1->lock);
	for (i = 0; i < PT1_NR_ADAPS; i++) {
		adap = pt1->adaps[i];
		switch (adap->voltage) {
		case SEC_VOLTAGE_13: /* actually 11V */
			bits |= 1 << 1;
			break;
		case SEC_VOLTAGE_18: /* actually 15V */
			bits |= 1 << 1 | 1 << 2;
			break;
		default:
			break;
		}

		/* XXX: The bits should be changed depending on adap->sleep. */
		bits |= sleep_bits[i];
	}
	pt1_write_reg(pt1, 1, bits);
	mutex_unlock(&pt1->lock);
}

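/*
 * The frontend ops are wrapped so that LNB voltage and sleep/wakeup
 * changes update the shared power register before the demodulator
 * driver's original handlers are called.
 */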
static int pt1_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage)
{
	struct pt1_adapter *adap;

	adap = container_of(fe->dvb, struct pt1_adapter, adap);
	adap->voltage = voltage;
	pt1_update_power(adap->pt1);

	if (adap->orig_set_voltage)
		return adap->orig_set_voltage(fe, voltage);
	else
		return 0;
}

static int pt1_sleep(struct dvb_frontend *fe)
{
	struct pt1_adapter *adap;

	adap = container_of(fe->dvb, struct pt1_adapter, adap);
	adap->sleep = 1;
	pt1_update_power(adap->pt1);

	if (adap->orig_sleep)
		return adap->orig_sleep(fe);
	else
		return 0;
}

static int pt1_wakeup(struct dvb_frontend *fe)
{
	struct pt1_adapter *adap;

	adap = container_of(fe->dvb, struct pt1_adapter, adap);
	adap->sleep = 0;
	pt1_update_power(adap->pt1);
	schedule_timeout_uninterruptible((HZ + 999) / 1000);

	if (adap->orig_init)
		return adap->orig_init(fe);
	else
		return 0;
}

static void pt1_free_adapter(struct pt1_adapter *adap)
{
	adap->demux.dmx.close(&adap->demux.dmx);
	dvb_dmxdev_release(&adap->dmxdev);
	dvb_dmx_release(&adap->demux);
	dvb_unregister_adapter(&adap->adap);
	free_page((unsigned long)adap->buf);
	kfree(adap);
}

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

static struct pt1_adapter *
pt1_alloc_adapter(struct pt1 *pt1)
{
	struct pt1_adapter *adap;
	void *buf;
	struct dvb_adapter *dvb_adap;
	struct dvb_demux *demux;
	struct dmxdev *dmxdev;
	int ret;

	adap = kzalloc(sizeof(struct pt1_adapter), GFP_KERNEL);
	if (!adap) {
		ret = -ENOMEM;
		goto err;
	}

	adap->pt1 = pt1;

	adap->voltage = SEC_VOLTAGE_OFF;
	adap->sleep = 1;

	buf = (u8 *)__get_free_page(GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	adap->buf = buf;
	adap->upacket_count = 0;
	adap->packet_count = 0;
	adap->st_count = -1;

	dvb_adap = &adap->adap;
	dvb_adap->priv = adap;
	ret = dvb_register_adapter(dvb_adap, DRIVER_NAME, THIS_MODULE,
				   &pt1->pdev->dev, adapter_nr);
	if (ret < 0)
		goto err_free_page;

	demux = &adap->demux;
	demux->dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING;
	demux->priv = adap;
	demux->feednum = 256;
	demux->filternum = 256;
	demux->start_feed = pt1_start_feed;
	demux->stop_feed = pt1_stop_feed;
	demux->write_to_decoder = NULL;
	ret = dvb_dmx_init(demux);
	if (ret < 0)
		goto err_unregister_adapter;

	dmxdev = &adap->dmxdev;
	dmxdev->filternum = 256;
	dmxdev->demux = &demux->dmx;
	dmxdev->capabilities = 0;
	ret = dvb_dmxdev_init(dmxdev, dvb_adap);
	if (ret < 0)
		goto err_dmx_release;

	return adap;

err_dmx_release:
	dvb_dmx_release(demux);
err_unregister_adapter:
	dvb_unregister_adapter(dvb_adap);
err_free_page:
	free_page((unsigned long)buf);
err_kfree:
	kfree(adap);
err:
	return ERR_PTR(ret);
}

static void pt1_cleanup_adapters(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_free_adapter(pt1->adaps[i]);
}

static int pt1_init_adapters(struct pt1 *pt1)
{
	int i;
	struct pt1_adapter *adap;
	int ret;

	for (i = 0; i < PT1_NR_ADAPS; i++) {
		adap = pt1_alloc_adapter(pt1);
		if (IS_ERR(adap)) {
			ret = PTR_ERR(adap);
			goto err;
		}

		adap->index = i;
		pt1->adaps[i] = adap;
	}
	return 0;

err:
	while (i--)
		pt1_free_adapter(pt1->adaps[i]);

	return ret;
}

static void pt1_cleanup_frontend(struct pt1_adapter *adap)
{
	dvb_unregister_frontend(adap->fe);
}

static int pt1_init_frontend(struct pt1_adapter *adap, struct dvb_frontend *fe)
{
	int ret;

	adap->orig_set_voltage = fe->ops.set_voltage;
	adap->orig_sleep = fe->ops.sleep;
	adap->orig_init = fe->ops.init;
	fe->ops.set_voltage = pt1_set_voltage;
	fe->ops.sleep = pt1_sleep;
	fe->ops.init = pt1_wakeup;

	ret = dvb_register_frontend(&adap->adap, fe);
	if (ret < 0)
		return ret;

	adap->fe = fe;
	return 0;
}

static void pt1_cleanup_frontends(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_cleanup_frontend(pt1->adaps[i]);
}

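/*
 * Demodulator configurations: PT1 boards (PCI device 0x211a) use the
 * 20 MHz variants, PT2 boards the 25 MHz variants.
 */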
struct pt1_config {
	struct va1j5jf8007s_config va1j5jf8007s_config;
	struct va1j5jf8007t_config va1j5jf8007t_config;
};

static const struct pt1_config pt1_configs[2] = {
	{
		{
			.demod_address = 0x1b,
			.frequency = VA1J5JF8007S_20MHZ,
		},
		{
			.demod_address = 0x1a,
			.frequency = VA1J5JF8007T_20MHZ,
		},
	}, {
		{
			.demod_address = 0x19,
			.frequency = VA1J5JF8007S_20MHZ,
		},
		{
			.demod_address = 0x18,
			.frequency = VA1J5JF8007T_20MHZ,
		},
	},
};

static const struct pt1_config pt2_configs[2] = {
	{
		{
			.demod_address = 0x1b,
			.frequency = VA1J5JF8007S_25MHZ,
		},
		{
			.demod_address = 0x1a,
			.frequency = VA1J5JF8007T_25MHZ,
		},
	}, {
		{
			.demod_address = 0x19,
			.frequency = VA1J5JF8007S_25MHZ,
		},
		{
			.demod_address = 0x18,
			.frequency = VA1J5JF8007T_25MHZ,
		},
	},
};

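/*
 * Each board carries two ISDB-S and two ISDB-T demodulators
 * (VA1J5JF8007S/T); they are attached in S, T, S, T order, one per
 * DVB adapter.
 */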
static int pt1_init_frontends(struct pt1 *pt1)
{
	int i, j;
	struct i2c_adapter *i2c_adap;
	const struct pt1_config *configs, *config;
	struct dvb_frontend *fe[4];
	int ret;

	i = 0;
	j = 0;

	i2c_adap = &pt1->i2c_adap;
	configs = pt1->pdev->device == 0x211a ? pt1_configs : pt2_configs;
	do {
		config = &configs[i / 2];

		fe[i] = va1j5jf8007s_attach(&config->va1j5jf8007s_config,
					    i2c_adap);
		if (!fe[i]) {
			ret = -ENODEV; /* This does not sound nice... */
			goto err;
		}
		i++;

		fe[i] = va1j5jf8007t_attach(&config->va1j5jf8007t_config,
					    i2c_adap);
		if (!fe[i]) {
			ret = -ENODEV;
			goto err;
		}
		i++;

		ret = va1j5jf8007s_prepare(fe[i - 2]);
		if (ret < 0)
			goto err;

		ret = va1j5jf8007t_prepare(fe[i - 1]);
		if (ret < 0)
			goto err;

	} while (i < 4);

	do {
		ret = pt1_init_frontend(pt1->adaps[j], fe[j]);
		if (ret < 0)
			goto err;
	} while (++j < 4);

	return 0;

err:
	while (i-- > j)
		fe[i]->ops.release(fe[i]);

	while (j--)
		dvb_unregister_frontend(fe[j]);

	return ret;
}

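/*
 * I2C is bit-banged through a small instruction memory on the card:
 * pt1_i2c_emit() stores one instruction (SCL/SDA levels, read strobe,
 * busy flag and the address of the next instruction) at 'addr', and the
 * whole chain is executed by the trigger write in pt1_i2c_end().
 */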
static void pt1_i2c_emit(struct pt1 *pt1, int addr, int busy, int read_enable,
			 int clock, int data, int next_addr)
{
	pt1_write_reg(pt1, 4, addr << 18 | busy << 13 | read_enable << 12 |
		      !clock << 11 | !data << 10 | next_addr);
}

static void pt1_i2c_write_bit(struct pt1 *pt1, int addr, int *addrp, int data)
{
	pt1_i2c_emit(pt1, addr,     1, 0, 0, data, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, data, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, data, addr + 3);
	*addrp = addr + 3;
}

static void pt1_i2c_read_bit(struct pt1 *pt1, int addr, int *addrp)
{
	pt1_i2c_emit(pt1, addr,     1, 0, 0, 1, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 1, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 1, 1, 1, addr + 3);
	pt1_i2c_emit(pt1, addr + 3, 1, 0, 0, 1, addr + 4);
	*addrp = addr + 4;
}

static void pt1_i2c_write_byte(struct pt1 *pt1, int addr, int *addrp, int data)
{
	int i;
	for (i = 0; i < 8; i++)
		pt1_i2c_write_bit(pt1, addr, &addr, data >> (7 - i) & 1);
	pt1_i2c_write_bit(pt1, addr, &addr, 1);
	*addrp = addr;
}

static void pt1_i2c_read_byte(struct pt1 *pt1, int addr, int *addrp, int last)
{
	int i;
	for (i = 0; i < 8; i++)
		pt1_i2c_read_bit(pt1, addr, &addr);
	pt1_i2c_write_bit(pt1, addr, &addr, last);
	*addrp = addr;
}

static void pt1_i2c_prepare(struct pt1 *pt1, int addr, int *addrp)
{
	pt1_i2c_emit(pt1, addr,     1, 0, 1, 1, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, 0, addr + 3);
	*addrp = addr + 3;
}

static void
pt1_i2c_write_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
	int i;
	pt1_i2c_prepare(pt1, addr, &addr);
	pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1);
	for (i = 0; i < msg->len; i++)
		pt1_i2c_write_byte(pt1, addr, &addr, msg->buf[i]);
	*addrp = addr;
}

static void
pt1_i2c_read_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
	int i;
	pt1_i2c_prepare(pt1, addr, &addr);
	pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1 | 1);
	for (i = 0; i < msg->len; i++)
		pt1_i2c_read_byte(pt1, addr, &addr, i == msg->len - 1);
	*addrp = addr;
}

static int pt1_i2c_end(struct pt1 *pt1, int addr)
{
	pt1_i2c_emit(pt1, addr,     1, 0, 0, 0, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 1, 1, 0);

	pt1_write_reg(pt1, 0, 0x00000004);
	do {
		if (signal_pending(current))
			return -EINTR;
		schedule_timeout_interruptible((HZ + 999) / 1000);
	} while (pt1_read_reg(pt1, 0) & 0x00000080);
	return 0;
}

static void pt1_i2c_begin(struct pt1 *pt1, int *addrp)
{
	int addr;
	addr = 0;

	pt1_i2c_emit(pt1, addr,     0, 0, 1, 1, addr /* itself */);
	addr = addr + 1;

	if (!pt1->i2c_running) {
		pt1_i2c_emit(pt1, addr,     1, 0, 1, 1, addr + 1);
		pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
		addr = addr + 2;
		pt1->i2c_running = 1;
	}
	*addrp = addr;
}

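/*
 * Only plain writes and write-then-read transfers are supported, and a
 * read may return at most four bytes (one word of register 2).
 */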
static int pt1_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct pt1 *pt1;
	int i;
	struct i2c_msg *msg, *next_msg;
	int addr, ret;
	u16 len;
	u32 word;

	pt1 = i2c_get_adapdata(adap);

	for (i = 0; i < num; i++) {
		msg = &msgs[i];
		if (msg->flags & I2C_M_RD)
			return -ENOTSUPP;

		if (i + 1 < num)
			next_msg = &msgs[i + 1];
		else
			next_msg = NULL;

		if (next_msg && next_msg->flags & I2C_M_RD) {
			i++;

			len = next_msg->len;
			if (len > 4)
				return -ENOTSUPP;

			pt1_i2c_begin(pt1, &addr);
			pt1_i2c_write_msg(pt1, addr, &addr, msg);
			pt1_i2c_read_msg(pt1, addr, &addr, next_msg);
			ret = pt1_i2c_end(pt1, addr);
			if (ret < 0)
				return ret;

			word = pt1_read_reg(pt1, 2);
			while (len--) {
				next_msg->buf[len] = word;
				word >>= 8;
			}
		} else {
			pt1_i2c_begin(pt1, &addr);
			pt1_i2c_write_msg(pt1, addr, &addr, msg);
			ret = pt1_i2c_end(pt1, addr);
			if (ret < 0)
				return ret;
		}
	}

	return num;
}

static u32 pt1_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C;
}

static const struct i2c_algorithm pt1_i2c_algo = {
	.master_xfer = pt1_i2c_xfer,
	.functionality = pt1_i2c_func,
};

static void pt1_i2c_wait(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 128; i++)
		pt1_i2c_emit(pt1, 0, 0, 0, 1, 1, 0);
}

static void pt1_i2c_init(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 1024; i++)
		pt1_i2c_emit(pt1, i, 0, 0, 1, 1, 0);
}

static void pt1_remove(struct pci_dev *pdev)
{
	struct pt1 *pt1;
	void __iomem *regs;

	pt1 = pci_get_drvdata(pdev);
	regs = pt1->regs;

	if (pt1->kthread)
		kthread_stop(pt1->kthread);
	pt1_cleanup_tables(pt1);
	pt1_cleanup_frontends(pt1);
	pt1_disable_ram(pt1);
	pt1->power = 0;
	pt1->reset = 1;
	pt1_update_power(pt1);
	pt1_cleanup_adapters(pt1);
	i2c_del_adapter(&pt1->i2c_adap);
	kfree(pt1);
	pci_iounmap(pdev, regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

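/*
 * Probe sequence: map BAR 0, register the four DVB adapters and the I2C
 * bus, bring the card out of reset, attach the frontends and set up the
 * DMA tables.  The polling thread itself is only started when the first
 * feed is opened.
 */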
static int pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;
	void __iomem *regs;
	struct pt1 *pt1;
	struct i2c_adapter *i2c_adap;

	ret = pci_enable_device(pdev);
	if (ret < 0)
		goto err;

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret < 0)
		goto err_pci_disable_device;

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRIVER_NAME);
	if (ret < 0)
		goto err_pci_disable_device;

	regs = pci_iomap(pdev, 0, 0);
	if (!regs) {
		ret = -EIO;
		goto err_pci_release_regions;
	}

	pt1 = kzalloc(sizeof(struct pt1), GFP_KERNEL);
	if (!pt1) {
		ret = -ENOMEM;
		goto err_pci_iounmap;
	}

	pt1->pdev = pdev;
	pt1->regs = regs;
	pci_set_drvdata(pdev, pt1);

	ret = pt1_init_adapters(pt1);
	if (ret < 0)
		goto err_kfree;

	mutex_init(&pt1->lock);

	pt1->power = 0;
	pt1->reset = 1;
	pt1_update_power(pt1);

	i2c_adap = &pt1->i2c_adap;
	i2c_adap->algo = &pt1_i2c_algo;
	i2c_adap->algo_data = NULL;
	i2c_adap->dev.parent = &pdev->dev;
	strcpy(i2c_adap->name, DRIVER_NAME);
	i2c_set_adapdata(i2c_adap, pt1);
	ret = i2c_add_adapter(i2c_adap);
	if (ret < 0)
		goto err_pt1_cleanup_adapters;

	pt1_i2c_init(pt1);
	pt1_i2c_wait(pt1);

	ret = pt1_sync(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	pt1_identify(pt1);

	ret = pt1_unlock(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_reset_pci(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_reset_ram(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_enable_ram(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	pt1_init_streams(pt1);

	pt1->power = 1;
	pt1_update_power(pt1);
	schedule_timeout_uninterruptible((HZ + 49) / 50);

	pt1->reset = 0;
	pt1_update_power(pt1);
	schedule_timeout_uninterruptible((HZ + 999) / 1000);

	ret = pt1_init_frontends(pt1);
	if (ret < 0)
		goto err_pt1_disable_ram;

	ret = pt1_init_tables(pt1);
	if (ret < 0)
		goto err_pt1_cleanup_frontends;

	return 0;

err_pt1_cleanup_frontends:
	pt1_cleanup_frontends(pt1);
err_pt1_disable_ram:
	pt1_disable_ram(pt1);
	pt1->power = 0;
	pt1->reset = 1;
	pt1_update_power(pt1);
err_i2c_del_adapter:
	i2c_del_adapter(i2c_adap);
err_pt1_cleanup_adapters:
	pt1_cleanup_adapters(pt1);
err_kfree:
	kfree(pt1);
err_pci_iounmap:
	pci_iounmap(pdev, regs);
err_pci_release_regions:
	pci_release_regions(pdev);
err_pci_disable_device:
	pci_disable_device(pdev);
err:
	return ret;
}

static struct pci_device_id pt1_id_table[] = {
	{ PCI_DEVICE(0x10ee, 0x211a) },
	{ PCI_DEVICE(0x10ee, 0x222a) },
	{ },
};
MODULE_DEVICE_TABLE(pci, pt1_id_table);

static struct pci_driver pt1_driver = {
	.name		= DRIVER_NAME,
	.probe		= pt1_probe,
	.remove		= pt1_remove,
	.id_table	= pt1_id_table,
};

module_pci_driver(pt1_driver);

MODULE_AUTHOR("Takahito HIRANO <hiranotaka@zng.info>");
MODULE_DESCRIPTION("Earthsoft PT1/PT2 Driver");
MODULE_LICENSE("GPL");