/*
 * driver for Earthsoft PT1/PT2
 *
 * Copyright (C) 2009 HIRANO Takahito <hiranotaka@zng.info>
 *
 * based on pt1dvr - http://pt1dvr.sourceforge.jp/
 * 	by Tomoaki Ishikawa <tomy@users.sourceforge.jp>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/ratelimit.h>

#include "dvbdev.h"
#include "dvb_demux.h"
#include "dmxdev.h"
#include "dvb_net.h"
#include "dvb_frontend.h"

#include "va1j5jf8007t.h"
#include "va1j5jf8007s.h"

#define DRIVER_NAME "earth-pt1"

#define PT1_PAGE_SHIFT 12
#define PT1_PAGE_SIZE (1 << PT1_PAGE_SHIFT)
#define PT1_NR_UPACKETS 1024
#define PT1_NR_BUFS 511

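/*
 * DMA ring layout (inferred from pt1_init_table() and pt1_filter()): the
 * hardware walks a circular list of table pages, each holding the PFN of
 * the next table plus the PFNs of PT1_NR_BUFS buffer pages.  Every buffer
 * page receives PT1_NR_UPACKETS little-endian 32-bit "micro packets"; the
 * last slot doubles as a completion marker that pt1_filter() clears once
 * the page has been consumed.
 */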
struct pt1_buffer_page {
	__le32 upackets[PT1_NR_UPACKETS];
};

struct pt1_table_page {
	__le32 next_pfn;
	__le32 buf_pfns[PT1_NR_BUFS];
};

struct pt1_buffer {
	struct pt1_buffer_page *page;
	dma_addr_t addr;
};

struct pt1_table {
	struct pt1_table_page *page;
	dma_addr_t addr;
	struct pt1_buffer bufs[PT1_NR_BUFS];
};

#define PT1_NR_ADAPS 4

struct pt1_adapter;

struct pt1 {
	struct pci_dev *pdev;
	void __iomem *regs;
	struct i2c_adapter i2c_adap;
	int i2c_running;
	struct pt1_adapter *adaps[PT1_NR_ADAPS];
	struct pt1_table *tables;
	struct task_struct *kthread;
	int table_index;
	int buf_index;

	struct mutex lock;
	int power;
	int reset;
};

struct pt1_adapter {
	struct pt1 *pt1;
	int index;

	u8 *buf;
	int upacket_count;
	int packet_count;
	int st_count;

	struct dvb_adapter adap;
	struct dvb_demux demux;
	int users;
	struct dmxdev dmxdev;
	struct dvb_frontend *fe;
	int (*orig_set_voltage)(struct dvb_frontend *fe,
				enum fe_sec_voltage voltage);
	int (*orig_sleep)(struct dvb_frontend *fe);
	int (*orig_init)(struct dvb_frontend *fe);

	enum fe_sec_voltage voltage;
	int sleep;
};

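/* The board exposes 32-bit registers in BAR 0; "reg" is a word index. */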
static void pt1_write_reg(struct pt1 *pt1, int reg, u32 data)
{
	writel(data, pt1->regs + reg * 4);
}

static u32 pt1_read_reg(struct pt1 *pt1, int reg)
{
	return readl(pt1->regs + reg * 4);
}

static int pt1_nr_tables = 8;
module_param_named(nr_tables, pt1_nr_tables, int, 0);

static void pt1_increment_table_count(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x00000020);
}

static void pt1_init_table_count(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x00000010);
}

static void pt1_register_tables(struct pt1 *pt1, u32 first_pfn)
{
	pt1_write_reg(pt1, 5, first_pfn);
	pt1_write_reg(pt1, 0, 0x0c000040);
}

static void pt1_unregister_tables(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x08080000);
}

static int pt1_sync(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 57; i++) {
		if (pt1_read_reg(pt1, 0) & 0x20000000)
			return 0;
		pt1_write_reg(pt1, 0, 0x00000008);
	}
	dev_err(&pt1->pdev->dev, "could not sync\n");
	return -EIO;
}

static u64 pt1_identify(struct pt1 *pt1)
{
	int i;
	u64 id;
	id = 0;
	for (i = 0; i < 57; i++) {
		id |= (u64)(pt1_read_reg(pt1, 0) >> 30 & 1) << i;
		pt1_write_reg(pt1, 0, 0x00000008);
	}
	return id;
}

static int pt1_unlock(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x00000008);
	for (i = 0; i < 3; i++) {
		if (pt1_read_reg(pt1, 0) & 0x80000000)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	dev_err(&pt1->pdev->dev, "could not unlock\n");
	return -EIO;
}

static int pt1_reset_pci(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x01010000);
	pt1_write_reg(pt1, 0, 0x01000000);
	for (i = 0; i < 10; i++) {
		if (pt1_read_reg(pt1, 0) & 0x00000001)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	dev_err(&pt1->pdev->dev, "could not reset PCI\n");
	return -EIO;
}

static int pt1_reset_ram(struct pt1 *pt1)
{
	int i;
	pt1_write_reg(pt1, 0, 0x02020000);
	pt1_write_reg(pt1, 0, 0x02000000);
	for (i = 0; i < 10; i++) {
		if (pt1_read_reg(pt1, 0) & 0x00000002)
			return 0;
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	dev_err(&pt1->pdev->dev, "could not reset RAM\n");
	return -EIO;
}

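/*
 * RAM bring-up: pt1_do_enable_ram() pokes bit 1 of register 0 and waits for
 * the state of bit 2 to change, and pt1_enable_ram() repeats that for a
 * device specific number of iterations (128 on the PT1, 166 on the PT2).
 * What each iteration corresponds to in the hardware is not documented here.
 */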
static int pt1_do_enable_ram(struct pt1 *pt1)
{
	int i, j;
	u32 status;
	status = pt1_read_reg(pt1, 0) & 0x00000004;
	pt1_write_reg(pt1, 0, 0x00000002);
	for (i = 0; i < 10; i++) {
		for (j = 0; j < 1024; j++) {
			if ((pt1_read_reg(pt1, 0) & 0x00000004) != status)
				return 0;
		}
		schedule_timeout_uninterruptible((HZ + 999) / 1000);
	}
	dev_err(&pt1->pdev->dev, "could not enable RAM\n");
	return -EIO;
}

static int pt1_enable_ram(struct pt1 *pt1)
{
	int i, ret;
	int phase;
	schedule_timeout_uninterruptible((HZ + 999) / 1000);
	phase = pt1->pdev->device == 0x211a ? 128 : 166;
	for (i = 0; i < phase; i++) {
		ret = pt1_do_enable_ram(pt1);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static void pt1_disable_ram(struct pt1 *pt1)
{
	pt1_write_reg(pt1, 0, 0x0b0b0000);
}

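/*
 * Per-stream enable lives in register 2.  Bit (index + 8) appears to act as
 * a write mask selecting the stream, while bit (index) carries the new
 * enable value.
 */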
static void pt1_set_stream(struct pt1 *pt1, int index, int enabled)
{
	pt1_write_reg(pt1, 2, 1 << (index + 8) | enabled << index);
}

static void pt1_init_streams(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_set_stream(pt1, i, 0);
}

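/*
 * Micro packet format, as decoded below: bits 31:29 hold the stream
 * (adapter) index + 1, bits 28:26 a 3-bit sequence counter, bit 25 a
 * start-of-packet flag, bit 24 an overflow flag, and bits 23:0 up to three
 * payload bytes.  63 micro packets rebuild one 188-byte TS packet (the 63rd
 * contributes only two bytes), and 21 packets are batched per adapter
 * buffer before being fed to the software demux.
 */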
static int pt1_filter(struct pt1 *pt1, struct pt1_buffer_page *page)
{
	u32 upacket;
	int i;
	int index;
	struct pt1_adapter *adap;
	int offset;
	u8 *buf;
	int sc;

	if (!page->upackets[PT1_NR_UPACKETS - 1])
		return 0;

	for (i = 0; i < PT1_NR_UPACKETS; i++) {
		upacket = le32_to_cpu(page->upackets[i]);
		index = (upacket >> 29) - 1;
		if (index < 0 || index >= PT1_NR_ADAPS)
			continue;

		adap = pt1->adaps[index];
		if (upacket >> 25 & 1)
			adap->upacket_count = 0;
		else if (!adap->upacket_count)
			continue;

		if (upacket >> 24 & 1)
			printk_ratelimited(KERN_INFO "earth-pt1: device buffer overflowing. table[%d] buf[%d]\n",
				pt1->table_index, pt1->buf_index);
		sc = upacket >> 26 & 0x7;
		if (adap->st_count != -1 && sc != ((adap->st_count + 1) & 0x7))
			printk_ratelimited(KERN_INFO "earth-pt1: data loss in streamID(adapter)[%d]\n",
					   index);
		adap->st_count = sc;

		buf = adap->buf;
		offset = adap->packet_count * 188 + adap->upacket_count * 3;
		buf[offset] = upacket >> 16;
		buf[offset + 1] = upacket >> 8;
		if (adap->upacket_count != 62)
			buf[offset + 2] = upacket;

		if (++adap->upacket_count >= 63) {
			adap->upacket_count = 0;
			if (++adap->packet_count >= 21) {
				dvb_dmx_swfilter_packets(&adap->demux, buf, 21);
				adap->packet_count = 0;
			}
		}
	}

	page->upackets[PT1_NR_UPACKETS - 1] = 0;
	return 1;
}

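/*
 * Polling consumer: a single freezable kthread walks the buffer ring and
 * sleeps briefly whenever the current page has not yet been completed by
 * the hardware.
 */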
static int pt1_thread(void *data)
{
	struct pt1 *pt1;
	struct pt1_buffer_page *page;

	pt1 = data;
	set_freezable();

	while (!kthread_should_stop()) {
		try_to_freeze();

		page = pt1->tables[pt1->table_index].bufs[pt1->buf_index].page;
		if (!pt1_filter(pt1, page)) {
			schedule_timeout_interruptible((HZ + 999) / 1000);
			continue;
		}

		if (++pt1->buf_index >= PT1_NR_BUFS) {
			pt1_increment_table_count(pt1);
			pt1->buf_index = 0;
			if (++pt1->table_index >= pt1_nr_tables)
				pt1->table_index = 0;
		}
	}

	return 0;
}

static void pt1_free_page(struct pt1 *pt1, void *page, dma_addr_t addr)
{
	dma_free_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, page, addr);
}

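/*
 * Table and buffer pages are handed to the hardware as 32-bit page frame
 * numbers, hence the checks that every DMA page is 4 KiB aligned and that
 * its PFN fits in 32 bits.
 */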
static void *pt1_alloc_page(struct pt1 *pt1, dma_addr_t *addrp, u32 *pfnp)
{
	void *page;
	dma_addr_t addr;

	page = dma_alloc_coherent(&pt1->pdev->dev, PT1_PAGE_SIZE, &addr,
				  GFP_KERNEL);
	if (page == NULL)
		return NULL;

	BUG_ON(addr & (PT1_PAGE_SIZE - 1));
	BUG_ON(addr >> PT1_PAGE_SHIFT >> 31 >> 1);

	*addrp = addr;
	*pfnp = addr >> PT1_PAGE_SHIFT;
	return page;
}

static void pt1_cleanup_buffer(struct pt1 *pt1, struct pt1_buffer *buf)
{
	pt1_free_page(pt1, buf->page, buf->addr);
}

static int
pt1_init_buffer(struct pt1 *pt1, struct pt1_buffer *buf, u32 *pfnp)
{
	struct pt1_buffer_page *page;
	dma_addr_t addr;

	page = pt1_alloc_page(pt1, &addr, pfnp);
	if (page == NULL)
		return -ENOMEM;

	page->upackets[PT1_NR_UPACKETS - 1] = 0;

	buf->page = page;
	buf->addr = addr;
	return 0;
}

static void pt1_cleanup_table(struct pt1 *pt1, struct pt1_table *table)
{
	int i;

	for (i = 0; i < PT1_NR_BUFS; i++)
		pt1_cleanup_buffer(pt1, &table->bufs[i]);

	pt1_free_page(pt1, table->page, table->addr);
}

static int
pt1_init_table(struct pt1 *pt1, struct pt1_table *table, u32 *pfnp)
{
	struct pt1_table_page *page;
	dma_addr_t addr;
	int i, ret;
	u32 buf_pfn;

	page = pt1_alloc_page(pt1, &addr, pfnp);
	if (page == NULL)
		return -ENOMEM;

	for (i = 0; i < PT1_NR_BUFS; i++) {
		ret = pt1_init_buffer(pt1, &table->bufs[i], &buf_pfn);
		if (ret < 0)
			goto err;

		page->buf_pfns[i] = cpu_to_le32(buf_pfn);
	}

	pt1_increment_table_count(pt1);
	table->page = page;
	table->addr = addr;
	return 0;

err:
	while (i--)
		pt1_cleanup_buffer(pt1, &table->bufs[i]);

	pt1_free_page(pt1, page, addr);
	return ret;
}

static void pt1_cleanup_tables(struct pt1 *pt1)
{
	struct pt1_table *tables;
	int i;

	tables = pt1->tables;
	pt1_unregister_tables(pt1);

	for (i = 0; i < pt1_nr_tables; i++)
		pt1_cleanup_table(pt1, &tables[i]);

	vfree(tables);
}

static int pt1_init_tables(struct pt1 *pt1)
{
	struct pt1_table *tables;
	int i, ret;
	u32 first_pfn, pfn;

	tables = vmalloc(sizeof(struct pt1_table) * pt1_nr_tables);
	if (tables == NULL)
		return -ENOMEM;

	pt1_init_table_count(pt1);

	i = 0;
	if (pt1_nr_tables) {
		ret = pt1_init_table(pt1, &tables[0], &first_pfn);
		if (ret)
			goto err;
		i++;
	}

	while (i < pt1_nr_tables) {
		ret = pt1_init_table(pt1, &tables[i], &pfn);
		if (ret)
			goto err;
		tables[i - 1].page->next_pfn = cpu_to_le32(pfn);
		i++;
	}

	tables[pt1_nr_tables - 1].page->next_pfn = cpu_to_le32(first_pfn);

	pt1_register_tables(pt1, first_pfn);
	pt1->tables = tables;
	return 0;

err:
	while (i--)
		pt1_cleanup_table(pt1, &tables[i]);

	vfree(tables);
	return ret;
}

static int pt1_start_polling(struct pt1 *pt1)
{
	int ret = 0;

	mutex_lock(&pt1->lock);
	if (!pt1->kthread) {
		pt1->kthread = kthread_run(pt1_thread, pt1, "earth-pt1");
		if (IS_ERR(pt1->kthread)) {
			ret = PTR_ERR(pt1->kthread);
			pt1->kthread = NULL;
		}
	}
	mutex_unlock(&pt1->lock);
	return ret;
}

static int pt1_start_feed(struct dvb_demux_feed *feed)
{
	struct pt1_adapter *adap;
	adap = container_of(feed->demux, struct pt1_adapter, demux);
	if (!adap->users++) {
		int ret;

		ret = pt1_start_polling(adap->pt1);
		if (ret)
			return ret;
		pt1_set_stream(adap->pt1, adap->index, 1);
	}
	return 0;
}

static void pt1_stop_polling(struct pt1 *pt1)
{
	int i, count;

	mutex_lock(&pt1->lock);
	for (i = 0, count = 0; i < PT1_NR_ADAPS; i++)
		count += pt1->adaps[i]->users;

	if (count == 0 && pt1->kthread) {
		kthread_stop(pt1->kthread);
		pt1->kthread = NULL;
	}
	mutex_unlock(&pt1->lock);
}

static int pt1_stop_feed(struct dvb_demux_feed *feed)
{
	struct pt1_adapter *adap;
	adap = container_of(feed->demux, struct pt1_adapter, demux);
	if (!--adap->users) {
		pt1_set_stream(adap->pt1, adap->index, 0);
		pt1_stop_polling(adap->pt1);
	}
	return 0;
}

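/*
 * Register 1 collects the power-related bits: pt1->power in bit 0, the
 * inverted reset state in bit 3, the LNB supply selection in bits 1-2, and
 * per-tuner sleep_bits that are currently always asserted regardless of
 * adap->sleep (see the XXX note below).
 */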
static void
pt1_update_power(struct pt1 *pt1)
{
	int bits;
	int i;
	struct pt1_adapter *adap;
	static const int sleep_bits[] = {
		1 << 4,
		1 << 6 | 1 << 7,
		1 << 5,
		1 << 6 | 1 << 8,
	};

	bits = pt1->power | !pt1->reset << 3;
	mutex_lock(&pt1->lock);
	for (i = 0; i < PT1_NR_ADAPS; i++) {
		adap = pt1->adaps[i];
		switch (adap->voltage) {
		case SEC_VOLTAGE_13: /* actually 11V */
			bits |= 1 << 1;
			break;
		case SEC_VOLTAGE_18: /* actually 15V */
			bits |= 1 << 1 | 1 << 2;
			break;
		default:
			break;
		}

		/* XXX: The bits should be changed depending on adap->sleep. */
		bits |= sleep_bits[i];
	}
	pt1_write_reg(pt1, 1, bits);
	mutex_unlock(&pt1->lock);
}

static int pt1_set_voltage(struct dvb_frontend *fe, enum fe_sec_voltage voltage)
{
	struct pt1_adapter *adap;

	adap = container_of(fe->dvb, struct pt1_adapter, adap);
	adap->voltage = voltage;
	pt1_update_power(adap->pt1);

	if (adap->orig_set_voltage)
		return adap->orig_set_voltage(fe, voltage);
	else
		return 0;
}

static int pt1_sleep(struct dvb_frontend *fe)
{
	struct pt1_adapter *adap;

	adap = container_of(fe->dvb, struct pt1_adapter, adap);
	adap->sleep = 1;
	pt1_update_power(adap->pt1);

	if (adap->orig_sleep)
		return adap->orig_sleep(fe);
	else
		return 0;
}

static int pt1_wakeup(struct dvb_frontend *fe)
{
	struct pt1_adapter *adap;

	adap = container_of(fe->dvb, struct pt1_adapter, adap);
	adap->sleep = 0;
	pt1_update_power(adap->pt1);
	schedule_timeout_uninterruptible((HZ + 999) / 1000);

	if (adap->orig_init)
		return adap->orig_init(fe);
	else
		return 0;
}

static void pt1_free_adapter(struct pt1_adapter *adap)
{
	adap->demux.dmx.close(&adap->demux.dmx);
	dvb_dmxdev_release(&adap->dmxdev);
	dvb_dmx_release(&adap->demux);
	dvb_unregister_adapter(&adap->adap);
	free_page((unsigned long)adap->buf);
	kfree(adap);
}

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

static struct pt1_adapter *
pt1_alloc_adapter(struct pt1 *pt1)
{
	struct pt1_adapter *adap;
	void *buf;
	struct dvb_adapter *dvb_adap;
	struct dvb_demux *demux;
	struct dmxdev *dmxdev;
	int ret;

	adap = kzalloc(sizeof(struct pt1_adapter), GFP_KERNEL);
	if (!adap) {
		ret = -ENOMEM;
		goto err;
	}

	adap->pt1 = pt1;

	adap->voltage = SEC_VOLTAGE_OFF;
	adap->sleep = 1;

	buf = (u8 *)__get_free_page(GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_kfree;
	}

	adap->buf = buf;
	adap->upacket_count = 0;
	adap->packet_count = 0;
	adap->st_count = -1;

	dvb_adap = &adap->adap;
	dvb_adap->priv = adap;
	ret = dvb_register_adapter(dvb_adap, DRIVER_NAME, THIS_MODULE,
				   &pt1->pdev->dev, adapter_nr);
	if (ret < 0)
		goto err_free_page;

	demux = &adap->demux;
	demux->dmx.capabilities = DMX_TS_FILTERING | DMX_SECTION_FILTERING;
	demux->priv = adap;
	demux->feednum = 256;
	demux->filternum = 256;
	demux->start_feed = pt1_start_feed;
	demux->stop_feed = pt1_stop_feed;
	demux->write_to_decoder = NULL;
	ret = dvb_dmx_init(demux);
	if (ret < 0)
		goto err_unregister_adapter;

	dmxdev = &adap->dmxdev;
	dmxdev->filternum = 256;
	dmxdev->demux = &demux->dmx;
	dmxdev->capabilities = 0;
	ret = dvb_dmxdev_init(dmxdev, dvb_adap);
	if (ret < 0)
		goto err_dmx_release;

	return adap;

err_dmx_release:
	dvb_dmx_release(demux);
err_unregister_adapter:
	dvb_unregister_adapter(dvb_adap);
err_free_page:
	free_page((unsigned long)buf);
err_kfree:
	kfree(adap);
err:
	return ERR_PTR(ret);
}

static void pt1_cleanup_adapters(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_free_adapter(pt1->adaps[i]);
}

static int pt1_init_adapters(struct pt1 *pt1)
{
	int i;
	struct pt1_adapter *adap;
	int ret;

	for (i = 0; i < PT1_NR_ADAPS; i++) {
		adap = pt1_alloc_adapter(pt1);
		if (IS_ERR(adap)) {
			ret = PTR_ERR(adap);
			goto err;
		}

		adap->index = i;
		pt1->adaps[i] = adap;
	}
	return 0;

err:
	while (i--)
		pt1_free_adapter(pt1->adaps[i]);

	return ret;
}

static void pt1_cleanup_frontend(struct pt1_adapter *adap)
{
	dvb_unregister_frontend(adap->fe);
}

static int pt1_init_frontend(struct pt1_adapter *adap, struct dvb_frontend *fe)
{
	int ret;

	adap->orig_set_voltage = fe->ops.set_voltage;
	adap->orig_sleep = fe->ops.sleep;
	adap->orig_init = fe->ops.init;
	fe->ops.set_voltage = pt1_set_voltage;
	fe->ops.sleep = pt1_sleep;
	fe->ops.init = pt1_wakeup;

	ret = dvb_register_frontend(&adap->adap, fe);
	if (ret < 0)
		return ret;

	adap->fe = fe;
	return 0;
}

static void pt1_cleanup_frontends(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < PT1_NR_ADAPS; i++)
		pt1_cleanup_frontend(pt1->adaps[i]);
}

struct pt1_config {
	struct va1j5jf8007s_config va1j5jf8007s_config;
	struct va1j5jf8007t_config va1j5jf8007t_config;
};

static const struct pt1_config pt1_configs[2] = {
	{
		{
			.demod_address = 0x1b,
			.frequency = VA1J5JF8007S_20MHZ,
		},
		{
			.demod_address = 0x1a,
			.frequency = VA1J5JF8007T_20MHZ,
		},
	}, {
		{
			.demod_address = 0x19,
			.frequency = VA1J5JF8007S_20MHZ,
		},
		{
			.demod_address = 0x18,
			.frequency = VA1J5JF8007T_20MHZ,
		},
	},
};

static const struct pt1_config pt2_configs[2] = {
	{
		{
			.demod_address = 0x1b,
			.frequency = VA1J5JF8007S_25MHZ,
		},
		{
			.demod_address = 0x1a,
			.frequency = VA1J5JF8007T_25MHZ,
		},
	}, {
		{
			.demod_address = 0x19,
			.frequency = VA1J5JF8007S_25MHZ,
		},
		{
			.demod_address = 0x18,
			.frequency = VA1J5JF8007T_25MHZ,
		},
	},
};

static int pt1_init_frontends(struct pt1 *pt1)
{
	int i, j;
	struct i2c_adapter *i2c_adap;
	const struct pt1_config *configs, *config;
	struct dvb_frontend *fe[4];
	int ret;

	i = 0;
	j = 0;

	i2c_adap = &pt1->i2c_adap;
	configs = pt1->pdev->device == 0x211a ? pt1_configs : pt2_configs;
	do {
		config = &configs[i / 2];

		fe[i] = va1j5jf8007s_attach(&config->va1j5jf8007s_config,
					    i2c_adap);
		if (!fe[i]) {
			ret = -ENODEV; /* This does not sound nice... */
			goto err;
		}
		i++;

		fe[i] = va1j5jf8007t_attach(&config->va1j5jf8007t_config,
					    i2c_adap);
		if (!fe[i]) {
			ret = -ENODEV;
			goto err;
		}
		i++;

		ret = va1j5jf8007s_prepare(fe[i - 2]);
		if (ret < 0)
			goto err;

		ret = va1j5jf8007t_prepare(fe[i - 1]);
		if (ret < 0)
			goto err;

	} while (i < 4);

	do {
		ret = pt1_init_frontend(pt1->adaps[j], fe[j]);
		if (ret < 0)
			goto err;
	} while (++j < 4);

	return 0;

err:
	while (i-- > j)
		fe[i]->ops.release(fe[i]);

	while (j--)
		dvb_unregister_frontend(fe[j]);

	return ret;
}

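/*
 * I2C is driven by what looks like a small sequencer in the device: each
 * write to register 4 stores one instruction word (target address in the
 * instruction memory, busy flag, read enable, inverted SCL/SDA levels and
 * the address of the next instruction).  The helpers below compose a
 * bit-banged program; pt1_i2c_end() starts it and waits for completion.
 */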
static void pt1_i2c_emit(struct pt1 *pt1, int addr, int busy, int read_enable,
			 int clock, int data, int next_addr)
{
	pt1_write_reg(pt1, 4, addr << 18 | busy << 13 | read_enable << 12 |
		      !clock << 11 | !data << 10 | next_addr);
}

static void pt1_i2c_write_bit(struct pt1 *pt1, int addr, int *addrp, int data)
{
	pt1_i2c_emit(pt1, addr,     1, 0, 0, data, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, data, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, data, addr + 3);
	*addrp = addr + 3;
}

static void pt1_i2c_read_bit(struct pt1 *pt1, int addr, int *addrp)
{
	pt1_i2c_emit(pt1, addr,     1, 0, 0, 1, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 1, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 1, 1, 1, addr + 3);
	pt1_i2c_emit(pt1, addr + 3, 1, 0, 0, 1, addr + 4);
	*addrp = addr + 4;
}

static void pt1_i2c_write_byte(struct pt1 *pt1, int addr, int *addrp, int data)
{
	int i;
	for (i = 0; i < 8; i++)
		pt1_i2c_write_bit(pt1, addr, &addr, data >> (7 - i) & 1);
	pt1_i2c_write_bit(pt1, addr, &addr, 1);
	*addrp = addr;
}

static void pt1_i2c_read_byte(struct pt1 *pt1, int addr, int *addrp, int last)
{
	int i;
	for (i = 0; i < 8; i++)
		pt1_i2c_read_bit(pt1, addr, &addr);
	pt1_i2c_write_bit(pt1, addr, &addr, last);
	*addrp = addr;
}

static void pt1_i2c_prepare(struct pt1 *pt1, int addr, int *addrp)
{
	pt1_i2c_emit(pt1, addr,     1, 0, 1, 1, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 0, 0, addr + 3);
	*addrp = addr + 3;
}

static void
pt1_i2c_write_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
	int i;
	pt1_i2c_prepare(pt1, addr, &addr);
	pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1);
	for (i = 0; i < msg->len; i++)
		pt1_i2c_write_byte(pt1, addr, &addr, msg->buf[i]);
	*addrp = addr;
}

static void
pt1_i2c_read_msg(struct pt1 *pt1, int addr, int *addrp, struct i2c_msg *msg)
{
	int i;
	pt1_i2c_prepare(pt1, addr, &addr);
	pt1_i2c_write_byte(pt1, addr, &addr, msg->addr << 1 | 1);
	for (i = 0; i < msg->len; i++)
		pt1_i2c_read_byte(pt1, addr, &addr, i == msg->len - 1);
	*addrp = addr;
}

static int pt1_i2c_end(struct pt1 *pt1, int addr)
{
	pt1_i2c_emit(pt1, addr,     1, 0, 0, 0, addr + 1);
	pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
	pt1_i2c_emit(pt1, addr + 2, 1, 0, 1, 1, 0);

	pt1_write_reg(pt1, 0, 0x00000004);
	do {
		if (signal_pending(current))
			return -EINTR;
		schedule_timeout_interruptible((HZ + 999) / 1000);
	} while (pt1_read_reg(pt1, 0) & 0x00000080);
	return 0;
}

static void pt1_i2c_begin(struct pt1 *pt1, int *addrp)
{
	int addr;
	addr = 0;

	pt1_i2c_emit(pt1, addr,     0, 0, 1, 1, addr /* itself */);
	addr = addr + 1;

	if (!pt1->i2c_running) {
		pt1_i2c_emit(pt1, addr,     1, 0, 1, 1, addr + 1);
		pt1_i2c_emit(pt1, addr + 1, 1, 0, 1, 0, addr + 2);
		addr = addr + 2;
		pt1->i2c_running = 1;
	}
	*addrp = addr;
}

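/*
 * Only two transaction shapes are supported: a plain write, and a write
 * followed by a read of at most four bytes (the read data is returned in
 * register 2).  Standalone read messages are rejected.
 */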
static int pt1_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
	struct pt1 *pt1;
	int i;
	struct i2c_msg *msg, *next_msg;
	int addr, ret;
	u16 len;
	u32 word;

	pt1 = i2c_get_adapdata(adap);

	for (i = 0; i < num; i++) {
		msg = &msgs[i];
		if (msg->flags & I2C_M_RD)
			return -ENOTSUPP;

		if (i + 1 < num)
			next_msg = &msgs[i + 1];
		else
			next_msg = NULL;

		if (next_msg && next_msg->flags & I2C_M_RD) {
			i++;

			len = next_msg->len;
			if (len > 4)
				return -ENOTSUPP;

			pt1_i2c_begin(pt1, &addr);
			pt1_i2c_write_msg(pt1, addr, &addr, msg);
			pt1_i2c_read_msg(pt1, addr, &addr, next_msg);
			ret = pt1_i2c_end(pt1, addr);
			if (ret < 0)
				return ret;

			word = pt1_read_reg(pt1, 2);
			while (len--) {
				next_msg->buf[len] = word;
				word >>= 8;
			}
		} else {
			pt1_i2c_begin(pt1, &addr);
			pt1_i2c_write_msg(pt1, addr, &addr, msg);
			ret = pt1_i2c_end(pt1, addr);
			if (ret < 0)
				return ret;
		}
	}

	return num;
}

static u32 pt1_i2c_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C;
}

static const struct i2c_algorithm pt1_i2c_algo = {
	.master_xfer = pt1_i2c_xfer,
	.functionality = pt1_i2c_func,
};

static void pt1_i2c_wait(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 128; i++)
		pt1_i2c_emit(pt1, 0, 0, 0, 1, 1, 0);
}

static void pt1_i2c_init(struct pt1 *pt1)
{
	int i;
	for (i = 0; i < 1024; i++)
		pt1_i2c_emit(pt1, i, 0, 0, 1, 1, 0);
}

static void pt1_remove(struct pci_dev *pdev)
{
	struct pt1 *pt1;
	void __iomem *regs;

	pt1 = pci_get_drvdata(pdev);
	regs = pt1->regs;

	if (pt1->kthread)
		kthread_stop(pt1->kthread);
	pt1_cleanup_tables(pt1);
	pt1_cleanup_frontends(pt1);
	pt1_disable_ram(pt1);
	pt1->power = 0;
	pt1->reset = 1;
	pt1_update_power(pt1);
	pt1_cleanup_adapters(pt1);
	i2c_del_adapter(&pt1->i2c_adap);
	kfree(pt1);
	pci_iounmap(pdev, regs);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

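/*
 * Probe brings the board up in this order: PCI/DMA setup, register
 * mapping, DVB adapter allocation, I2C registration, device sync/unlock,
 * PCI and RAM reset, RAM bring-up, power-up, frontend attach and finally
 * the DMA table ring.
 */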
static int pt1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;
	void __iomem *regs;
	struct pt1 *pt1;
	struct i2c_adapter *i2c_adap;

	ret = pci_enable_device(pdev);
	if (ret < 0)
		goto err;

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret < 0)
		goto err_pci_disable_device;

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRIVER_NAME);
	if (ret < 0)
		goto err_pci_disable_device;

	regs = pci_iomap(pdev, 0, 0);
	if (!regs) {
		ret = -EIO;
		goto err_pci_release_regions;
	}

	pt1 = kzalloc(sizeof(struct pt1), GFP_KERNEL);
	if (!pt1) {
		ret = -ENOMEM;
		goto err_pci_iounmap;
	}

	pt1->pdev = pdev;
	pt1->regs = regs;
	pci_set_drvdata(pdev, pt1);

	ret = pt1_init_adapters(pt1);
	if (ret < 0)
		goto err_kfree;

	mutex_init(&pt1->lock);

	pt1->power = 0;
	pt1->reset = 1;
	pt1_update_power(pt1);

	i2c_adap = &pt1->i2c_adap;
	i2c_adap->algo = &pt1_i2c_algo;
	i2c_adap->algo_data = NULL;
	i2c_adap->dev.parent = &pdev->dev;
	strcpy(i2c_adap->name, DRIVER_NAME);
	i2c_set_adapdata(i2c_adap, pt1);
	ret = i2c_add_adapter(i2c_adap);
	if (ret < 0)
		goto err_pt1_cleanup_adapters;

	pt1_i2c_init(pt1);
	pt1_i2c_wait(pt1);

	ret = pt1_sync(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	pt1_identify(pt1);

	ret = pt1_unlock(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_reset_pci(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_reset_ram(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	ret = pt1_enable_ram(pt1);
	if (ret < 0)
		goto err_i2c_del_adapter;

	pt1_init_streams(pt1);

	pt1->power = 1;
	pt1_update_power(pt1);
	schedule_timeout_uninterruptible((HZ + 49) / 50);

	pt1->reset = 0;
	pt1_update_power(pt1);
	schedule_timeout_uninterruptible((HZ + 999) / 1000);

	ret = pt1_init_frontends(pt1);
	if (ret < 0)
		goto err_pt1_disable_ram;

	ret = pt1_init_tables(pt1);
	if (ret < 0)
		goto err_pt1_cleanup_frontends;

	return 0;

err_pt1_cleanup_frontends:
	pt1_cleanup_frontends(pt1);
err_pt1_disable_ram:
	pt1_disable_ram(pt1);
	pt1->power = 0;
	pt1->reset = 1;
	pt1_update_power(pt1);
err_i2c_del_adapter:
	i2c_del_adapter(i2c_adap);
err_pt1_cleanup_adapters:
	pt1_cleanup_adapters(pt1);
err_kfree:
	kfree(pt1);
err_pci_iounmap:
	pci_iounmap(pdev, regs);
err_pci_release_regions:
	pci_release_regions(pdev);
err_pci_disable_device:
	pci_disable_device(pdev);
err:
	return ret;
}

static struct pci_device_id pt1_id_table[] = {
	{ PCI_DEVICE(0x10ee, 0x211a) },
	{ PCI_DEVICE(0x10ee, 0x222a) },
	{ },
};
MODULE_DEVICE_TABLE(pci, pt1_id_table);

static struct pci_driver pt1_driver = {
	.name		= DRIVER_NAME,
	.probe		= pt1_probe,
	.remove		= pt1_remove,
	.id_table	= pt1_id_table,
};

module_pci_driver(pt1_driver);

MODULE_AUTHOR("Takahito HIRANO <hiranotaka@zng.info>");
MODULE_DESCRIPTION("Earthsoft PT1/PT2 Driver");
MODULE_LICENSE("GPL");