// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Driver for the Auvitek USB bridge
 *
 *  Copyright (c) 2008 Steven Toth <stoth@linuxtv.org>
 */

#include "au0828.h"

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/device.h>
#include <media/v4l2-common.h>
#include <media/tuner.h>

#include "au8522.h"
#include "xc5000.h"
#include "mxl5007t.h"
#include "tda18271.h"

static int preallocate_big_buffers;
module_param_named(preallocate_big_buffers, preallocate_big_buffers, int, 0644);
MODULE_PARM_DESC(preallocate_big_buffers, "Preallocate the larger transfer buffers at module load time");

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

#define _AU0828_BULKPIPE 0x83
#define _BULKPIPESIZE 0xe522

static u8 hauppauge_hvr950q_led_states[] = {
	0x00, /* off */
	0x02, /* yellow */
	0x04, /* green */
};

static struct au8522_led_config hauppauge_hvr950q_led_cfg = {
	.gpio_output = 0x00e0,
	.gpio_output_enable  = 0x6006,
	.gpio_output_disable = 0x0660,

	.gpio_leds = 0x00e2,
	.led_states  = hauppauge_hvr950q_led_states,
	.num_led_states = sizeof(hauppauge_hvr950q_led_states),

	.vsb8_strong   = 20 /* dB */ * 10,
	.qam64_strong  = 25 /* dB */ * 10,
	.qam256_strong = 32 /* dB */ * 10,
};

static struct au8522_config hauppauge_hvr950q_config = {
	.demod_address = 0x8e >> 1,
	.status_mode   = AU8522_DEMODLOCKING,
	.qam_if        = AU8522_IF_6MHZ,
	.vsb_if        = AU8522_IF_6MHZ,
	.led_cfg       = &hauppauge_hvr950q_led_cfg,
};

static struct au8522_config fusionhdtv7usb_config = {
	.demod_address = 0x8e >> 1,
	.status_mode   = AU8522_DEMODLOCKING,
	.qam_if        = AU8522_IF_6MHZ,
	.vsb_if        = AU8522_IF_6MHZ,
};

static struct au8522_config hauppauge_woodbury_config = {
	.demod_address = 0x8e >> 1,
	.status_mode   = AU8522_DEMODLOCKING,
	.qam_if        = AU8522_IF_4MHZ,
	.vsb_if        = AU8522_IF_3_25MHZ,
};

static struct xc5000_config hauppauge_xc5000a_config = {
	.i2c_address      = 0x61,
	.if_khz           = 6000,
	.chip_id          = XC5000A,
	.output_amp       = 0x8f,
};

static struct xc5000_config hauppauge_xc5000c_config = {
	.i2c_address      = 0x61,
	.if_khz           = 6000,
	.chip_id          = XC5000C,
	.output_amp       = 0x8f,
};

static struct mxl5007t_config mxl5007t_hvr950q_config = {
	.xtal_freq_hz = MxL_XTAL_24_MHZ,
	.if_freq_hz = MxL_IF_6_MHZ,
};

static struct tda18271_config hauppauge_woodbury_tunerconfig = {
	.gate    = TDA18271_GATE_DIGITAL,
};

static void au0828_restart_dvb_streaming(struct work_struct *work);

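/*
 * Watchdog timer callback: fires if no valid TS data was seen within
 * the timeout window and schedules a restart of the bulk streaming.
 */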
static void au0828_bulk_timeout(struct timer_list *t)
{
	struct au0828_dev *dev = from_timer(dev, t, bulk_timeout);

	dprintk(1, "%s called\n", __func__);
	dev->bulk_timeout_running = 0;
	schedule_work(&dev->restart_streaming);
}

/*-------------------------------------------------------------------*/
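/*
 * Bulk URB completion handler: checks the MPEG-TS sync byte (0x47),
 * feeds complete 188-byte packets to the software demux and resubmits
 * the URB. A corrupted stream triggers the restart_streaming worker.
 */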
static void urb_completion(struct urb *purb)
{
	struct au0828_dev *dev = purb->context;
	int ptype = usb_pipetype(purb->pipe);
	unsigned char *ptr;

	dprintk(2, "%s: %d\n", __func__, purb->actual_length);

	if (!dev) {
		dprintk(2, "%s: no dev!\n", __func__);
		return;
	}

	if (!dev->urb_streaming) {
		dprintk(2, "%s: not streaming!\n", __func__);
		return;
	}

	if (ptype != PIPE_BULK) {
		pr_err("%s: Unsupported URB type %d\n",
		       __func__, ptype);
		return;
	}

	/*
	 * See if the stream is corrupted (to work around a hardware
	 * bug where the stream gets misaligned)
	 */
	ptr = purb->transfer_buffer;
	if (purb->actual_length > 0 && ptr[0] != 0x47) {
		dprintk(1, "Need to restart streaming %02x len=%d!\n",
			ptr[0], purb->actual_length);
		schedule_work(&dev->restart_streaming);
		return;
	} else if (dev->bulk_timeout_running == 1) {
		/* The URB handler has fired, so cancel the timer that
		 * would otherwise restart the endpoint
		 */
		dprintk(1, "%s cancelling bulk timeout\n", __func__);
		dev->bulk_timeout_running = 0;
		del_timer(&dev->bulk_timeout);
	}

	/* Feed the transport payload into the kernel demux */
	dvb_dmx_swfilter_packets(&dev->dvb.demux,
		purb->transfer_buffer, purb->actual_length / 188);

	/* Clean the buffer before we requeue */
	memset(purb->transfer_buffer, 0, URB_BUFSIZE);

	/* Requeue URB */
	usb_submit_urb(purb, GFP_ATOMIC);
}

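/*
 * Kill and free all bulk URBs (and their transfer buffers, unless they
 * were preallocated at module load) and stop the bulk timeout timer.
 */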
static int stop_urb_transfer(struct au0828_dev *dev)
{
	int i;

	dprintk(2, "%s()\n", __func__);

	if (!dev->urb_streaming)
		return 0;

	if (dev->bulk_timeout_running == 1) {
		dev->bulk_timeout_running = 0;
		del_timer(&dev->bulk_timeout);
	}

	dev->urb_streaming = false;
	for (i = 0; i < URB_COUNT; i++) {
		if (dev->urbs[i]) {
			usb_kill_urb(dev->urbs[i]);
			if (!preallocate_big_buffers)
				kfree(dev->urbs[i]->transfer_buffer);

			usb_free_urb(dev->urbs[i]);
		}
	}

	return 0;
}

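/*
 * Allocate and submit the bulk URBs that carry TS data from the bridge.
 * Transfer buffers either come from the preallocated pool or are
 * allocated here, depending on the preallocate_big_buffers option.
 */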
static int start_urb_transfer(struct au0828_dev *dev)
{
	struct urb *purb;
	int i, ret;

	dprintk(2, "%s()\n", __func__);

	if (dev->urb_streaming) {
		dprintk(2, "%s: bulk xfer already running!\n", __func__);
		return 0;
	}

	for (i = 0; i < URB_COUNT; i++) {

		dev->urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
		if (!dev->urbs[i])
			return -ENOMEM;

		purb = dev->urbs[i];

		if (preallocate_big_buffers)
			purb->transfer_buffer = dev->dig_transfer_buffer[i];
		else
			purb->transfer_buffer = kzalloc(URB_BUFSIZE,
					GFP_KERNEL);

		if (!purb->transfer_buffer) {
			usb_free_urb(purb);
			dev->urbs[i] = NULL;
			ret = -ENOMEM;
			pr_err("%s: failed big buffer allocation, err = %d\n",
			       __func__, ret);
			return ret;
		}

		purb->status = -EINPROGRESS;
		usb_fill_bulk_urb(purb,
				  dev->usbdev,
				  usb_rcvbulkpipe(dev->usbdev,
					_AU0828_BULKPIPE),
				  purb->transfer_buffer,
				  URB_BUFSIZE,
				  urb_completion,
				  dev);

	}

	for (i = 0; i < URB_COUNT; i++) {
		ret = usb_submit_urb(dev->urbs[i], GFP_ATOMIC);
		if (ret != 0) {
			stop_urb_transfer(dev);
			pr_err("%s: failed urb submission, err = %d\n",
			       __func__, ret);
			return ret;
		}
	}

	dev->urb_streaming = true;

	/* If we don't see valid data within 1 second, restart the stream */
	mod_timer(&dev->bulk_timeout, jiffies + (HZ));
	dev->bulk_timeout_running = 1;

	return 0;
}

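/* Program the bridge registers that start the transport stream engine */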
static void au0828_start_transport(struct au0828_dev *dev)
{
	au0828_write(dev, 0x608, 0x90);
	au0828_write(dev, 0x609, 0x72);
	au0828_write(dev, 0x60a, 0x71);
	au0828_write(dev, 0x60b, 0x01);
}

static void au0828_stop_transport(struct au0828_dev *dev, int full_stop)
{
	if (full_stop) {
		au0828_write(dev, 0x608, 0x00);
		au0828_write(dev, 0x609, 0x00);
		au0828_write(dev, 0x60a, 0x00);
	}
	au0828_write(dev, 0x60b, 0x00);
}

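/*
 * dvb_demux start_feed callback: the first active feed starts the
 * transport and the bulk URB transfers; later feeds only bump the
 * feeding count.
 */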
static int au0828_dvb_start_feed(struct dvb_demux_feed *feed)
{
	struct dvb_demux *demux = feed->demux;
	struct au0828_dev *dev = demux->priv;
	struct au0828_dvb *dvb = &dev->dvb;
	int ret = 0;

	dprintk(1, "%s()\n", __func__);

	if (!demux->dmx.frontend)
		return -EINVAL;

	if (dvb->frontend) {
		mutex_lock(&dvb->lock);
		dvb->start_count++;
		dprintk(1, "%s(), start_count: %d, stop_count: %d\n", __func__,
			dvb->start_count, dvb->stop_count);
		if (dvb->feeding++ == 0) {
			/* Start transport */
			au0828_start_transport(dev);
			ret = start_urb_transfer(dev);
			if (ret < 0) {
				au0828_stop_transport(dev, 0);
				dvb->feeding--;	/* We ran out of memory... */
			}
		}
		mutex_unlock(&dvb->lock);
	}

	return ret;
}

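/*
 * dvb_demux stop_feed callback: when the last feed goes away, stop the
 * URB transfers and the transport.
 */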
static int au0828_dvb_stop_feed(struct dvb_demux_feed *feed)
{
	struct dvb_demux *demux = feed->demux;
	struct au0828_dev *dev = demux->priv;
	struct au0828_dvb *dvb = &dev->dvb;
	int ret = 0;

	dprintk(1, "%s()\n", __func__);

	if (dvb->frontend) {
		cancel_work_sync(&dev->restart_streaming);

		mutex_lock(&dvb->lock);
		dvb->stop_count++;
		dprintk(1, "%s(), start_count: %d, stop_count: %d\n", __func__,
			dvb->start_count, dvb->stop_count);
		if (dvb->feeding > 0) {
			dvb->feeding--;
			if (dvb->feeding == 0) {
				/* Stop transport */
				ret = stop_urb_transfer(dev);
				au0828_stop_transport(dev, 0);
			}
		}
		mutex_unlock(&dvb->lock);
	}

	return ret;
}

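/*
 * Worker used to recover from a misaligned/corrupted stream: tear the
 * URB transfers and the transport down, then bring them back up.
 */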
static void au0828_restart_dvb_streaming(struct work_struct *work)
{
	struct au0828_dev *dev = container_of(work, struct au0828_dev,
					      restart_streaming);
	struct au0828_dvb *dvb = &dev->dvb;

	if (!dev->urb_streaming)
		return;

	dprintk(1, "Restarting streaming...!\n");

	mutex_lock(&dvb->lock);

	/* Stop transport */
	stop_urb_transfer(dev);
	au0828_stop_transport(dev, 1);

	/* Start transport */
	au0828_start_transport(dev);
	start_urb_transfer(dev);

	mutex_unlock(&dvb->lock);
}

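/*
 * Wrapper around the demod's set_frontend op: pause streaming while
 * tuning and resume it afterwards if it was active.
 */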
static int au0828_set_frontend(struct dvb_frontend *fe)
{
	struct au0828_dev *dev = fe->dvb->priv;
	struct au0828_dvb *dvb = &dev->dvb;
	int ret, was_streaming;

	mutex_lock(&dvb->lock);
	was_streaming = dev->urb_streaming;
	if (was_streaming) {
		au0828_stop_transport(dev, 1);

		/*
		 * We can't hold the mutex here, as the restart_streaming
		 * work handler may also take it.
		 */
		mutex_unlock(&dvb->lock);
		cancel_work_sync(&dev->restart_streaming);
		mutex_lock(&dvb->lock);

		stop_urb_transfer(dev);
	}
	mutex_unlock(&dvb->lock);

	ret = dvb->set_frontend(fe);

	if (was_streaming) {
		mutex_lock(&dvb->lock);
		au0828_start_transport(dev);
		start_urb_transfer(dev);
		mutex_unlock(&dvb->lock);
	}

	return ret;
}

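/*
 * Register the DVB adapter, frontend, demux, dmxdev and net devices,
 * and hook our set_frontend wrapper into the frontend ops.
 */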
static int dvb_register(struct au0828_dev *dev)
{
	struct au0828_dvb *dvb = &dev->dvb;
	int result;

	dprintk(1, "%s()\n", __func__);

	if (preallocate_big_buffers) {
		int i;
		for (i = 0; i < URB_COUNT; i++) {
			dev->dig_transfer_buffer[i] = kzalloc(URB_BUFSIZE,
					GFP_KERNEL);

			if (!dev->dig_transfer_buffer[i]) {
				result = -ENOMEM;

				pr_err("failed buffer allocation (errno = %d)\n",
				       result);
				goto fail_adapter;
			}
		}
	}

	INIT_WORK(&dev->restart_streaming, au0828_restart_dvb_streaming);

	/* register adapter */
	result = dvb_register_adapter(&dvb->adapter,
				      KBUILD_MODNAME, THIS_MODULE,
				      &dev->usbdev->dev, adapter_nr);
	if (result < 0) {
		pr_err("dvb_register_adapter failed (errno = %d)\n",
		       result);
		goto fail_adapter;
	}

#ifdef CONFIG_MEDIA_CONTROLLER_DVB
	dvb->adapter.mdev = dev->media_dev;
#endif

	dvb->adapter.priv = dev;

	/* register frontend */
	result = dvb_register_frontend(&dvb->adapter, dvb->frontend);
	if (result < 0) {
		pr_err("dvb_register_frontend failed (errno = %d)\n",
		       result);
		goto fail_frontend;
	}

	/* Hook dvb frontend */
	dvb->set_frontend = dvb->frontend->ops.set_frontend;
	dvb->frontend->ops.set_frontend = au0828_set_frontend;

	/* register demux stuff */
	dvb->demux.dmx.capabilities =
		DMX_TS_FILTERING | DMX_SECTION_FILTERING |
		DMX_MEMORY_BASED_FILTERING;
	dvb->demux.priv       = dev;
	dvb->demux.filternum  = 256;
	dvb->demux.feednum    = 256;
	dvb->demux.start_feed = au0828_dvb_start_feed;
	dvb->demux.stop_feed  = au0828_dvb_stop_feed;
	result = dvb_dmx_init(&dvb->demux);
	if (result < 0) {
		pr_err("dvb_dmx_init failed (errno = %d)\n", result);
		goto fail_dmx;
	}

	dvb->dmxdev.filternum    = 256;
	dvb->dmxdev.demux        = &dvb->demux.dmx;
	dvb->dmxdev.capabilities = 0;
	result = dvb_dmxdev_init(&dvb->dmxdev, &dvb->adapter);
	if (result < 0) {
		pr_err("dvb_dmxdev_init failed (errno = %d)\n", result);
		goto fail_dmxdev;
	}

	dvb->fe_hw.source = DMX_FRONTEND_0;
	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
	if (result < 0) {
		pr_err("add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
		       result);
		goto fail_fe_hw;
	}

	dvb->fe_mem.source = DMX_MEMORY_FE;
	result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
	if (result < 0) {
		pr_err("add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
		       result);
		goto fail_fe_mem;
	}

	result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
	if (result < 0) {
		pr_err("connect_frontend failed (errno = %d)\n", result);
		goto fail_fe_conn;
	}

	/* register network adapter */
	dvb_net_init(&dvb->adapter, &dvb->net, &dvb->demux.dmx);

	dvb->start_count = 0;
	dvb->stop_count = 0;

	result = dvb_create_media_graph(&dvb->adapter, false);
	if (result < 0)
		goto fail_create_graph;

	return 0;

fail_create_graph:
	dvb_net_release(&dvb->net);
fail_fe_conn:
	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
fail_fe_mem:
	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
fail_fe_hw:
	dvb_dmxdev_release(&dvb->dmxdev);
fail_dmxdev:
	dvb_dmx_release(&dvb->demux);
fail_dmx:
	dvb_unregister_frontend(dvb->frontend);
fail_frontend:
	dvb_frontend_detach(dvb->frontend);
	dvb_unregister_adapter(&dvb->adapter);
fail_adapter:

	if (preallocate_big_buffers) {
		int i;
		for (i = 0; i < URB_COUNT; i++)
			kfree(dev->dig_transfer_buffer[i]);
	}

	return result;
}

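/* Tear down everything registered by dvb_register() */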
void au0828_dvb_unregister(struct au0828_dev *dev)
{
	struct au0828_dvb *dvb = &dev->dvb;

	dprintk(1, "%s()\n", __func__);

	if (dvb->frontend == NULL)
		return;

	cancel_work_sync(&dev->restart_streaming);

	dvb_net_release(&dvb->net);
	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
	dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
	dvb_dmxdev_release(&dvb->dmxdev);
	dvb_dmx_release(&dvb->demux);
	dvb_unregister_frontend(dvb->frontend);
	dvb_frontend_detach(dvb->frontend);
	dvb_unregister_adapter(&dvb->adapter);

	if (preallocate_big_buffers) {
		int i;
		for (i = 0; i < URB_COUNT; i++)
			kfree(dev->dig_transfer_buffer[i]);
	}
	dvb->frontend = NULL;
}

/* All the DVB attach calls go here; this function gets modified
 * for each new card. No other function in this file needs
 * to change.
 */
int au0828_dvb_register(struct au0828_dev *dev)
{
	struct au0828_dvb *dvb = &dev->dvb;
	int ret;

	dprintk(1, "%s()\n", __func__);

	/* init frontend */
	switch (dev->boardnr) {
	case AU0828_BOARD_HAUPPAUGE_HVR850:
	case AU0828_BOARD_HAUPPAUGE_HVR950Q:
		dvb->frontend = dvb_attach(au8522_attach,
				&hauppauge_hvr950q_config,
				&dev->i2c_adap);
		if (dvb->frontend != NULL)
			switch (dev->board.tuner_type) {
			default:
			case TUNER_XC5000:
				dvb_attach(xc5000_attach, dvb->frontend,
					   &dev->i2c_adap,
					   &hauppauge_xc5000a_config);
				break;
			case TUNER_XC5000C:
				dvb_attach(xc5000_attach, dvb->frontend,
					   &dev->i2c_adap,
					   &hauppauge_xc5000c_config);
				break;
			}
		break;
	case AU0828_BOARD_HAUPPAUGE_HVR950Q_MXL:
		dvb->frontend = dvb_attach(au8522_attach,
				&hauppauge_hvr950q_config,
				&dev->i2c_adap);
		if (dvb->frontend != NULL)
			dvb_attach(mxl5007t_attach, dvb->frontend,
				   &dev->i2c_adap, 0x60,
				   &mxl5007t_hvr950q_config);
		break;
	case AU0828_BOARD_HAUPPAUGE_WOODBURY:
		dvb->frontend = dvb_attach(au8522_attach,
				&hauppauge_woodbury_config,
				&dev->i2c_adap);
		if (dvb->frontend != NULL)
			dvb_attach(tda18271_attach, dvb->frontend,
				   0x60, &dev->i2c_adap,
				   &hauppauge_woodbury_tunerconfig);
		break;
	case AU0828_BOARD_DVICO_FUSIONHDTV7:
		dvb->frontend = dvb_attach(au8522_attach,
				&fusionhdtv7usb_config,
				&dev->i2c_adap);
		if (dvb->frontend != NULL) {
			dvb_attach(xc5000_attach, dvb->frontend,
				&dev->i2c_adap,
				&hauppauge_xc5000a_config);
		}
		break;
	default:
		pr_warn("The frontend of your DVB/ATSC card isn't supported yet\n");
		break;
	}
	if (dvb->frontend == NULL) {
		pr_err("%s() Frontend initialization failed\n",
		       __func__);
		return -1;
	}
	/* define general-purpose callback pointer */
	dvb->frontend->callback = au0828_tuner_callback;

	/* register everything */
	ret = dvb_register(dev);
	if (ret < 0) {
		if (dvb->frontend->ops.release)
			dvb->frontend->ops.release(dvb->frontend);
		dvb->frontend = NULL;
		return ret;
	}

	timer_setup(&dev->bulk_timeout, au0828_bulk_timeout, 0);

	return 0;
}

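/*
 * Stop any active streaming, remember that it must be restarted on
 * resume, and put the frontend to sleep.
 */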
void au0828_dvb_suspend(struct au0828_dev *dev)
{
	struct au0828_dvb *dvb = &dev->dvb;
	int rc;

	if (dvb->frontend) {
		if (dev->urb_streaming) {
			cancel_work_sync(&dev->restart_streaming);
			/* Stop transport */
			mutex_lock(&dvb->lock);
			stop_urb_transfer(dev);
			au0828_stop_transport(dev, 1);
			mutex_unlock(&dvb->lock);
			dev->need_urb_start = true;
		}
		/* suspend frontend - puts tuner and fe to sleep */
		rc = dvb_frontend_suspend(dvb->frontend);
		pr_info("au0828_dvb_suspend(): Suspending DVB fe %d\n", rc);
	}
}

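/* Re-init the frontend and restart streaming if it was active before suspend */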
void au0828_dvb_resume(struct au0828_dev *dev)
{
	struct au0828_dvb *dvb = &dev->dvb;
	int rc;

	if (dvb->frontend) {
		/* resume frontend - does fe and tuner init */
		rc = dvb_frontend_resume(dvb->frontend);
		pr_info("au0828_dvb_resume(): Resuming DVB fe %d\n", rc);
		if (dev->need_urb_start) {
			/* Start transport */
			mutex_lock(&dvb->lock);
			au0828_start_transport(dev);
			start_urb_transfer(dev);
			mutex_unlock(&dvb->lock);
		}
	}
}