/* interrupt handling
    Copyright (C) 2003-2004  Kevin Thayer <nufan_wfk at yahoo.com>
    Copyright (C) 2004  Chris Kennedy <c@groovy.org>
    Copyright (C) 2005-2007  Hans Verkuil <hverkuil@xs4all.nl>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include "ivtv-driver.h"
#include "ivtv-queue.h"
#include "ivtv-udma.h"
#include "ivtv-irq.h"
#include "ivtv-mailbox.h"
#include "ivtv-vbi.h"
#include "ivtv-yuv.h"
#include <media/v4l2-event.h>

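/* Magic value written into card memory at the start of a DMA transfer;
   dma_post() scans for it to locate the true start of the transferred data
   and then restores the word that was overwritten. */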
#define DMA_MAGIC_COOKIE 0x000001fe

static void ivtv_dma_dec_start(struct ivtv_stream *s);

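/* Maps the stream number reported by the firmware in the ENC START CAP
   mailbox (data[0]) to the corresponding driver stream. VBI captures are
   announced through their own interrupt, so ivtv_irq_enc_start_cap() only
   accepts data[0] values of 0-2. */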
static const int ivtv_stream_map[] = {
	IVTV_ENC_STREAM_TYPE_MPG,
	IVTV_ENC_STREAM_TYPE_YUV,
	IVTV_ENC_STREAM_TYPE_PCM,
	IVTV_ENC_STREAM_TYPE_VBI,
};

static void ivtv_pcm_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
	struct ivtv_buffer *buf;

	/* Pass the PCM data to ivtv-alsa */

	while (1) {
		/*
		 * Users should not be using both the ALSA and V4L2 PCM audio
		 * capture interfaces at the same time.  If the user is doing
		 * this, there may be a buffer in q_io to grab, use, and put
		 * back in rotation.
		 */
		buf = ivtv_dequeue(s, &s->q_io);
		if (buf == NULL)
			buf = ivtv_dequeue(s, &s->q_full);
		if (buf == NULL)
			break;

		if (buf->readpos < buf->bytesused)
			itv->pcm_announce_callback(itv->alsa,
				(u8 *)(buf->buf + buf->readpos),
				(size_t)(buf->bytesused - buf->readpos));

		ivtv_enqueue(s, buf, &s->q_free);
	}
}

static void ivtv_pio_work_handler(struct ivtv *itv)
{
	struct ivtv_stream *s = &itv->streams[itv->cur_pio_stream];
	struct ivtv_buffer *buf;
	int i = 0;

	IVTV_DEBUG_HI_DMA("ivtv_pio_work_handler\n");
	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS ||
			s->vdev == NULL || !ivtv_use_pio(s)) {
		itv->cur_pio_stream = -1;
		/* trigger PIO complete user interrupt */
		write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
		return;
	}
	IVTV_DEBUG_HI_DMA("Process PIO %s\n", s->name);
	list_for_each_entry(buf, &s->q_dma.list, list) {
		u32 size = s->sg_processing[i].size & 0x3ffff;

		/* Copy the data from the card to the buffer */
		if (s->type == IVTV_DEC_STREAM_TYPE_VBI) {
			memcpy_fromio(buf->buf, itv->dec_mem + s->sg_processing[i].src - IVTV_DECODER_OFFSET, size);
		}
		else {
			memcpy_fromio(buf->buf, itv->enc_mem + s->sg_processing[i].src, size);
		}
		i++;
		if (i == s->sg_processing_size)
			break;
	}
	write_reg(IVTV_IRQ_ENC_PIO_COMPLETE, 0x44);
}

void ivtv_irq_work_handler(struct kthread_work *work)
{
	struct ivtv *itv = container_of(work, struct ivtv, irq_work);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags))
		ivtv_pio_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags))
		ivtv_vbi_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags))
		ivtv_yuv_work_handler(itv);

	if (test_and_clear_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags))
		ivtv_pcm_work_handler(itv);
}

/* Determine the required DMA size, set up enough buffers in the predma queue
   and actually copy the data from the card to the buffers in case a PIO
   transfer is required for this stream.
 */
static int stream_enc_dma_append(struct ivtv_stream *s, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf;
	u32 bytes_needed = 0;
	u32 offset, size;
	u32 UVoffset = 0, UVsize = 0;
	int skip_bufs = s->q_predma.buffers;
	int idx = s->sg_pending_size;
	int rc;

	/* sanity checks */
	if (s->vdev == NULL) {
		IVTV_DEBUG_WARN("Stream %s not started\n", s->name);
		return -1;
	}
	if (!test_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
		IVTV_DEBUG_WARN("Stream %s not open\n", s->name);
		return -1;
	}

	/* determine offset, size and PTS for the various streams */
	switch (s->type) {
		case IVTV_ENC_STREAM_TYPE_MPG:
			offset = data[1];
			size = data[2];
			s->pending_pts = 0;
			break;

		case IVTV_ENC_STREAM_TYPE_YUV:
			offset = data[1];
			size = data[2];
			UVoffset = data[3];
			UVsize = data[4];
			s->pending_pts = ((u64) data[5] << 32) | data[6];
			break;

		case IVTV_ENC_STREAM_TYPE_PCM:
			offset = data[1] + 12;
			size = data[2] - 12;
			s->pending_pts = read_dec(offset - 8) |
				((u64)(read_dec(offset - 12)) << 32);
			if (itv->has_cx23415)
				offset += IVTV_DECODER_OFFSET;
			break;

		case IVTV_ENC_STREAM_TYPE_VBI:
			size = itv->vbi.enc_size * itv->vbi.fpi;
			offset = read_enc(itv->vbi.enc_start - 4) + 12;
			if (offset == 12) {
				IVTV_DEBUG_INFO("VBI offset == 0\n");
				return -1;
			}
			s->pending_pts = read_enc(offset - 4) | ((u64)read_enc(offset - 8) << 32);
			break;

		case IVTV_DEC_STREAM_TYPE_VBI:
			size = read_dec(itv->vbi.dec_start + 4) + 8;
			offset = read_dec(itv->vbi.dec_start) + itv->vbi.dec_start;
			s->pending_pts = 0;
			offset += IVTV_DECODER_OFFSET;
			break;
		default:
			/* shouldn't happen */
			return -1;
	}

	/* if this is the start of the DMA then fill in the magic cookie */
	if (s->sg_pending_size == 0 && ivtv_use_dma(s)) {
		if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
		    s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
			s->pending_backup = read_dec(offset - IVTV_DECODER_OFFSET);
			write_dec_sync(DMA_MAGIC_COOKIE, offset - IVTV_DECODER_OFFSET);
		}
		else {
			s->pending_backup = read_enc(offset);
			write_enc_sync(DMA_MAGIC_COOKIE, offset);
		}
		s->pending_offset = offset;
	}

	bytes_needed = size;
	if (s->type == IVTV_ENC_STREAM_TYPE_YUV) {
		/* The size for the Y samples needs to be rounded upwards to a
		   multiple of the buf_size. The UV samples then start in the
		   next buffer. */
		bytes_needed = s->buf_size * ((bytes_needed + s->buf_size - 1) / s->buf_size);
		bytes_needed += UVsize;
	}

	IVTV_DEBUG_HI_DMA("%s %s: 0x%08x bytes at 0x%08x\n",
		ivtv_use_pio(s) ? "PIO" : "DMA", s->name, bytes_needed, offset);

	rc = ivtv_queue_move(s, &s->q_free, &s->q_full, &s->q_predma, bytes_needed);
	if (rc < 0) { /* Insufficient buffers */
		IVTV_DEBUG_WARN("Cannot obtain %d bytes for %s data transfer\n",
				bytes_needed, s->name);
		return -1;
	}
	if (rc && !s->buffers_stolen && test_bit(IVTV_F_S_APPL_IO, &s->s_flags)) {
		IVTV_WARN("All %s stream buffers are full. Dropping data.\n", s->name);
		IVTV_WARN("Cause: the application is not reading fast enough.\n");
	}
	s->buffers_stolen = rc;

	/* got the buffers, now fill in sg_pending */
	buf = list_entry(s->q_predma.list.next, struct ivtv_buffer, list);
	memset(buf->buf, 0, 128);
	list_for_each_entry(buf, &s->q_predma.list, list) {
		if (skip_bufs-- > 0)
			continue;
		s->sg_pending[idx].dst = buf->dma_handle;
		s->sg_pending[idx].src = offset;
		s->sg_pending[idx].size = s->buf_size;
		buf->bytesused = min(size, s->buf_size);
		buf->dma_xfer_cnt = s->dma_xfer_cnt;

		s->q_predma.bytesused += buf->bytesused;
		size -= buf->bytesused;
		offset += s->buf_size;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);

		if (size == 0) {	/* YUV */
			/* process the UV section */
			offset = UVoffset;
			size = UVsize;
		}
		idx++;
	}
	s->sg_pending_size = idx;
	return 0;
}

static void dma_post(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_buffer *buf = NULL;
	struct list_head *p;
	u32 offset;
	__le32 *u32buf;
	int x = 0;

	IVTV_DEBUG_HI_DMA("%s %s completed (%x)\n", ivtv_use_pio(s) ? "PIO" : "DMA",
			s->name, s->dma_offset);
	list_for_each(p, &s->q_dma.list) {
		buf = list_entry(p, struct ivtv_buffer, list);
		u32buf = (__le32 *)buf->buf;

		/* Sync Buffer */
		ivtv_buf_sync_for_cpu(s, buf);

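		/*
		 * For the first buffer of a DMA transfer, verify that the data
		 * starts with the magic cookie written by
		 * stream_enc_dma_append(). If it is not at the expected
		 * offset, scan the first 256 bytes for it to find where the
		 * data really starts, then clear the cookie in card memory and
		 * restore the word it overwrote.
		 */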
		if (x == 0 && ivtv_use_dma(s)) {
			offset = s->dma_last_offset;
			if (le32_to_cpu(u32buf[offset / 4]) != DMA_MAGIC_COOKIE)
			{
				for (offset = 0; offset < 64; offset++)
					if (le32_to_cpu(u32buf[offset]) == DMA_MAGIC_COOKIE)
						break;
				offset *= 4;
				if (offset == 256) {
					IVTV_DEBUG_WARN("%s: Couldn't find start of buffer within the first 256 bytes\n", s->name);
					offset = s->dma_last_offset;
				}
				if (s->dma_last_offset != offset)
					IVTV_DEBUG_WARN("%s: offset %d -> %d\n", s->name, s->dma_last_offset, offset);
				s->dma_last_offset = offset;
			}
			if (itv->has_cx23415 && (s->type == IVTV_ENC_STREAM_TYPE_PCM ||
						s->type == IVTV_DEC_STREAM_TYPE_VBI)) {
				write_dec_sync(0, s->dma_offset - IVTV_DECODER_OFFSET);
			}
			else {
				write_enc_sync(0, s->dma_offset);
			}
			if (offset) {
				buf->bytesused -= offset;
				/* source and destination overlap, so use memmove */
				memmove(buf->buf, buf->buf + offset, buf->bytesused + offset);
			}
			*u32buf = cpu_to_le32(s->dma_backup);
		}
		x++;
		/* flag byteswap ABCD -> DCBA for MPG & VBI data outside irq */
		if (s->type == IVTV_ENC_STREAM_TYPE_MPG ||
		    s->type == IVTV_ENC_STREAM_TYPE_VBI)
			buf->b_flags |= IVTV_F_B_NEED_BUF_SWAP;
	}
	if (buf)
		buf->bytesused += s->dma_last_offset;
	if (buf && s->type == IVTV_DEC_STREAM_TYPE_VBI) {
		list_for_each_entry(buf, &s->q_dma.list, list) {
			/* Parse and Groom VBI Data */
			s->q_dma.bytesused -= buf->bytesused;
			ivtv_process_vbi_data(itv, buf, 0, s->type);
			s->q_dma.bytesused += buf->bytesused;
		}
		if (s->fh == NULL) {
			ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
			return;
		}
	}

	ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);

	if (s->type == IVTV_ENC_STREAM_TYPE_PCM &&
	    itv->pcm_announce_callback != NULL) {
		/*
		 * Set up the work handler to pass the data to ivtv-alsa.
		 *
		 * We just use q_full and let the work handler race with users
		 * making ivtv-fileops.c calls on the PCM device node.
		 *
		 * Users should not be using both the ALSA and V4L2 PCM audio
		 * capture interfaces at the same time.  If the user does this,
		 * fragments of data will just go out each interface as they
		 * race for PCM data.
		 */
		set_bit(IVTV_F_I_WORK_HANDLER_PCM, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
	}

	if (s->fh)
		wake_up(&s->waitq);
}

void ivtv_dma_stream_dec_prepare(struct ivtv_stream *s, u32 offset, int lock)
{
	struct ivtv *itv = s->itv;
	struct yuv_playback_info *yi = &itv->yuv_info;
	u8 frame = yi->draw_frame;
	struct yuv_frame_info *f = &yi->new_frame_info[frame];
	struct ivtv_buffer *buf;
	u32 y_size = 720 * ((f->src_h + 31) & ~31);
	u32 uv_offset = offset + IVTV_YUV_BUFFER_UV_OFFSET;
	int y_done = 0;
	int bytes_written = 0;
	unsigned long flags = 0;
	int idx = 0;

	IVTV_DEBUG_HI_DMA("DEC PREPARE DMA %s: %08x %08x\n", s->name, s->q_predma.bytesused, offset);

	/* Insert buffer block for YUV if needed */
	if (s->type == IVTV_DEC_STREAM_TYPE_YUV && f->offset_y) {
		if (yi->blanking_dmaptr) {
			s->sg_pending[idx].src = yi->blanking_dmaptr;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = 720 * 16;
		}
		offset += 720 * 16;
		idx++;
	}

	list_for_each_entry(buf, &s->q_predma.list, list) {
		/* For YUV, the UV plane starts at uv_offset; a buffer that
		   straddles the end of the Y plane is split into two SG
		   entries. */
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV && !y_done &&
				(bytes_written + buf->bytesused) >= y_size) {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = y_size - bytes_written;
			offset = uv_offset;
			if (s->sg_pending[idx].size != buf->bytesused) {
				idx++;
				s->sg_pending[idx].src =
				  buf->dma_handle + s->sg_pending[idx - 1].size;
				s->sg_pending[idx].dst = offset;
				s->sg_pending[idx].size =
				   buf->bytesused - s->sg_pending[idx - 1].size;
				offset += s->sg_pending[idx].size;
			}
			y_done = 1;
		} else {
			s->sg_pending[idx].src = buf->dma_handle;
			s->sg_pending[idx].dst = offset;
			s->sg_pending[idx].size = buf->bytesused;
			offset += buf->bytesused;
		}
		bytes_written += buf->bytesused;

		/* Sync SG buffers */
		ivtv_buf_sync_for_device(s, buf);
		idx++;
	}
	s->sg_pending_size = idx;

	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	if (lock)
		spin_lock_irqsave(&itv->dma_reg_lock, flags);
	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		ivtv_dma_dec_start(s);
	}
	else {
		set_bit(IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
	if (lock)
		spin_unlock_irqrestore(&itv->dma_reg_lock, flags);
}

static void ivtv_dma_enc_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_ENCDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x02, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

static void ivtv_dma_dec_start_xfer(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	s->sg_dma->src = cpu_to_le32(s->sg_processing[s->sg_processed].src);
	s->sg_dma->dst = cpu_to_le32(s->sg_processing[s->sg_processed].dst);
	s->sg_dma->size = cpu_to_le32(s->sg_processing[s->sg_processed].size | 0x80000000);
	s->sg_processed++;
	/* Sync Hardware SG List of buffers */
	ivtv_stream_sync_for_device(s);
	write_reg(s->sg_handle, IVTV_REG_DECDMAADDR);
	write_reg_sync(read_reg(IVTV_REG_DMAXFER) | 0x01, IVTV_REG_DMAXFER);
	itv->dma_timer.expires = jiffies + msecs_to_jiffies(300);
	add_timer(&itv->dma_timer);
}

/* start the encoder DMA */
static void ivtv_dma_enc_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;
	struct ivtv_stream *s_vbi = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
	int i;

	IVTV_DEBUG_HI_DMA("start %s for %s\n", ivtv_use_dma(s) ? "DMA" : "PIO", s->name);

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);

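	/* Pad the last scatter-gather element with an extra 256 bytes; the
	   firmware appears to require this headroom at the end of a DMA
	   transfer (the exact reason is not documented). */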
	if (ivtv_use_dma(s))
		s->sg_pending[s->sg_pending_size - 1].size += 256;

	/* If this is an MPEG stream, and VBI data is also pending, then append the
	   VBI DMA to the MPEG DMA and transfer both sets of data at once.

	   VBI DMA is a second class citizen compared to MPEG and mixing them together
	   will confuse the firmware (the end of a VBI DMA is seen as the end of an
	   MPEG DMA, thus effectively dropping an MPEG frame). So instead we make
	   sure we only use the MPEG DMA to transfer the VBI DMA if both are in
	   use. This way no conflicts occur. */
	clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG && s_vbi->sg_pending_size &&
			s->sg_pending_size + s_vbi->sg_pending_size <= s->buffers) {
		ivtv_queue_move(s_vbi, &s_vbi->q_predma, NULL, &s_vbi->q_dma, s_vbi->q_predma.bytesused);
		if (ivtv_use_dma(s_vbi))
			s_vbi->sg_pending[s_vbi->sg_pending_size - 1].size += 256;
		for (i = 0; i < s_vbi->sg_pending_size; i++) {
			s->sg_pending[s->sg_pending_size++] = s_vbi->sg_pending[i];
		}
		s_vbi->dma_offset = s_vbi->pending_offset;
		s_vbi->sg_pending_size = 0;
		s_vbi->dma_xfer_cnt++;
		set_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags);
		IVTV_DEBUG_HI_DMA("include DMA for %s\n", s_vbi->name);
	}

	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;
	s->dma_offset = s->pending_offset;
	s->dma_backup = s->pending_backup;
	s->dma_pts = s->pending_pts;

	if (ivtv_use_pio(s)) {
		set_bit(IVTV_F_I_WORK_HANDLER_PIO, &itv->i_flags);
		set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		set_bit(IVTV_F_I_PIO, &itv->i_flags);
		itv->cur_pio_stream = s->type;
	}
	else {
		itv->dma_retries = 0;
		ivtv_dma_enc_start_xfer(s);
		set_bit(IVTV_F_I_DMA, &itv->i_flags);
		itv->cur_dma_stream = s->type;
	}
}

static void ivtv_dma_dec_start(struct ivtv_stream *s)
{
	struct ivtv *itv = s->itv;

	if (s->q_predma.bytesused)
		ivtv_queue_move(s, &s->q_predma, NULL, &s->q_dma, s->q_predma.bytesused);
	s->dma_xfer_cnt++;
	memcpy(s->sg_processing, s->sg_pending, sizeof(struct ivtv_sg_host_element) * s->sg_pending_size);
	s->sg_processing_size = s->sg_pending_size;
	s->sg_pending_size = 0;
	s->sg_processed = 0;

	IVTV_DEBUG_HI_DMA("start DMA for %s\n", s->name);
	itv->dma_retries = 0;
	ivtv_dma_dec_start_xfer(s);
	set_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = s->type;
}

static void ivtv_irq_dma_read(struct ivtv *itv)
{
	struct ivtv_stream *s = NULL;
	struct ivtv_buffer *buf;
	int hw_stream_type = 0;

	IVTV_DEBUG_HI_IRQ("DEC DMA READ\n");

	del_timer(&itv->dma_timer);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && itv->cur_dma_stream < 0)
		return;

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		s = &itv->streams[itv->cur_dma_stream];
		ivtv_stream_sync_for_cpu(s);

		if (read_reg(IVTV_REG_DMASTATUS) & 0x14) {
			IVTV_DEBUG_WARN("DEC DMA ERROR %x (xfer %d of %d, retry %d)\n",
					read_reg(IVTV_REG_DMASTATUS),
					s->sg_processed, s->sg_processing_size, itv->dma_retries);
			write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
			if (itv->dma_retries == 3) {
				/* Too many retries, give up on this frame */
				itv->dma_retries = 0;
				s->sg_processed = s->sg_processing_size;
			}
			else {
				/* Retry, starting with the first xfer segment.
				   Just retrying the current segment is not sufficient. */
				s->sg_processed = 0;
				itv->dma_retries++;
			}
		}
		if (s->sg_processed < s->sg_processing_size) {
			/* DMA next buffer */
			ivtv_dma_dec_start_xfer(s);
			return;
		}
		if (s->type == IVTV_DEC_STREAM_TYPE_YUV)
			hw_stream_type = 2;
		IVTV_DEBUG_HI_DMA("DEC DATA READ %s: %d\n", s->name, s->q_dma.bytesused);

		/* For some reason we must kick the firmware here, as in PIO
		   mode. Presumably this tells the firmware that we are done
		   and reports the size of the transfer so it can calculate
		   what is needed next. We could probably do this part
		   ourselves, but we would have to fully calculate the
		   transfer info ourselves and not use interrupts.
		 */
		ivtv_vapi(itv, CX2341X_DEC_SCHED_DMA_FROM_HOST, 3, 0, s->q_dma.bytesused,
				hw_stream_type);

		/* Free last DMA call */
		while ((buf = ivtv_dequeue(s, &s->q_dma)) != NULL) {
			ivtv_buf_sync_for_cpu(s, buf);
			ivtv_enqueue(s, buf, &s->q_free);
		}
		wake_up(&s->waitq);
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_dma_complete(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	IVTV_DEBUG_HI_IRQ("ENC DMA COMPLETE %x %d (%d)\n", data[0], data[1], itv->cur_dma_stream);

	del_timer(&itv->dma_timer);

	if (itv->cur_dma_stream < 0)
		return;

	s = &itv->streams[itv->cur_dma_stream];
	ivtv_stream_sync_for_cpu(s);

	if (data[0] & 0x18) {
		IVTV_DEBUG_WARN("ENC DMA ERROR %x (offset %08x, xfer %d of %d, retry %d)\n", data[0],
			s->dma_offset, s->sg_processed, s->sg_processing_size, itv->dma_retries);
		write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
		if (itv->dma_retries == 3) {
			/* Too many retries, give up on this frame */
			itv->dma_retries = 0;
			s->sg_processed = s->sg_processing_size;
		}
		else {
			/* Retry, starting with the first xfer segment.
			   Just retrying the current segment is not sufficient. */
			s->sg_processed = 0;
			itv->dma_retries++;
		}
	}
	if (s->sg_processed < s->sg_processing_size) {
		/* DMA next buffer */
		ivtv_dma_enc_start_xfer(s);
		return;
	}
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	dma_post(s);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	s->sg_processing_size = 0;
	s->sg_processed = 0;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_pio_complete(struct ivtv *itv)
{
	struct ivtv_stream *s;

	if (itv->cur_pio_stream < 0 || itv->cur_pio_stream >= IVTV_MAX_STREAMS) {
		itv->cur_pio_stream = -1;
		return;
	}
	s = &itv->streams[itv->cur_pio_stream];
	IVTV_DEBUG_HI_IRQ("ENC PIO COMPLETE %s\n", s->name);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	itv->cur_pio_stream = -1;
	dma_post(s);
	if (s->type == IVTV_ENC_STREAM_TYPE_MPG)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 0);
	else if (s->type == IVTV_ENC_STREAM_TYPE_YUV)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 1);
	else if (s->type == IVTV_ENC_STREAM_TYPE_PCM)
		ivtv_vapi(itv, CX2341X_ENC_SCHED_DMA_TO_HOST, 3, 0, 0, 2);
	clear_bit(IVTV_F_I_PIO, &itv->i_flags);
	if (test_and_clear_bit(IVTV_F_S_DMA_HAS_VBI, &s->s_flags)) {
		s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];
		dma_post(s);
	}
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_dma_err(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	u32 status;

	del_timer(&itv->dma_timer);

	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data);
	status = read_reg(IVTV_REG_DMASTATUS);
	IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1],
				status, itv->cur_dma_stream);
	/*
	 * We do *not* write back to the IVTV_REG_DMASTATUS register to
	 * clear the error status if either the encoder write (0x02) or
	 * decoder read (0x01) bus master DMA operation does not indicate
	 * completion.  We can race with the DMA engine, which may have
	 * transitioned to completed status *after* we read the register.
	 * Setting an IVTV_REG_DMASTATUS flag back to "busy" status after the
	 * DMA engine has completed will cause the DMA engine to stop working.
	 */
	status &= 0x3;
	if (status == 0x3)
		write_reg(status, IVTV_REG_DMASTATUS);

	if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) &&
	    itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) {
		struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream];

		if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
			/* retry */
			/*
			 * FIXME - handle cases of DMA error similar to
			 * encoder below, except conditioned on status & 0x1
			 */
			ivtv_dma_dec_start(s);
			return;
		} else {
			if ((status & 0x2) == 0) {
				/*
				 * CX2341x Bus Master DMA write is ongoing.
				 * Reset the timer and let it complete.
				 */
				itv->dma_timer.expires =
						jiffies + msecs_to_jiffies(600);
				add_timer(&itv->dma_timer);
				return;
			}

			if (itv->dma_retries < 3) {
				/*
				 * CX2341x Bus Master DMA write has ended.
				 * Retry the write, starting with the first
				 * xfer segment. Just retrying the current
				 * segment is not sufficient.
				 */
				s->sg_processed = 0;
				itv->dma_retries++;
				ivtv_dma_enc_start_xfer(s);
				return;
			}
			/* Too many retries, give up on this one */
		}

	}
	if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) {
		ivtv_udma_start(itv);
		return;
	}
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}

static void ivtv_irq_enc_start_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* Get DMA destination and size arguments from card */
	ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA, 7, data);
	IVTV_DEBUG_HI_IRQ("ENC START CAP %d: %08x %08x\n", data[0], data[1], data[2]);

	if (data[0] > 2 || data[1] == 0 || data[2] == 0) {
		IVTV_DEBUG_WARN("Unknown input: %08x %08x %08x\n",
				data[0], data[1], data[2]);
		return;
	}
	s = &itv->streams[ivtv_stream_map[data[0]]];
	if (!stream_enc_dma_append(s, data)) {
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
	}
}

static void ivtv_irq_enc_vbi_cap(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	IVTV_DEBUG_HI_IRQ("ENC START VBI CAP\n");
	s = &itv->streams[IVTV_ENC_STREAM_TYPE_VBI];

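	/* For the VBI stream types, stream_enc_dma_append() does not look at
	   data[]: the offset and size are read directly from card memory. It
	   is therefore safe that data is never filled in here (the same holds
	   for ivtv_irq_dec_vbi_reinsert() below). */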
	if (!stream_enc_dma_append(s, data))
		set_bit(ivtv_use_pio(s) ? IVTV_F_S_PIO_PENDING : IVTV_F_S_DMA_PENDING, &s->s_flags);
}

static void ivtv_irq_dec_vbi_reinsert(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s = &itv->streams[IVTV_DEC_STREAM_TYPE_VBI];

	IVTV_DEBUG_HI_IRQ("DEC VBI REINSERT\n");
	if (test_bit(IVTV_F_S_CLAIMED, &s->s_flags) &&
			!stream_enc_dma_append(s, data)) {
		set_bit(IVTV_F_S_PIO_PENDING, &s->s_flags);
	}
}

static void ivtv_irq_dec_data_req(struct ivtv *itv)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	struct ivtv_stream *s;

	/* YUV or MPG */

	if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags)) {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 2, data);
		itv->dma_data_req_size =
				 1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
		itv->dma_data_req_offset = data[1];
		if (atomic_read(&itv->yuv_info.next_dma_frame) >= 0)
			ivtv_yuv_frame_complete(itv);
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_YUV];
	}
	else {
		ivtv_api_get_data(&itv->dec_mbox, IVTV_MBOX_DMA, 3, data);
		itv->dma_data_req_size = min_t(u32, data[2], 0x10000);
		itv->dma_data_req_offset = data[1];
		s = &itv->streams[IVTV_DEC_STREAM_TYPE_MPG];
	}
	IVTV_DEBUG_HI_IRQ("DEC DATA REQ %s: %d %08x %u\n", s->name, s->q_full.bytesused,
		       itv->dma_data_req_offset, itv->dma_data_req_size);
	if (itv->dma_data_req_size == 0 || s->q_full.bytesused < itv->dma_data_req_size) {
		set_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
	}
	else {
		if (test_bit(IVTV_F_I_DEC_YUV, &itv->i_flags))
			ivtv_yuv_setup_stream_frame(itv);
		clear_bit(IVTV_F_S_NEEDS_DATA, &s->s_flags);
		ivtv_queue_move(s, &s->q_full, NULL, &s->q_predma, itv->dma_data_req_size);
		ivtv_dma_stream_dec_prepare(s, itv->dma_data_req_offset + IVTV_DECODER_OFFSET, 0);
	}
}

static void ivtv_irq_vsync(struct ivtv *itv)
{
	/* The vsync interrupt is unusual in that it won't clear until
	 * the end of the first line for the current field, at which
	 * point it clears itself. This can result in repeated vsync
	 * interrupts, or a missed vsync. Read some of the registers
	 * to determine the line being displayed and ensure we handle
	 * one vsync per frame.
	 */
	unsigned int frame = read_reg(IVTV_REG_DEC_LINE_FIELD) & 1;
	struct yuv_playback_info *yi = &itv->yuv_info;
	int last_dma_frame = atomic_read(&yi->next_dma_frame);
	struct yuv_frame_info *f = &yi->new_frame_info[last_dma_frame];

	if (0) IVTV_DEBUG_IRQ("DEC VSYNC\n");

	if (((frame ^ f->sync_field) == 0 &&
		((itv->last_vsync_field & 1) ^ f->sync_field)) ||
			(frame != (itv->last_vsync_field & 1) && !f->interlaced)) {
		int next_dma_frame = last_dma_frame;

		if (!(f->interlaced && f->delay && yi->fields_lapsed < 1)) {
			if (next_dma_frame >= 0 && next_dma_frame != atomic_read(&yi->next_fill_frame)) {
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x82c);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x830);
				write_reg(yuv_offset[next_dma_frame] >> 4, 0x834);
				write_reg((yuv_offset[next_dma_frame] + IVTV_YUV_BUFFER_UV_OFFSET) >> 4, 0x838);
				next_dma_frame = (next_dma_frame + 1) % IVTV_YUV_BUFFERS;
				atomic_set(&yi->next_dma_frame, next_dma_frame);
				yi->fields_lapsed = -1;
				yi->running = 1;
			}
		}
	}
	if (frame != (itv->last_vsync_field & 1)) {
		static const struct v4l2_event evtop = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_TOP,
		};
		static const struct v4l2_event evbottom = {
			.type = V4L2_EVENT_VSYNC,
			.u.vsync.field = V4L2_FIELD_BOTTOM,
		};
		struct ivtv_stream *s = ivtv_get_output_stream(itv);

		itv->last_vsync_field += 1;
		if (frame == 0) {
			clear_bit(IVTV_F_I_VALID_DEC_TIMINGS, &itv->i_flags);
			clear_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		else {
			set_bit(IVTV_F_I_EV_VSYNC_FIELD, &itv->i_flags);
		}
		if (test_bit(IVTV_F_I_EV_VSYNC_ENABLED, &itv->i_flags)) {
			set_bit(IVTV_F_I_EV_VSYNC, &itv->i_flags);
			wake_up(&itv->event_waitq);
			if (s)
				wake_up(&s->waitq);
		}
		if (s && s->vdev)
			v4l2_event_queue(s->vdev, frame ? &evtop : &evbottom);
		wake_up(&itv->vsync_waitq);

		/* Send VBI to saa7127 */
		if (frame && (itv->output_mode == OUT_PASSTHROUGH ||
			test_bit(IVTV_F_I_UPDATE_WSS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_VPS, &itv->i_flags) ||
			test_bit(IVTV_F_I_UPDATE_CC, &itv->i_flags))) {
			set_bit(IVTV_F_I_WORK_HANDLER_VBI, &itv->i_flags);
			set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
		}

		/* Check if we need to update the yuv registers */
		if (yi->running && (yi->yuv_forced_update || f->update)) {
			if (!f->update) {
				last_dma_frame =
					(u8)(atomic_read(&yi->next_dma_frame) -
						 1) % IVTV_YUV_BUFFERS;
				f = &yi->new_frame_info[last_dma_frame];
			}

			if (f->src_w) {
				yi->update_frame = last_dma_frame;
				f->update = 0;
				yi->yuv_forced_update = 0;
				set_bit(IVTV_F_I_WORK_HANDLER_YUV, &itv->i_flags);
				set_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags);
			}
		}

		yi->fields_lapsed++;
	}
}

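/* Interrupts that may schedule a new DMA or PIO transfer in the round-robin
   loops at the end of ivtv_irq_handler(). */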
#define IVTV_IRQ_DMA (IVTV_IRQ_DMA_READ | IVTV_IRQ_ENC_DMA_COMPLETE | IVTV_IRQ_DMA_ERR | IVTV_IRQ_ENC_START_CAP | IVTV_IRQ_ENC_VBI_CAP | IVTV_IRQ_DEC_DATA_REQ | IVTV_IRQ_DEC_VBI_RE_INSERT)

irqreturn_t ivtv_irq_handler(int irq, void *dev_id)
{
	struct ivtv *itv = (struct ivtv *)dev_id;
	u32 combo;
	u32 stat;
	int i;
	u8 vsync_force = 0;

	spin_lock(&itv->dma_reg_lock);
	/* get contents of irq status register */
	stat = read_reg(IVTV_REG_IRQSTATUS);

	combo = ~itv->irqmask & stat;

	/* Clear out IRQ */
	if (combo) write_reg(combo, IVTV_REG_IRQSTATUS);

	if (0 == combo) {
		/* The vsync interrupt is unusual and clears itself. If we
		 * took too long, we may have missed it. Do some checks.
		 */
		if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
			/* vsync is enabled, see if we're in a new field */
			if ((itv->last_vsync_field & 1) !=
			    (read_reg(IVTV_REG_DEC_LINE_FIELD) & 1)) {
				/* New field, looks like we missed it */
				IVTV_DEBUG_YUV("VSync interrupt missed %d\n",
				       read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16);
				vsync_force = 1;
			}
		}

		if (!vsync_force) {
			/* No Vsync expected, wasn't for us */
			spin_unlock(&itv->dma_reg_lock);
			return IRQ_NONE;
		}
	}

	/* Exclude interrupts noted below from the output, otherwise the log is flooded with
	   these messages */
	if (combo & ~0xff6d0400)
		IVTV_DEBUG_HI_IRQ("======= valid IRQ bits: 0x%08x ======\n", combo);

	if (combo & IVTV_IRQ_DEC_DMA_COMPLETE) {
		IVTV_DEBUG_HI_IRQ("DEC DMA COMPLETE\n");
	}

	if (combo & IVTV_IRQ_DMA_READ) {
		ivtv_irq_dma_read(itv);
	}

	if (combo & IVTV_IRQ_ENC_DMA_COMPLETE) {
		ivtv_irq_enc_dma_complete(itv);
	}

	if (combo & IVTV_IRQ_ENC_PIO_COMPLETE) {
		ivtv_irq_enc_pio_complete(itv);
	}

	if (combo & IVTV_IRQ_DMA_ERR) {
		ivtv_irq_dma_err(itv);
	}

	if (combo & IVTV_IRQ_ENC_START_CAP) {
		ivtv_irq_enc_start_cap(itv);
	}

	if (combo & IVTV_IRQ_ENC_VBI_CAP) {
		ivtv_irq_enc_vbi_cap(itv);
	}

	if (combo & IVTV_IRQ_DEC_VBI_RE_INSERT) {
		ivtv_irq_dec_vbi_reinsert(itv);
	}

	if (combo & IVTV_IRQ_ENC_EOS) {
		IVTV_DEBUG_IRQ("ENC EOS\n");
		set_bit(IVTV_F_I_EOS, &itv->i_flags);
		wake_up(&itv->eos_waitq);
	}

	if (combo & IVTV_IRQ_DEC_DATA_REQ) {
		ivtv_irq_dec_data_req(itv);
	}

	/* Decoder Vertical Sync - We can't rely on 'combo', so check if vsync enabled */
	if (~itv->irqmask & IVTV_IRQ_DEC_VSYNC) {
		ivtv_irq_vsync(itv);
	}

	if (combo & IVTV_IRQ_ENC_VIM_RST) {
		IVTV_DEBUG_IRQ("VIM RST\n");
		/*ivtv_vapi(itv, CX2341X_ENC_REFRESH_INPUT, 0); */
	}

	if (combo & IVTV_IRQ_DEC_AUD_MODE_CHG) {
		IVTV_DEBUG_INFO("Stereo mode changed\n");
	}

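	/* If a DMA-related interrupt came in and no DMA (or PIO) transfer is
	   currently in progress, start the next pending transfer. The
	   round-robin index ensures that no stream is starved. */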
	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_DMA_PENDING, &s->s_flags))
				continue;
			if (s->type >= IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_dec_start(s);
			else
				ivtv_dma_enc_start(s);
			break;
		}

		if (i == IVTV_MAX_STREAMS &&
		    test_bit(IVTV_F_I_UDMA_PENDING, &itv->i_flags))
			ivtv_udma_start(itv);
	}

	if ((combo & IVTV_IRQ_DMA) && !test_bit(IVTV_F_I_PIO, &itv->i_flags)) {
		itv->irq_rr_idx++;
		for (i = 0; i < IVTV_MAX_STREAMS; i++) {
			int idx = (i + itv->irq_rr_idx) % IVTV_MAX_STREAMS;
			struct ivtv_stream *s = &itv->streams[idx];

			if (!test_and_clear_bit(IVTV_F_S_PIO_PENDING, &s->s_flags))
				continue;
			if (s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type < IVTV_DEC_STREAM_TYPE_MPG)
				ivtv_dma_enc_start(s);
			break;
		}
	}

	if (test_and_clear_bit(IVTV_F_I_HAVE_WORK, &itv->i_flags)) {
		queue_kthread_work(&itv->irq_worker, &itv->irq_work);
	}

	spin_unlock(&itv->dma_reg_lock);

	/* If we've just handled a 'forced' vsync, it's safest to say it
	 * wasn't ours. Another device may have triggered it at just
	 * the right time.
	 */
	return vsync_force ? IRQ_NONE : IRQ_HANDLED;
}

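/* dma_timer callback: a DMA transfer did not complete within the timeout
   armed when the transfer was started (see ivtv_dma_enc_start_xfer() and
   ivtv_dma_dec_start_xfer()). Clean up so new transfers can be scheduled. */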
void ivtv_unfinished_dma(unsigned long arg)
{
	struct ivtv *itv = (struct ivtv *)arg;

	if (!test_bit(IVTV_F_I_DMA, &itv->i_flags))
		return;
	IVTV_ERR("DMA TIMEOUT %08x %d\n", read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream);

	write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS);
	clear_bit(IVTV_F_I_UDMA, &itv->i_flags);
	clear_bit(IVTV_F_I_DMA, &itv->i_flags);
	itv->cur_dma_stream = -1;
	wake_up(&itv->dma_waitq);
}
1087