1c942fddfSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2b285192aSMauro Carvalho Chehab /*
3b285192aSMauro Carvalho Chehab * cx18 buffer queues
4b285192aSMauro Carvalho Chehab *
5b285192aSMauro Carvalho Chehab * Derived from ivtv-queue.c
6b285192aSMauro Carvalho Chehab *
7b285192aSMauro Carvalho Chehab * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
8b285192aSMauro Carvalho Chehab * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
9b285192aSMauro Carvalho Chehab */
10b285192aSMauro Carvalho Chehab
11b285192aSMauro Carvalho Chehab #include "cx18-driver.h"
12b285192aSMauro Carvalho Chehab #include "cx18-queue.h"
13b285192aSMauro Carvalho Chehab #include "cx18-streams.h"
14b285192aSMauro Carvalho Chehab #include "cx18-scb.h"
15b285192aSMauro Carvalho Chehab #include "cx18-io.h"
16b285192aSMauro Carvalho Chehab
cx18_buf_swap(struct cx18_buffer * buf)17b285192aSMauro Carvalho Chehab void cx18_buf_swap(struct cx18_buffer *buf)
18b285192aSMauro Carvalho Chehab {
19b285192aSMauro Carvalho Chehab int i;
20b285192aSMauro Carvalho Chehab
21b285192aSMauro Carvalho Chehab for (i = 0; i < buf->bytesused; i += 4)
22b285192aSMauro Carvalho Chehab swab32s((u32 *)(buf->buf + i));
23b285192aSMauro Carvalho Chehab }
24b285192aSMauro Carvalho Chehab
_cx18_mdl_swap(struct cx18_mdl * mdl)25b285192aSMauro Carvalho Chehab void _cx18_mdl_swap(struct cx18_mdl *mdl)
26b285192aSMauro Carvalho Chehab {
27b285192aSMauro Carvalho Chehab struct cx18_buffer *buf;
28b285192aSMauro Carvalho Chehab
29b285192aSMauro Carvalho Chehab list_for_each_entry(buf, &mdl->buf_list, list) {
30b285192aSMauro Carvalho Chehab if (buf->bytesused == 0)
31b285192aSMauro Carvalho Chehab break;
32b285192aSMauro Carvalho Chehab cx18_buf_swap(buf);
33b285192aSMauro Carvalho Chehab }
34b285192aSMauro Carvalho Chehab }
35b285192aSMauro Carvalho Chehab
cx18_queue_init(struct cx18_queue * q)36b285192aSMauro Carvalho Chehab void cx18_queue_init(struct cx18_queue *q)
37b285192aSMauro Carvalho Chehab {
38b285192aSMauro Carvalho Chehab INIT_LIST_HEAD(&q->list);
39b285192aSMauro Carvalho Chehab atomic_set(&q->depth, 0);
40b285192aSMauro Carvalho Chehab q->bytesused = 0;
41b285192aSMauro Carvalho Chehab }
42b285192aSMauro Carvalho Chehab
_cx18_enqueue(struct cx18_stream * s,struct cx18_mdl * mdl,struct cx18_queue * q,int to_front)43b285192aSMauro Carvalho Chehab struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_mdl *mdl,
44b285192aSMauro Carvalho Chehab struct cx18_queue *q, int to_front)
45b285192aSMauro Carvalho Chehab {
46b285192aSMauro Carvalho Chehab /* clear the mdl if it is not to be enqueued to the full queue */
47b285192aSMauro Carvalho Chehab if (q != &s->q_full) {
48b285192aSMauro Carvalho Chehab mdl->bytesused = 0;
49b285192aSMauro Carvalho Chehab mdl->readpos = 0;
50b285192aSMauro Carvalho Chehab mdl->m_flags = 0;
51b285192aSMauro Carvalho Chehab mdl->skipped = 0;
52b285192aSMauro Carvalho Chehab mdl->curr_buf = NULL;
53b285192aSMauro Carvalho Chehab }
54b285192aSMauro Carvalho Chehab
55b285192aSMauro Carvalho Chehab /* q_busy is restricted to a max buffer count imposed by firmware */
56b285192aSMauro Carvalho Chehab if (q == &s->q_busy &&
57b285192aSMauro Carvalho Chehab atomic_read(&q->depth) >= CX18_MAX_FW_MDLS_PER_STREAM)
58b285192aSMauro Carvalho Chehab q = &s->q_free;
59b285192aSMauro Carvalho Chehab
60b285192aSMauro Carvalho Chehab spin_lock(&q->lock);
61b285192aSMauro Carvalho Chehab
62b285192aSMauro Carvalho Chehab if (to_front)
63b285192aSMauro Carvalho Chehab list_add(&mdl->list, &q->list); /* LIFO */
64b285192aSMauro Carvalho Chehab else
65b285192aSMauro Carvalho Chehab list_add_tail(&mdl->list, &q->list); /* FIFO */
66b285192aSMauro Carvalho Chehab q->bytesused += mdl->bytesused - mdl->readpos;
67b285192aSMauro Carvalho Chehab atomic_inc(&q->depth);
68b285192aSMauro Carvalho Chehab
69b285192aSMauro Carvalho Chehab spin_unlock(&q->lock);
70b285192aSMauro Carvalho Chehab return q;
71b285192aSMauro Carvalho Chehab }
72b285192aSMauro Carvalho Chehab
cx18_dequeue(struct cx18_stream * s,struct cx18_queue * q)73b285192aSMauro Carvalho Chehab struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q)
74b285192aSMauro Carvalho Chehab {
75b285192aSMauro Carvalho Chehab struct cx18_mdl *mdl = NULL;
76b285192aSMauro Carvalho Chehab
77b285192aSMauro Carvalho Chehab spin_lock(&q->lock);
78b285192aSMauro Carvalho Chehab if (!list_empty(&q->list)) {
79b285192aSMauro Carvalho Chehab mdl = list_first_entry(&q->list, struct cx18_mdl, list);
80b285192aSMauro Carvalho Chehab list_del_init(&mdl->list);
81b285192aSMauro Carvalho Chehab q->bytesused -= mdl->bytesused - mdl->readpos;
82b285192aSMauro Carvalho Chehab mdl->skipped = 0;
83b285192aSMauro Carvalho Chehab atomic_dec(&q->depth);
84b285192aSMauro Carvalho Chehab }
85b285192aSMauro Carvalho Chehab spin_unlock(&q->lock);
86b285192aSMauro Carvalho Chehab return mdl;
87b285192aSMauro Carvalho Chehab }
88b285192aSMauro Carvalho Chehab
_cx18_mdl_update_bufs_for_cpu(struct cx18_stream * s,struct cx18_mdl * mdl)89b285192aSMauro Carvalho Chehab static void _cx18_mdl_update_bufs_for_cpu(struct cx18_stream *s,
90b285192aSMauro Carvalho Chehab struct cx18_mdl *mdl)
91b285192aSMauro Carvalho Chehab {
92b285192aSMauro Carvalho Chehab struct cx18_buffer *buf;
93b285192aSMauro Carvalho Chehab u32 buf_size = s->buf_size;
94b285192aSMauro Carvalho Chehab u32 bytesused = mdl->bytesused;
95b285192aSMauro Carvalho Chehab
96b285192aSMauro Carvalho Chehab list_for_each_entry(buf, &mdl->buf_list, list) {
97b285192aSMauro Carvalho Chehab buf->readpos = 0;
98b285192aSMauro Carvalho Chehab if (bytesused >= buf_size) {
99b285192aSMauro Carvalho Chehab buf->bytesused = buf_size;
100b285192aSMauro Carvalho Chehab bytesused -= buf_size;
101b285192aSMauro Carvalho Chehab } else {
102b285192aSMauro Carvalho Chehab buf->bytesused = bytesused;
103b285192aSMauro Carvalho Chehab bytesused = 0;
104b285192aSMauro Carvalho Chehab }
105b285192aSMauro Carvalho Chehab cx18_buf_sync_for_cpu(s, buf);
106b285192aSMauro Carvalho Chehab }
107b285192aSMauro Carvalho Chehab }
108b285192aSMauro Carvalho Chehab
cx18_mdl_update_bufs_for_cpu(struct cx18_stream * s,struct cx18_mdl * mdl)109b285192aSMauro Carvalho Chehab static inline void cx18_mdl_update_bufs_for_cpu(struct cx18_stream *s,
110b285192aSMauro Carvalho Chehab struct cx18_mdl *mdl)
111b285192aSMauro Carvalho Chehab {
112b285192aSMauro Carvalho Chehab struct cx18_buffer *buf;
113b285192aSMauro Carvalho Chehab
114b285192aSMauro Carvalho Chehab if (list_is_singular(&mdl->buf_list)) {
115b285192aSMauro Carvalho Chehab buf = list_first_entry(&mdl->buf_list, struct cx18_buffer,
116b285192aSMauro Carvalho Chehab list);
117b285192aSMauro Carvalho Chehab buf->bytesused = mdl->bytesused;
118b285192aSMauro Carvalho Chehab buf->readpos = 0;
119b285192aSMauro Carvalho Chehab cx18_buf_sync_for_cpu(s, buf);
120b285192aSMauro Carvalho Chehab } else {
121b285192aSMauro Carvalho Chehab _cx18_mdl_update_bufs_for_cpu(s, mdl);
122b285192aSMauro Carvalho Chehab }
123b285192aSMauro Carvalho Chehab }
124b285192aSMauro Carvalho Chehab
/*
 * cx18_queue_get_mdl() - find & remove the MDL the firmware reported done
 * @s:         stream whose q_busy holds the outstanding MDLs
 * @id:        MDL id the firmware says has completed
 * @bytesused: byte count the firmware filled into that MDL
 *
 * Returns the matching MDL, removed from q_busy and readied for the caller
 * to put on q_full or into the dvb ring buffer, or NULL if no MDL with @id
 * was found. MDLs skipped too often are presumed lost by the firmware and
 * are swept back onto q_free.
 */
struct cx18_mdl *cx18_queue_get_mdl(struct cx18_stream *s, u32 id,
	u32 bytesused)
{
	struct cx18 *cx = s->cx;	/* needed by the CX18_WARN() macro */
	struct cx18_mdl *mdl;
	struct cx18_mdl *tmp;
	struct cx18_mdl *ret = NULL;
	LIST_HEAD(sweep_up);		/* on-stack holding list for lost MDLs */

	/*
	 * We don't have to acquire multiple q locks here, because we are
	 * serialized by the single threaded work handler.
	 * MDLs from the firmware will thus remain in order as
	 * they are moved from q_busy to q_full or to the dvb ring buffer.
	 */
	spin_lock(&s->q_busy.lock);
	list_for_each_entry_safe(mdl, tmp, &s->q_busy.list, list) {
		/*
		 * We should find what the firmware told us is done,
		 * right at the front of the queue. If we don't, we likely have
		 * missed an mdl done message from the firmware.
		 * Once we skip an mdl repeatedly, relative to the size of
		 * q_busy, we have high confidence we've missed it.
		 */
		if (mdl->id != id) {
			mdl->skipped++;
			if (mdl->skipped >= atomic_read(&s->q_busy.depth)-1) {
				/* mdl must have fallen out of rotation */
				CX18_WARN("Skipped %s, MDL %d, %d times - it must have dropped out of rotation\n",
					  s->name, mdl->id,
					  mdl->skipped);
				/* Sweep it up to put it back into rotation */
				list_move_tail(&mdl->list, &sweep_up);
				atomic_dec(&s->q_busy.depth);
			}
			continue;
		}
		/*
		 * We pull the desired mdl off of the queue here. Something
		 * will have to put it back on a queue later.
		 */
		list_del_init(&mdl->list);
		atomic_dec(&s->q_busy.depth);
		ret = mdl;
		break;
	}
	spin_unlock(&s->q_busy.lock);

	/*
	 * We found the mdl for which we were looking. Get it ready for
	 * the caller to put on q_full or in the dvb ring buffer.
	 */
	if (ret != NULL) {
		ret->bytesused = bytesused;
		ret->skipped = 0;
		/* 0'ed readpos, m_flags & curr_buf when mdl went on q_busy */
		cx18_mdl_update_bufs_for_cpu(s, ret);
		/* TS streams are consumed as-is; all others get byteswapped */
		if (s->type != CX18_ENC_STREAM_TYPE_TS)
			set_bit(CX18_F_M_NEED_SWAP, &ret->m_flags);
	}

	/* Put any mdls the firmware is ignoring back into normal rotation */
	list_for_each_entry_safe(mdl, tmp, &sweep_up, list) {
		list_del_init(&mdl->list);
		cx18_enqueue(s, mdl, &s->q_free);
	}
	return ret;
}
193b285192aSMauro Carvalho Chehab
194b285192aSMauro Carvalho Chehab /* Move all mdls of a queue, while flushing the mdl */
cx18_queue_flush(struct cx18_stream * s,struct cx18_queue * q_src,struct cx18_queue * q_dst)195b285192aSMauro Carvalho Chehab static void cx18_queue_flush(struct cx18_stream *s,
196b285192aSMauro Carvalho Chehab struct cx18_queue *q_src, struct cx18_queue *q_dst)
197b285192aSMauro Carvalho Chehab {
198b285192aSMauro Carvalho Chehab struct cx18_mdl *mdl;
199b285192aSMauro Carvalho Chehab
200b285192aSMauro Carvalho Chehab /* It only makes sense to flush to q_free or q_idle */
201b285192aSMauro Carvalho Chehab if (q_src == q_dst || q_dst == &s->q_full || q_dst == &s->q_busy)
202b285192aSMauro Carvalho Chehab return;
203b285192aSMauro Carvalho Chehab
204b285192aSMauro Carvalho Chehab spin_lock(&q_src->lock);
205b285192aSMauro Carvalho Chehab spin_lock(&q_dst->lock);
206b285192aSMauro Carvalho Chehab while (!list_empty(&q_src->list)) {
207b285192aSMauro Carvalho Chehab mdl = list_first_entry(&q_src->list, struct cx18_mdl, list);
208b285192aSMauro Carvalho Chehab list_move_tail(&mdl->list, &q_dst->list);
209b285192aSMauro Carvalho Chehab mdl->bytesused = 0;
210b285192aSMauro Carvalho Chehab mdl->readpos = 0;
211b285192aSMauro Carvalho Chehab mdl->m_flags = 0;
212b285192aSMauro Carvalho Chehab mdl->skipped = 0;
213b285192aSMauro Carvalho Chehab mdl->curr_buf = NULL;
214b285192aSMauro Carvalho Chehab atomic_inc(&q_dst->depth);
215b285192aSMauro Carvalho Chehab }
216b285192aSMauro Carvalho Chehab cx18_queue_init(q_src);
217b285192aSMauro Carvalho Chehab spin_unlock(&q_src->lock);
218b285192aSMauro Carvalho Chehab spin_unlock(&q_dst->lock);
219b285192aSMauro Carvalho Chehab }
220b285192aSMauro Carvalho Chehab
cx18_flush_queues(struct cx18_stream * s)221b285192aSMauro Carvalho Chehab void cx18_flush_queues(struct cx18_stream *s)
222b285192aSMauro Carvalho Chehab {
223b285192aSMauro Carvalho Chehab cx18_queue_flush(s, &s->q_busy, &s->q_free);
224b285192aSMauro Carvalho Chehab cx18_queue_flush(s, &s->q_full, &s->q_free);
225b285192aSMauro Carvalho Chehab }
226b285192aSMauro Carvalho Chehab
227b285192aSMauro Carvalho Chehab /*
228b285192aSMauro Carvalho Chehab * Note, s->buf_pool is not protected by a lock,
229b285192aSMauro Carvalho Chehab * the stream better not have *anything* going on when calling this
230b285192aSMauro Carvalho Chehab */
cx18_unload_queues(struct cx18_stream * s)231b285192aSMauro Carvalho Chehab void cx18_unload_queues(struct cx18_stream *s)
232b285192aSMauro Carvalho Chehab {
233b285192aSMauro Carvalho Chehab struct cx18_queue *q_idle = &s->q_idle;
234b285192aSMauro Carvalho Chehab struct cx18_mdl *mdl;
235b285192aSMauro Carvalho Chehab struct cx18_buffer *buf;
236b285192aSMauro Carvalho Chehab
237b285192aSMauro Carvalho Chehab /* Move all MDLS to q_idle */
238b285192aSMauro Carvalho Chehab cx18_queue_flush(s, &s->q_busy, q_idle);
239b285192aSMauro Carvalho Chehab cx18_queue_flush(s, &s->q_full, q_idle);
240b285192aSMauro Carvalho Chehab cx18_queue_flush(s, &s->q_free, q_idle);
241b285192aSMauro Carvalho Chehab
242b285192aSMauro Carvalho Chehab /* Reset MDL id's and move all buffers back to the stream's buf_pool */
243b285192aSMauro Carvalho Chehab spin_lock(&q_idle->lock);
244b285192aSMauro Carvalho Chehab list_for_each_entry(mdl, &q_idle->list, list) {
245b285192aSMauro Carvalho Chehab while (!list_empty(&mdl->buf_list)) {
246b285192aSMauro Carvalho Chehab buf = list_first_entry(&mdl->buf_list,
247b285192aSMauro Carvalho Chehab struct cx18_buffer, list);
248b285192aSMauro Carvalho Chehab list_move_tail(&buf->list, &s->buf_pool);
249b285192aSMauro Carvalho Chehab buf->bytesused = 0;
250b285192aSMauro Carvalho Chehab buf->readpos = 0;
251b285192aSMauro Carvalho Chehab }
252b285192aSMauro Carvalho Chehab mdl->id = s->mdl_base_idx; /* reset id to a "safe" value */
253b285192aSMauro Carvalho Chehab /* all other mdl fields were cleared by cx18_queue_flush() */
254b285192aSMauro Carvalho Chehab }
255b285192aSMauro Carvalho Chehab spin_unlock(&q_idle->lock);
256b285192aSMauro Carvalho Chehab }
257b285192aSMauro Carvalho Chehab
/*
 * Note, s->buf_pool is not protected by a lock,
 * the stream better not have *anything* going on when calling this
 */
void cx18_load_queues(struct cx18_stream *s)
{
	struct cx18 *cx = s->cx;
	struct cx18_mdl *mdl;
	struct cx18_buffer *buf;
	int mdl_id;
	int i;
	u32 partial_buf_size;

	/*
	 * Attach buffers to MDLs, give the MDLs ids, and add MDLs to q_free
	 * Excess MDLs are left on q_idle
	 * Excess buffers are left in buf_pool and/or on an MDL in q_idle
	 */
	mdl_id = s->mdl_base_idx;
	/*
	 * The outer loop stops when q_idle runs dry or when the previous
	 * pass could not fully populate an MDL (i != s->bufs_per_mdl after
	 * the inner loop); i is deliberately carried across iterations.
	 */
	for (mdl = cx18_dequeue(s, &s->q_idle), i = s->bufs_per_mdl;
	     mdl != NULL && i == s->bufs_per_mdl;
	     mdl = cx18_dequeue(s, &s->q_idle)) {

		mdl->id = mdl_id;

		for (i = 0; i < s->bufs_per_mdl; i++) {
			if (list_empty(&s->buf_pool))
				break;

			buf = list_first_entry(&s->buf_pool, struct cx18_buffer,
					       list);
			list_move_tail(&buf->list, &mdl->buf_list);

			/* update the firmware's MDL array with this buffer */
			cx18_writel(cx, buf->dma_handle,
				    &cx->scb->cpu_mdl[mdl_id + i].paddr);
			cx18_writel(cx, s->buf_size,
				    &cx->scb->cpu_mdl[mdl_id + i].length);
		}

		if (i == s->bufs_per_mdl) {
			/*
			 * The encoder doesn't honor s->mdl_size. So in the
			 * case of a non-integral number of buffers to meet
			 * mdl_size, we lie about the size of the last buffer
			 * in the MDL to get the encoder to really only send
			 * us mdl_size bytes per MDL transfer.
			 */
			partial_buf_size = s->mdl_size % s->buf_size;
			if (partial_buf_size) {
				cx18_writel(cx, partial_buf_size,
					    &cx->scb->cpu_mdl[mdl_id + i - 1].length);
			}
			cx18_enqueue(s, mdl, &s->q_free);
		} else {
			/* Not enough buffers for this MDL; we won't use it */
			cx18_push(s, mdl, &s->q_idle);
		}
		/* advance past the firmware MDL-array slots just consumed */
		mdl_id += i;
	}
}
319b285192aSMauro Carvalho Chehab
_cx18_mdl_sync_for_device(struct cx18_stream * s,struct cx18_mdl * mdl)320b285192aSMauro Carvalho Chehab void _cx18_mdl_sync_for_device(struct cx18_stream *s, struct cx18_mdl *mdl)
321b285192aSMauro Carvalho Chehab {
322b285192aSMauro Carvalho Chehab int dma = s->dma;
323b285192aSMauro Carvalho Chehab u32 buf_size = s->buf_size;
324b285192aSMauro Carvalho Chehab struct pci_dev *pci_dev = s->cx->pci_dev;
325b285192aSMauro Carvalho Chehab struct cx18_buffer *buf;
326b285192aSMauro Carvalho Chehab
327b285192aSMauro Carvalho Chehab list_for_each_entry(buf, &mdl->buf_list, list)
328*887069f4SChristophe JAILLET dma_sync_single_for_device(&pci_dev->dev, buf->dma_handle,
329b285192aSMauro Carvalho Chehab buf_size, dma);
330b285192aSMauro Carvalho Chehab }
331b285192aSMauro Carvalho Chehab
cx18_stream_alloc(struct cx18_stream * s)332b285192aSMauro Carvalho Chehab int cx18_stream_alloc(struct cx18_stream *s)
333b285192aSMauro Carvalho Chehab {
334b285192aSMauro Carvalho Chehab struct cx18 *cx = s->cx;
335b285192aSMauro Carvalho Chehab int i;
336b285192aSMauro Carvalho Chehab
337b285192aSMauro Carvalho Chehab if (s->buffers == 0)
338b285192aSMauro Carvalho Chehab return 0;
339b285192aSMauro Carvalho Chehab
3406beb1388SMauro Carvalho Chehab CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers (%d.%02d kB total)\n",
341b285192aSMauro Carvalho Chehab s->name, s->buffers, s->buf_size,
342b285192aSMauro Carvalho Chehab s->buffers * s->buf_size / 1024,
343b285192aSMauro Carvalho Chehab (s->buffers * s->buf_size * 100 / 1024) % 100);
344b285192aSMauro Carvalho Chehab
345b285192aSMauro Carvalho Chehab if (((char __iomem *)&cx->scb->cpu_mdl[cx->free_mdl_idx + s->buffers] -
346b285192aSMauro Carvalho Chehab (char __iomem *)cx->scb) > SCB_RESERVED_SIZE) {
347b285192aSMauro Carvalho Chehab unsigned bufsz = (((char __iomem *)cx->scb) + SCB_RESERVED_SIZE -
348b285192aSMauro Carvalho Chehab ((char __iomem *)cx->scb->cpu_mdl));
349b285192aSMauro Carvalho Chehab
350b285192aSMauro Carvalho Chehab CX18_ERR("Too many buffers, cannot fit in SCB area\n");
351339f06c5SMauro Carvalho Chehab CX18_ERR("Max buffers = %zu\n",
352b285192aSMauro Carvalho Chehab bufsz / sizeof(struct cx18_mdl_ent));
353b285192aSMauro Carvalho Chehab return -ENOMEM;
354b285192aSMauro Carvalho Chehab }
355b285192aSMauro Carvalho Chehab
356b285192aSMauro Carvalho Chehab s->mdl_base_idx = cx->free_mdl_idx;
357b285192aSMauro Carvalho Chehab
358b285192aSMauro Carvalho Chehab /* allocate stream buffers and MDLs */
359b285192aSMauro Carvalho Chehab for (i = 0; i < s->buffers; i++) {
360b285192aSMauro Carvalho Chehab struct cx18_mdl *mdl;
361b285192aSMauro Carvalho Chehab struct cx18_buffer *buf;
362b285192aSMauro Carvalho Chehab
363b285192aSMauro Carvalho Chehab /* 1 MDL per buffer to handle the worst & also default case */
364b285192aSMauro Carvalho Chehab mdl = kzalloc(sizeof(struct cx18_mdl), GFP_KERNEL|__GFP_NOWARN);
365b285192aSMauro Carvalho Chehab if (mdl == NULL)
366b285192aSMauro Carvalho Chehab break;
367b285192aSMauro Carvalho Chehab
368b285192aSMauro Carvalho Chehab buf = kzalloc(sizeof(struct cx18_buffer),
369b285192aSMauro Carvalho Chehab GFP_KERNEL|__GFP_NOWARN);
370b285192aSMauro Carvalho Chehab if (buf == NULL) {
371b285192aSMauro Carvalho Chehab kfree(mdl);
372b285192aSMauro Carvalho Chehab break;
373b285192aSMauro Carvalho Chehab }
374b285192aSMauro Carvalho Chehab
375b285192aSMauro Carvalho Chehab buf->buf = kmalloc(s->buf_size, GFP_KERNEL|__GFP_NOWARN);
376b285192aSMauro Carvalho Chehab if (buf->buf == NULL) {
377b285192aSMauro Carvalho Chehab kfree(mdl);
378b285192aSMauro Carvalho Chehab kfree(buf);
379b285192aSMauro Carvalho Chehab break;
380b285192aSMauro Carvalho Chehab }
381b285192aSMauro Carvalho Chehab
382b285192aSMauro Carvalho Chehab INIT_LIST_HEAD(&mdl->list);
383b285192aSMauro Carvalho Chehab INIT_LIST_HEAD(&mdl->buf_list);
384b285192aSMauro Carvalho Chehab mdl->id = s->mdl_base_idx; /* a somewhat safe value */
385b285192aSMauro Carvalho Chehab cx18_enqueue(s, mdl, &s->q_idle);
386b285192aSMauro Carvalho Chehab
387b285192aSMauro Carvalho Chehab INIT_LIST_HEAD(&buf->list);
388*887069f4SChristophe JAILLET buf->dma_handle = dma_map_single(&s->cx->pci_dev->dev,
389*887069f4SChristophe JAILLET buf->buf, s->buf_size,
390*887069f4SChristophe JAILLET s->dma);
391b285192aSMauro Carvalho Chehab cx18_buf_sync_for_cpu(s, buf);
392b285192aSMauro Carvalho Chehab list_add_tail(&buf->list, &s->buf_pool);
393b285192aSMauro Carvalho Chehab }
394b285192aSMauro Carvalho Chehab if (i == s->buffers) {
395b285192aSMauro Carvalho Chehab cx->free_mdl_idx += s->buffers;
396b285192aSMauro Carvalho Chehab return 0;
397b285192aSMauro Carvalho Chehab }
398b285192aSMauro Carvalho Chehab CX18_ERR("Couldn't allocate buffers for %s stream\n", s->name);
399b285192aSMauro Carvalho Chehab cx18_stream_free(s);
400b285192aSMauro Carvalho Chehab return -ENOMEM;
401b285192aSMauro Carvalho Chehab }
402b285192aSMauro Carvalho Chehab
cx18_stream_free(struct cx18_stream * s)403b285192aSMauro Carvalho Chehab void cx18_stream_free(struct cx18_stream *s)
404b285192aSMauro Carvalho Chehab {
405b285192aSMauro Carvalho Chehab struct cx18_mdl *mdl;
406b285192aSMauro Carvalho Chehab struct cx18_buffer *buf;
407b285192aSMauro Carvalho Chehab struct cx18 *cx = s->cx;
408b285192aSMauro Carvalho Chehab
409b285192aSMauro Carvalho Chehab CX18_DEBUG_INFO("Deallocating buffers for %s stream\n", s->name);
410b285192aSMauro Carvalho Chehab
411b285192aSMauro Carvalho Chehab /* move all buffers to buf_pool and all MDLs to q_idle */
412b285192aSMauro Carvalho Chehab cx18_unload_queues(s);
413b285192aSMauro Carvalho Chehab
414b285192aSMauro Carvalho Chehab /* empty q_idle */
415b285192aSMauro Carvalho Chehab while ((mdl = cx18_dequeue(s, &s->q_idle)))
416b285192aSMauro Carvalho Chehab kfree(mdl);
417b285192aSMauro Carvalho Chehab
418b285192aSMauro Carvalho Chehab /* empty buf_pool */
419b285192aSMauro Carvalho Chehab while (!list_empty(&s->buf_pool)) {
420b285192aSMauro Carvalho Chehab buf = list_first_entry(&s->buf_pool, struct cx18_buffer, list);
421b285192aSMauro Carvalho Chehab list_del_init(&buf->list);
422b285192aSMauro Carvalho Chehab
423*887069f4SChristophe JAILLET dma_unmap_single(&s->cx->pci_dev->dev, buf->dma_handle,
424b285192aSMauro Carvalho Chehab s->buf_size, s->dma);
425b285192aSMauro Carvalho Chehab kfree(buf->buf);
426b285192aSMauro Carvalho Chehab kfree(buf);
427b285192aSMauro Carvalho Chehab }
428b285192aSMauro Carvalho Chehab }
429