/* cx18 buffer queues
 *
 * Derived from ivtv-queue.c
 *
 * Copyright (C) 2007  Hans Verkuil <hverkuil@xs4all.nl>
 * Copyright (C) 2008  Andy Walls <awalls@md.metrocast.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
18*b285192aSMauro Carvalho Chehab * 19*b285192aSMauro Carvalho Chehab * You should have received a copy of the GNU General Public License 20*b285192aSMauro Carvalho Chehab * along with this program; if not, write to the Free Software 21*b285192aSMauro Carvalho Chehab * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 22*b285192aSMauro Carvalho Chehab * 02111-1307 USA 23*b285192aSMauro Carvalho Chehab */ 24*b285192aSMauro Carvalho Chehab 25*b285192aSMauro Carvalho Chehab #include "cx18-driver.h" 26*b285192aSMauro Carvalho Chehab #include "cx18-queue.h" 27*b285192aSMauro Carvalho Chehab #include "cx18-streams.h" 28*b285192aSMauro Carvalho Chehab #include "cx18-scb.h" 29*b285192aSMauro Carvalho Chehab #include "cx18-io.h" 30*b285192aSMauro Carvalho Chehab 31*b285192aSMauro Carvalho Chehab void cx18_buf_swap(struct cx18_buffer *buf) 32*b285192aSMauro Carvalho Chehab { 33*b285192aSMauro Carvalho Chehab int i; 34*b285192aSMauro Carvalho Chehab 35*b285192aSMauro Carvalho Chehab for (i = 0; i < buf->bytesused; i += 4) 36*b285192aSMauro Carvalho Chehab swab32s((u32 *)(buf->buf + i)); 37*b285192aSMauro Carvalho Chehab } 38*b285192aSMauro Carvalho Chehab 39*b285192aSMauro Carvalho Chehab void _cx18_mdl_swap(struct cx18_mdl *mdl) 40*b285192aSMauro Carvalho Chehab { 41*b285192aSMauro Carvalho Chehab struct cx18_buffer *buf; 42*b285192aSMauro Carvalho Chehab 43*b285192aSMauro Carvalho Chehab list_for_each_entry(buf, &mdl->buf_list, list) { 44*b285192aSMauro Carvalho Chehab if (buf->bytesused == 0) 45*b285192aSMauro Carvalho Chehab break; 46*b285192aSMauro Carvalho Chehab cx18_buf_swap(buf); 47*b285192aSMauro Carvalho Chehab } 48*b285192aSMauro Carvalho Chehab } 49*b285192aSMauro Carvalho Chehab 50*b285192aSMauro Carvalho Chehab void cx18_queue_init(struct cx18_queue *q) 51*b285192aSMauro Carvalho Chehab { 52*b285192aSMauro Carvalho Chehab INIT_LIST_HEAD(&q->list); 53*b285192aSMauro Carvalho Chehab atomic_set(&q->depth, 0); 54*b285192aSMauro Carvalho Chehab q->bytesused = 
0; 55*b285192aSMauro Carvalho Chehab } 56*b285192aSMauro Carvalho Chehab 57*b285192aSMauro Carvalho Chehab struct cx18_queue *_cx18_enqueue(struct cx18_stream *s, struct cx18_mdl *mdl, 58*b285192aSMauro Carvalho Chehab struct cx18_queue *q, int to_front) 59*b285192aSMauro Carvalho Chehab { 60*b285192aSMauro Carvalho Chehab /* clear the mdl if it is not to be enqueued to the full queue */ 61*b285192aSMauro Carvalho Chehab if (q != &s->q_full) { 62*b285192aSMauro Carvalho Chehab mdl->bytesused = 0; 63*b285192aSMauro Carvalho Chehab mdl->readpos = 0; 64*b285192aSMauro Carvalho Chehab mdl->m_flags = 0; 65*b285192aSMauro Carvalho Chehab mdl->skipped = 0; 66*b285192aSMauro Carvalho Chehab mdl->curr_buf = NULL; 67*b285192aSMauro Carvalho Chehab } 68*b285192aSMauro Carvalho Chehab 69*b285192aSMauro Carvalho Chehab /* q_busy is restricted to a max buffer count imposed by firmware */ 70*b285192aSMauro Carvalho Chehab if (q == &s->q_busy && 71*b285192aSMauro Carvalho Chehab atomic_read(&q->depth) >= CX18_MAX_FW_MDLS_PER_STREAM) 72*b285192aSMauro Carvalho Chehab q = &s->q_free; 73*b285192aSMauro Carvalho Chehab 74*b285192aSMauro Carvalho Chehab spin_lock(&q->lock); 75*b285192aSMauro Carvalho Chehab 76*b285192aSMauro Carvalho Chehab if (to_front) 77*b285192aSMauro Carvalho Chehab list_add(&mdl->list, &q->list); /* LIFO */ 78*b285192aSMauro Carvalho Chehab else 79*b285192aSMauro Carvalho Chehab list_add_tail(&mdl->list, &q->list); /* FIFO */ 80*b285192aSMauro Carvalho Chehab q->bytesused += mdl->bytesused - mdl->readpos; 81*b285192aSMauro Carvalho Chehab atomic_inc(&q->depth); 82*b285192aSMauro Carvalho Chehab 83*b285192aSMauro Carvalho Chehab spin_unlock(&q->lock); 84*b285192aSMauro Carvalho Chehab return q; 85*b285192aSMauro Carvalho Chehab } 86*b285192aSMauro Carvalho Chehab 87*b285192aSMauro Carvalho Chehab struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q) 88*b285192aSMauro Carvalho Chehab { 89*b285192aSMauro Carvalho Chehab struct cx18_mdl *mdl = 
NULL; 90*b285192aSMauro Carvalho Chehab 91*b285192aSMauro Carvalho Chehab spin_lock(&q->lock); 92*b285192aSMauro Carvalho Chehab if (!list_empty(&q->list)) { 93*b285192aSMauro Carvalho Chehab mdl = list_first_entry(&q->list, struct cx18_mdl, list); 94*b285192aSMauro Carvalho Chehab list_del_init(&mdl->list); 95*b285192aSMauro Carvalho Chehab q->bytesused -= mdl->bytesused - mdl->readpos; 96*b285192aSMauro Carvalho Chehab mdl->skipped = 0; 97*b285192aSMauro Carvalho Chehab atomic_dec(&q->depth); 98*b285192aSMauro Carvalho Chehab } 99*b285192aSMauro Carvalho Chehab spin_unlock(&q->lock); 100*b285192aSMauro Carvalho Chehab return mdl; 101*b285192aSMauro Carvalho Chehab } 102*b285192aSMauro Carvalho Chehab 103*b285192aSMauro Carvalho Chehab static void _cx18_mdl_update_bufs_for_cpu(struct cx18_stream *s, 104*b285192aSMauro Carvalho Chehab struct cx18_mdl *mdl) 105*b285192aSMauro Carvalho Chehab { 106*b285192aSMauro Carvalho Chehab struct cx18_buffer *buf; 107*b285192aSMauro Carvalho Chehab u32 buf_size = s->buf_size; 108*b285192aSMauro Carvalho Chehab u32 bytesused = mdl->bytesused; 109*b285192aSMauro Carvalho Chehab 110*b285192aSMauro Carvalho Chehab list_for_each_entry(buf, &mdl->buf_list, list) { 111*b285192aSMauro Carvalho Chehab buf->readpos = 0; 112*b285192aSMauro Carvalho Chehab if (bytesused >= buf_size) { 113*b285192aSMauro Carvalho Chehab buf->bytesused = buf_size; 114*b285192aSMauro Carvalho Chehab bytesused -= buf_size; 115*b285192aSMauro Carvalho Chehab } else { 116*b285192aSMauro Carvalho Chehab buf->bytesused = bytesused; 117*b285192aSMauro Carvalho Chehab bytesused = 0; 118*b285192aSMauro Carvalho Chehab } 119*b285192aSMauro Carvalho Chehab cx18_buf_sync_for_cpu(s, buf); 120*b285192aSMauro Carvalho Chehab } 121*b285192aSMauro Carvalho Chehab } 122*b285192aSMauro Carvalho Chehab 123*b285192aSMauro Carvalho Chehab static inline void cx18_mdl_update_bufs_for_cpu(struct cx18_stream *s, 124*b285192aSMauro Carvalho Chehab struct cx18_mdl *mdl) 
125*b285192aSMauro Carvalho Chehab { 126*b285192aSMauro Carvalho Chehab struct cx18_buffer *buf; 127*b285192aSMauro Carvalho Chehab 128*b285192aSMauro Carvalho Chehab if (list_is_singular(&mdl->buf_list)) { 129*b285192aSMauro Carvalho Chehab buf = list_first_entry(&mdl->buf_list, struct cx18_buffer, 130*b285192aSMauro Carvalho Chehab list); 131*b285192aSMauro Carvalho Chehab buf->bytesused = mdl->bytesused; 132*b285192aSMauro Carvalho Chehab buf->readpos = 0; 133*b285192aSMauro Carvalho Chehab cx18_buf_sync_for_cpu(s, buf); 134*b285192aSMauro Carvalho Chehab } else { 135*b285192aSMauro Carvalho Chehab _cx18_mdl_update_bufs_for_cpu(s, mdl); 136*b285192aSMauro Carvalho Chehab } 137*b285192aSMauro Carvalho Chehab } 138*b285192aSMauro Carvalho Chehab 139*b285192aSMauro Carvalho Chehab struct cx18_mdl *cx18_queue_get_mdl(struct cx18_stream *s, u32 id, 140*b285192aSMauro Carvalho Chehab u32 bytesused) 141*b285192aSMauro Carvalho Chehab { 142*b285192aSMauro Carvalho Chehab struct cx18 *cx = s->cx; 143*b285192aSMauro Carvalho Chehab struct cx18_mdl *mdl; 144*b285192aSMauro Carvalho Chehab struct cx18_mdl *tmp; 145*b285192aSMauro Carvalho Chehab struct cx18_mdl *ret = NULL; 146*b285192aSMauro Carvalho Chehab LIST_HEAD(sweep_up); 147*b285192aSMauro Carvalho Chehab 148*b285192aSMauro Carvalho Chehab /* 149*b285192aSMauro Carvalho Chehab * We don't have to acquire multiple q locks here, because we are 150*b285192aSMauro Carvalho Chehab * serialized by the single threaded work handler. 151*b285192aSMauro Carvalho Chehab * MDLs from the firmware will thus remain in order as 152*b285192aSMauro Carvalho Chehab * they are moved from q_busy to q_full or to the dvb ring buffer. 
153*b285192aSMauro Carvalho Chehab */ 154*b285192aSMauro Carvalho Chehab spin_lock(&s->q_busy.lock); 155*b285192aSMauro Carvalho Chehab list_for_each_entry_safe(mdl, tmp, &s->q_busy.list, list) { 156*b285192aSMauro Carvalho Chehab /* 157*b285192aSMauro Carvalho Chehab * We should find what the firmware told us is done, 158*b285192aSMauro Carvalho Chehab * right at the front of the queue. If we don't, we likely have 159*b285192aSMauro Carvalho Chehab * missed an mdl done message from the firmware. 160*b285192aSMauro Carvalho Chehab * Once we skip an mdl repeatedly, relative to the size of 161*b285192aSMauro Carvalho Chehab * q_busy, we have high confidence we've missed it. 162*b285192aSMauro Carvalho Chehab */ 163*b285192aSMauro Carvalho Chehab if (mdl->id != id) { 164*b285192aSMauro Carvalho Chehab mdl->skipped++; 165*b285192aSMauro Carvalho Chehab if (mdl->skipped >= atomic_read(&s->q_busy.depth)-1) { 166*b285192aSMauro Carvalho Chehab /* mdl must have fallen out of rotation */ 167*b285192aSMauro Carvalho Chehab CX18_WARN("Skipped %s, MDL %d, %d " 168*b285192aSMauro Carvalho Chehab "times - it must have dropped out of " 169*b285192aSMauro Carvalho Chehab "rotation\n", s->name, mdl->id, 170*b285192aSMauro Carvalho Chehab mdl->skipped); 171*b285192aSMauro Carvalho Chehab /* Sweep it up to put it back into rotation */ 172*b285192aSMauro Carvalho Chehab list_move_tail(&mdl->list, &sweep_up); 173*b285192aSMauro Carvalho Chehab atomic_dec(&s->q_busy.depth); 174*b285192aSMauro Carvalho Chehab } 175*b285192aSMauro Carvalho Chehab continue; 176*b285192aSMauro Carvalho Chehab } 177*b285192aSMauro Carvalho Chehab /* 178*b285192aSMauro Carvalho Chehab * We pull the desired mdl off of the queue here. Something 179*b285192aSMauro Carvalho Chehab * will have to put it back on a queue later. 
180*b285192aSMauro Carvalho Chehab */ 181*b285192aSMauro Carvalho Chehab list_del_init(&mdl->list); 182*b285192aSMauro Carvalho Chehab atomic_dec(&s->q_busy.depth); 183*b285192aSMauro Carvalho Chehab ret = mdl; 184*b285192aSMauro Carvalho Chehab break; 185*b285192aSMauro Carvalho Chehab } 186*b285192aSMauro Carvalho Chehab spin_unlock(&s->q_busy.lock); 187*b285192aSMauro Carvalho Chehab 188*b285192aSMauro Carvalho Chehab /* 189*b285192aSMauro Carvalho Chehab * We found the mdl for which we were looking. Get it ready for 190*b285192aSMauro Carvalho Chehab * the caller to put on q_full or in the dvb ring buffer. 191*b285192aSMauro Carvalho Chehab */ 192*b285192aSMauro Carvalho Chehab if (ret != NULL) { 193*b285192aSMauro Carvalho Chehab ret->bytesused = bytesused; 194*b285192aSMauro Carvalho Chehab ret->skipped = 0; 195*b285192aSMauro Carvalho Chehab /* 0'ed readpos, m_flags & curr_buf when mdl went on q_busy */ 196*b285192aSMauro Carvalho Chehab cx18_mdl_update_bufs_for_cpu(s, ret); 197*b285192aSMauro Carvalho Chehab if (s->type != CX18_ENC_STREAM_TYPE_TS) 198*b285192aSMauro Carvalho Chehab set_bit(CX18_F_M_NEED_SWAP, &ret->m_flags); 199*b285192aSMauro Carvalho Chehab } 200*b285192aSMauro Carvalho Chehab 201*b285192aSMauro Carvalho Chehab /* Put any mdls the firmware is ignoring back into normal rotation */ 202*b285192aSMauro Carvalho Chehab list_for_each_entry_safe(mdl, tmp, &sweep_up, list) { 203*b285192aSMauro Carvalho Chehab list_del_init(&mdl->list); 204*b285192aSMauro Carvalho Chehab cx18_enqueue(s, mdl, &s->q_free); 205*b285192aSMauro Carvalho Chehab } 206*b285192aSMauro Carvalho Chehab return ret; 207*b285192aSMauro Carvalho Chehab } 208*b285192aSMauro Carvalho Chehab 209*b285192aSMauro Carvalho Chehab /* Move all mdls of a queue, while flushing the mdl */ 210*b285192aSMauro Carvalho Chehab static void cx18_queue_flush(struct cx18_stream *s, 211*b285192aSMauro Carvalho Chehab struct cx18_queue *q_src, struct cx18_queue *q_dst) 212*b285192aSMauro Carvalho 
Chehab { 213*b285192aSMauro Carvalho Chehab struct cx18_mdl *mdl; 214*b285192aSMauro Carvalho Chehab 215*b285192aSMauro Carvalho Chehab /* It only makes sense to flush to q_free or q_idle */ 216*b285192aSMauro Carvalho Chehab if (q_src == q_dst || q_dst == &s->q_full || q_dst == &s->q_busy) 217*b285192aSMauro Carvalho Chehab return; 218*b285192aSMauro Carvalho Chehab 219*b285192aSMauro Carvalho Chehab spin_lock(&q_src->lock); 220*b285192aSMauro Carvalho Chehab spin_lock(&q_dst->lock); 221*b285192aSMauro Carvalho Chehab while (!list_empty(&q_src->list)) { 222*b285192aSMauro Carvalho Chehab mdl = list_first_entry(&q_src->list, struct cx18_mdl, list); 223*b285192aSMauro Carvalho Chehab list_move_tail(&mdl->list, &q_dst->list); 224*b285192aSMauro Carvalho Chehab mdl->bytesused = 0; 225*b285192aSMauro Carvalho Chehab mdl->readpos = 0; 226*b285192aSMauro Carvalho Chehab mdl->m_flags = 0; 227*b285192aSMauro Carvalho Chehab mdl->skipped = 0; 228*b285192aSMauro Carvalho Chehab mdl->curr_buf = NULL; 229*b285192aSMauro Carvalho Chehab atomic_inc(&q_dst->depth); 230*b285192aSMauro Carvalho Chehab } 231*b285192aSMauro Carvalho Chehab cx18_queue_init(q_src); 232*b285192aSMauro Carvalho Chehab spin_unlock(&q_src->lock); 233*b285192aSMauro Carvalho Chehab spin_unlock(&q_dst->lock); 234*b285192aSMauro Carvalho Chehab } 235*b285192aSMauro Carvalho Chehab 236*b285192aSMauro Carvalho Chehab void cx18_flush_queues(struct cx18_stream *s) 237*b285192aSMauro Carvalho Chehab { 238*b285192aSMauro Carvalho Chehab cx18_queue_flush(s, &s->q_busy, &s->q_free); 239*b285192aSMauro Carvalho Chehab cx18_queue_flush(s, &s->q_full, &s->q_free); 240*b285192aSMauro Carvalho Chehab } 241*b285192aSMauro Carvalho Chehab 242*b285192aSMauro Carvalho Chehab /* 243*b285192aSMauro Carvalho Chehab * Note, s->buf_pool is not protected by a lock, 244*b285192aSMauro Carvalho Chehab * the stream better not have *anything* going on when calling this 245*b285192aSMauro Carvalho Chehab */ 246*b285192aSMauro Carvalho 
Chehab void cx18_unload_queues(struct cx18_stream *s) 247*b285192aSMauro Carvalho Chehab { 248*b285192aSMauro Carvalho Chehab struct cx18_queue *q_idle = &s->q_idle; 249*b285192aSMauro Carvalho Chehab struct cx18_mdl *mdl; 250*b285192aSMauro Carvalho Chehab struct cx18_buffer *buf; 251*b285192aSMauro Carvalho Chehab 252*b285192aSMauro Carvalho Chehab /* Move all MDLS to q_idle */ 253*b285192aSMauro Carvalho Chehab cx18_queue_flush(s, &s->q_busy, q_idle); 254*b285192aSMauro Carvalho Chehab cx18_queue_flush(s, &s->q_full, q_idle); 255*b285192aSMauro Carvalho Chehab cx18_queue_flush(s, &s->q_free, q_idle); 256*b285192aSMauro Carvalho Chehab 257*b285192aSMauro Carvalho Chehab /* Reset MDL id's and move all buffers back to the stream's buf_pool */ 258*b285192aSMauro Carvalho Chehab spin_lock(&q_idle->lock); 259*b285192aSMauro Carvalho Chehab list_for_each_entry(mdl, &q_idle->list, list) { 260*b285192aSMauro Carvalho Chehab while (!list_empty(&mdl->buf_list)) { 261*b285192aSMauro Carvalho Chehab buf = list_first_entry(&mdl->buf_list, 262*b285192aSMauro Carvalho Chehab struct cx18_buffer, list); 263*b285192aSMauro Carvalho Chehab list_move_tail(&buf->list, &s->buf_pool); 264*b285192aSMauro Carvalho Chehab buf->bytesused = 0; 265*b285192aSMauro Carvalho Chehab buf->readpos = 0; 266*b285192aSMauro Carvalho Chehab } 267*b285192aSMauro Carvalho Chehab mdl->id = s->mdl_base_idx; /* reset id to a "safe" value */ 268*b285192aSMauro Carvalho Chehab /* all other mdl fields were cleared by cx18_queue_flush() */ 269*b285192aSMauro Carvalho Chehab } 270*b285192aSMauro Carvalho Chehab spin_unlock(&q_idle->lock); 271*b285192aSMauro Carvalho Chehab } 272*b285192aSMauro Carvalho Chehab 273*b285192aSMauro Carvalho Chehab /* 274*b285192aSMauro Carvalho Chehab * Note, s->buf_pool is not protected by a lock, 275*b285192aSMauro Carvalho Chehab * the stream better not have *anything* going on when calling this 276*b285192aSMauro Carvalho Chehab */ 277*b285192aSMauro Carvalho Chehab void 
cx18_load_queues(struct cx18_stream *s) 278*b285192aSMauro Carvalho Chehab { 279*b285192aSMauro Carvalho Chehab struct cx18 *cx = s->cx; 280*b285192aSMauro Carvalho Chehab struct cx18_mdl *mdl; 281*b285192aSMauro Carvalho Chehab struct cx18_buffer *buf; 282*b285192aSMauro Carvalho Chehab int mdl_id; 283*b285192aSMauro Carvalho Chehab int i; 284*b285192aSMauro Carvalho Chehab u32 partial_buf_size; 285*b285192aSMauro Carvalho Chehab 286*b285192aSMauro Carvalho Chehab /* 287*b285192aSMauro Carvalho Chehab * Attach buffers to MDLs, give the MDLs ids, and add MDLs to q_free 288*b285192aSMauro Carvalho Chehab * Excess MDLs are left on q_idle 289*b285192aSMauro Carvalho Chehab * Excess buffers are left in buf_pool and/or on an MDL in q_idle 290*b285192aSMauro Carvalho Chehab */ 291*b285192aSMauro Carvalho Chehab mdl_id = s->mdl_base_idx; 292*b285192aSMauro Carvalho Chehab for (mdl = cx18_dequeue(s, &s->q_idle), i = s->bufs_per_mdl; 293*b285192aSMauro Carvalho Chehab mdl != NULL && i == s->bufs_per_mdl; 294*b285192aSMauro Carvalho Chehab mdl = cx18_dequeue(s, &s->q_idle)) { 295*b285192aSMauro Carvalho Chehab 296*b285192aSMauro Carvalho Chehab mdl->id = mdl_id; 297*b285192aSMauro Carvalho Chehab 298*b285192aSMauro Carvalho Chehab for (i = 0; i < s->bufs_per_mdl; i++) { 299*b285192aSMauro Carvalho Chehab if (list_empty(&s->buf_pool)) 300*b285192aSMauro Carvalho Chehab break; 301*b285192aSMauro Carvalho Chehab 302*b285192aSMauro Carvalho Chehab buf = list_first_entry(&s->buf_pool, struct cx18_buffer, 303*b285192aSMauro Carvalho Chehab list); 304*b285192aSMauro Carvalho Chehab list_move_tail(&buf->list, &mdl->buf_list); 305*b285192aSMauro Carvalho Chehab 306*b285192aSMauro Carvalho Chehab /* update the firmware's MDL array with this buffer */ 307*b285192aSMauro Carvalho Chehab cx18_writel(cx, buf->dma_handle, 308*b285192aSMauro Carvalho Chehab &cx->scb->cpu_mdl[mdl_id + i].paddr); 309*b285192aSMauro Carvalho Chehab cx18_writel(cx, s->buf_size, 310*b285192aSMauro Carvalho 
Chehab &cx->scb->cpu_mdl[mdl_id + i].length); 311*b285192aSMauro Carvalho Chehab } 312*b285192aSMauro Carvalho Chehab 313*b285192aSMauro Carvalho Chehab if (i == s->bufs_per_mdl) { 314*b285192aSMauro Carvalho Chehab /* 315*b285192aSMauro Carvalho Chehab * The encoder doesn't honor s->mdl_size. So in the 316*b285192aSMauro Carvalho Chehab * case of a non-integral number of buffers to meet 317*b285192aSMauro Carvalho Chehab * mdl_size, we lie about the size of the last buffer 318*b285192aSMauro Carvalho Chehab * in the MDL to get the encoder to really only send 319*b285192aSMauro Carvalho Chehab * us mdl_size bytes per MDL transfer. 320*b285192aSMauro Carvalho Chehab */ 321*b285192aSMauro Carvalho Chehab partial_buf_size = s->mdl_size % s->buf_size; 322*b285192aSMauro Carvalho Chehab if (partial_buf_size) { 323*b285192aSMauro Carvalho Chehab cx18_writel(cx, partial_buf_size, 324*b285192aSMauro Carvalho Chehab &cx->scb->cpu_mdl[mdl_id + i - 1].length); 325*b285192aSMauro Carvalho Chehab } 326*b285192aSMauro Carvalho Chehab cx18_enqueue(s, mdl, &s->q_free); 327*b285192aSMauro Carvalho Chehab } else { 328*b285192aSMauro Carvalho Chehab /* Not enough buffers for this MDL; we won't use it */ 329*b285192aSMauro Carvalho Chehab cx18_push(s, mdl, &s->q_idle); 330*b285192aSMauro Carvalho Chehab } 331*b285192aSMauro Carvalho Chehab mdl_id += i; 332*b285192aSMauro Carvalho Chehab } 333*b285192aSMauro Carvalho Chehab } 334*b285192aSMauro Carvalho Chehab 335*b285192aSMauro Carvalho Chehab void _cx18_mdl_sync_for_device(struct cx18_stream *s, struct cx18_mdl *mdl) 336*b285192aSMauro Carvalho Chehab { 337*b285192aSMauro Carvalho Chehab int dma = s->dma; 338*b285192aSMauro Carvalho Chehab u32 buf_size = s->buf_size; 339*b285192aSMauro Carvalho Chehab struct pci_dev *pci_dev = s->cx->pci_dev; 340*b285192aSMauro Carvalho Chehab struct cx18_buffer *buf; 341*b285192aSMauro Carvalho Chehab 342*b285192aSMauro Carvalho Chehab list_for_each_entry(buf, &mdl->buf_list, list) 
343*b285192aSMauro Carvalho Chehab pci_dma_sync_single_for_device(pci_dev, buf->dma_handle, 344*b285192aSMauro Carvalho Chehab buf_size, dma); 345*b285192aSMauro Carvalho Chehab } 346*b285192aSMauro Carvalho Chehab 347*b285192aSMauro Carvalho Chehab int cx18_stream_alloc(struct cx18_stream *s) 348*b285192aSMauro Carvalho Chehab { 349*b285192aSMauro Carvalho Chehab struct cx18 *cx = s->cx; 350*b285192aSMauro Carvalho Chehab int i; 351*b285192aSMauro Carvalho Chehab 352*b285192aSMauro Carvalho Chehab if (s->buffers == 0) 353*b285192aSMauro Carvalho Chehab return 0; 354*b285192aSMauro Carvalho Chehab 355*b285192aSMauro Carvalho Chehab CX18_DEBUG_INFO("Allocate %s stream: %d x %d buffers " 356*b285192aSMauro Carvalho Chehab "(%d.%02d kB total)\n", 357*b285192aSMauro Carvalho Chehab s->name, s->buffers, s->buf_size, 358*b285192aSMauro Carvalho Chehab s->buffers * s->buf_size / 1024, 359*b285192aSMauro Carvalho Chehab (s->buffers * s->buf_size * 100 / 1024) % 100); 360*b285192aSMauro Carvalho Chehab 361*b285192aSMauro Carvalho Chehab if (((char __iomem *)&cx->scb->cpu_mdl[cx->free_mdl_idx + s->buffers] - 362*b285192aSMauro Carvalho Chehab (char __iomem *)cx->scb) > SCB_RESERVED_SIZE) { 363*b285192aSMauro Carvalho Chehab unsigned bufsz = (((char __iomem *)cx->scb) + SCB_RESERVED_SIZE - 364*b285192aSMauro Carvalho Chehab ((char __iomem *)cx->scb->cpu_mdl)); 365*b285192aSMauro Carvalho Chehab 366*b285192aSMauro Carvalho Chehab CX18_ERR("Too many buffers, cannot fit in SCB area\n"); 367*b285192aSMauro Carvalho Chehab CX18_ERR("Max buffers = %zd\n", 368*b285192aSMauro Carvalho Chehab bufsz / sizeof(struct cx18_mdl_ent)); 369*b285192aSMauro Carvalho Chehab return -ENOMEM; 370*b285192aSMauro Carvalho Chehab } 371*b285192aSMauro Carvalho Chehab 372*b285192aSMauro Carvalho Chehab s->mdl_base_idx = cx->free_mdl_idx; 373*b285192aSMauro Carvalho Chehab 374*b285192aSMauro Carvalho Chehab /* allocate stream buffers and MDLs */ 375*b285192aSMauro Carvalho Chehab for (i = 0; i < 
s->buffers; i++) { 376*b285192aSMauro Carvalho Chehab struct cx18_mdl *mdl; 377*b285192aSMauro Carvalho Chehab struct cx18_buffer *buf; 378*b285192aSMauro Carvalho Chehab 379*b285192aSMauro Carvalho Chehab /* 1 MDL per buffer to handle the worst & also default case */ 380*b285192aSMauro Carvalho Chehab mdl = kzalloc(sizeof(struct cx18_mdl), GFP_KERNEL|__GFP_NOWARN); 381*b285192aSMauro Carvalho Chehab if (mdl == NULL) 382*b285192aSMauro Carvalho Chehab break; 383*b285192aSMauro Carvalho Chehab 384*b285192aSMauro Carvalho Chehab buf = kzalloc(sizeof(struct cx18_buffer), 385*b285192aSMauro Carvalho Chehab GFP_KERNEL|__GFP_NOWARN); 386*b285192aSMauro Carvalho Chehab if (buf == NULL) { 387*b285192aSMauro Carvalho Chehab kfree(mdl); 388*b285192aSMauro Carvalho Chehab break; 389*b285192aSMauro Carvalho Chehab } 390*b285192aSMauro Carvalho Chehab 391*b285192aSMauro Carvalho Chehab buf->buf = kmalloc(s->buf_size, GFP_KERNEL|__GFP_NOWARN); 392*b285192aSMauro Carvalho Chehab if (buf->buf == NULL) { 393*b285192aSMauro Carvalho Chehab kfree(mdl); 394*b285192aSMauro Carvalho Chehab kfree(buf); 395*b285192aSMauro Carvalho Chehab break; 396*b285192aSMauro Carvalho Chehab } 397*b285192aSMauro Carvalho Chehab 398*b285192aSMauro Carvalho Chehab INIT_LIST_HEAD(&mdl->list); 399*b285192aSMauro Carvalho Chehab INIT_LIST_HEAD(&mdl->buf_list); 400*b285192aSMauro Carvalho Chehab mdl->id = s->mdl_base_idx; /* a somewhat safe value */ 401*b285192aSMauro Carvalho Chehab cx18_enqueue(s, mdl, &s->q_idle); 402*b285192aSMauro Carvalho Chehab 403*b285192aSMauro Carvalho Chehab INIT_LIST_HEAD(&buf->list); 404*b285192aSMauro Carvalho Chehab buf->dma_handle = pci_map_single(s->cx->pci_dev, 405*b285192aSMauro Carvalho Chehab buf->buf, s->buf_size, s->dma); 406*b285192aSMauro Carvalho Chehab cx18_buf_sync_for_cpu(s, buf); 407*b285192aSMauro Carvalho Chehab list_add_tail(&buf->list, &s->buf_pool); 408*b285192aSMauro Carvalho Chehab } 409*b285192aSMauro Carvalho Chehab if (i == s->buffers) { 
410*b285192aSMauro Carvalho Chehab cx->free_mdl_idx += s->buffers; 411*b285192aSMauro Carvalho Chehab return 0; 412*b285192aSMauro Carvalho Chehab } 413*b285192aSMauro Carvalho Chehab CX18_ERR("Couldn't allocate buffers for %s stream\n", s->name); 414*b285192aSMauro Carvalho Chehab cx18_stream_free(s); 415*b285192aSMauro Carvalho Chehab return -ENOMEM; 416*b285192aSMauro Carvalho Chehab } 417*b285192aSMauro Carvalho Chehab 418*b285192aSMauro Carvalho Chehab void cx18_stream_free(struct cx18_stream *s) 419*b285192aSMauro Carvalho Chehab { 420*b285192aSMauro Carvalho Chehab struct cx18_mdl *mdl; 421*b285192aSMauro Carvalho Chehab struct cx18_buffer *buf; 422*b285192aSMauro Carvalho Chehab struct cx18 *cx = s->cx; 423*b285192aSMauro Carvalho Chehab 424*b285192aSMauro Carvalho Chehab CX18_DEBUG_INFO("Deallocating buffers for %s stream\n", s->name); 425*b285192aSMauro Carvalho Chehab 426*b285192aSMauro Carvalho Chehab /* move all buffers to buf_pool and all MDLs to q_idle */ 427*b285192aSMauro Carvalho Chehab cx18_unload_queues(s); 428*b285192aSMauro Carvalho Chehab 429*b285192aSMauro Carvalho Chehab /* empty q_idle */ 430*b285192aSMauro Carvalho Chehab while ((mdl = cx18_dequeue(s, &s->q_idle))) 431*b285192aSMauro Carvalho Chehab kfree(mdl); 432*b285192aSMauro Carvalho Chehab 433*b285192aSMauro Carvalho Chehab /* empty buf_pool */ 434*b285192aSMauro Carvalho Chehab while (!list_empty(&s->buf_pool)) { 435*b285192aSMauro Carvalho Chehab buf = list_first_entry(&s->buf_pool, struct cx18_buffer, list); 436*b285192aSMauro Carvalho Chehab list_del_init(&buf->list); 437*b285192aSMauro Carvalho Chehab 438*b285192aSMauro Carvalho Chehab pci_unmap_single(s->cx->pci_dev, buf->dma_handle, 439*b285192aSMauro Carvalho Chehab s->buf_size, s->dma); 440*b285192aSMauro Carvalho Chehab kfree(buf->buf); 441*b285192aSMauro Carvalho Chehab kfree(buf); 442*b285192aSMauro Carvalho Chehab } 443*b285192aSMauro Carvalho Chehab } 444