Lines Matching refs:queue

100 	dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size),  in iio_buffer_block_release()
103 iio_buffer_put(&block->queue->buffer); in iio_buffer_block_release()
166 struct iio_dma_buffer_queue *queue, size_t size) in iio_dma_buffer_alloc_block() argument
174 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size), in iio_dma_buffer_alloc_block()
183 block->queue = queue; in iio_dma_buffer_alloc_block()
187 iio_buffer_get(&queue->buffer); in iio_dma_buffer_alloc_block()
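
Each block pairs a coherent DMA allocation with a reference on the owning buffer: iio_buffer_get() is taken here at allocation time and dropped again by iio_buffer_put() in iio_buffer_block_release() above, so the queue's buffer cannot go away while a block is still live.
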
194 struct iio_dma_buffer_queue *queue = block->queue; in _iio_dma_buffer_block_done() local
202 list_add_tail(&block->head, &queue->outgoing); in _iio_dma_buffer_block_done()
215 struct iio_dma_buffer_queue *queue = block->queue; in iio_dma_buffer_block_done() local
218 spin_lock_irqsave(&queue->list_lock, flags); in iio_dma_buffer_block_done()
220 spin_unlock_irqrestore(&queue->list_lock, flags); in iio_dma_buffer_block_done()
223 wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); in iio_dma_buffer_block_done()
237 void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue, in iio_dma_buffer_block_list_abort() argument
243 spin_lock_irqsave(&queue->list_lock, flags); in iio_dma_buffer_block_list_abort()
250 spin_unlock_irqrestore(&queue->list_lock, flags); in iio_dma_buffer_block_list_abort()
252 wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); in iio_dma_buffer_block_list_abort()
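
The two entry points above are the driver-facing half of the block lifecycle. Below is a minimal sketch of how a driver's completion and teardown paths might use them; the foo_* names, fields and helpers are illustrative assumptions, not taken from this file.

/* Completion: report how much the hardware actually wrote, then hand the
 * block back; iio_dma_buffer_block_done() moves it to the outgoing list
 * and wakes any poll()/read() waiters. */
static void foo_dma_irq_handler(struct foo_dma_dev *foo)
{
	struct iio_dma_buffer_block *block = foo->current_block;

	block->bytes_used = foo_read_transfer_len(foo);	/* hypothetical helper */
	iio_dma_buffer_block_done(block);
}

/* Shutdown: blocks still owned by the hardware are flushed in one call. */
static void foo_dma_abort(struct iio_dma_buffer_queue *queue)
{
	struct foo_dma_dev *foo = queue_to_foo(queue);	/* hypothetical container_of wrapper */

	foo_hw_stop(foo);
	iio_dma_buffer_block_list_abort(queue, &foo->active_blocks);
}
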
282 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_request_update() local
294 size = DIV_ROUND_UP(queue->buffer.bytes_per_datum * in iio_dma_buffer_request_update()
295 queue->buffer.length, 2); in iio_dma_buffer_request_update()
297 mutex_lock(&queue->lock); in iio_dma_buffer_request_update()
300 if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size)) in iio_dma_buffer_request_update()
303 queue->fileio.block_size = size; in iio_dma_buffer_request_update()
304 queue->fileio.active_block = NULL; in iio_dma_buffer_request_update()
306 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_request_update()
307 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_request_update()
308 block = queue->fileio.blocks[i]; in iio_dma_buffer_request_update()
320 INIT_LIST_HEAD(&queue->outgoing); in iio_dma_buffer_request_update()
321 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_request_update()
323 INIT_LIST_HEAD(&queue->incoming); in iio_dma_buffer_request_update()
325 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_request_update()
326 if (queue->fileio.blocks[i]) { in iio_dma_buffer_request_update()
327 block = queue->fileio.blocks[i]; in iio_dma_buffer_request_update()
340 block = iio_dma_buffer_alloc_block(queue, size); in iio_dma_buffer_request_update()
345 queue->fileio.blocks[i] = block; in iio_dma_buffer_request_update()
349 list_add_tail(&block->head, &queue->incoming); in iio_dma_buffer_request_update()
353 mutex_unlock(&queue->lock); in iio_dma_buffer_request_update()
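
The DIV_ROUND_UP() sizing above gives each fileio block half of the requested buffer, a double-buffering scheme in which one block can be filled by the DMA while another is drained by the reader. As a worked example (assuming a 4 KiB PAGE_SIZE): bytes_per_datum = 4 and a length of 2048 samples give DIV_ROUND_UP(4 * 2048, 2) = 4096 bytes per block, and the PAGE_ALIGN() comparison that follows skips re-allocating blocks whose page-aligned size has not changed.
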
359 static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue, in iio_dma_buffer_submit_block() argument
369 if (!queue->ops) in iio_dma_buffer_submit_block()
374 ret = queue->ops->submit(queue, block); in iio_dma_buffer_submit_block()
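
As the enable and read paths in this listing show, iio_dma_buffer_submit_block() is reached with queue->lock held, so a driver's ->submit() callback can rely on that. A hedged sketch of the submit side pairing with the completion handler sketched earlier; the foo_* pieces are again assumptions:

static int foo_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct foo_dma_dev *foo = queue_to_foo(queue);	/* hypothetical */

	foo->current_block = block;
	/* The hardware DMAs straight into the coherent allocation backing
	 * the block; completion is reported via foo_dma_irq_handler(). */
	foo_hw_start_transfer(foo, block->phys_addr, block->size);

	return 0;
}
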
403 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_enable() local
406 mutex_lock(&queue->lock); in iio_dma_buffer_enable()
407 queue->active = true; in iio_dma_buffer_enable()
408 list_for_each_entry_safe(block, _block, &queue->incoming, head) { in iio_dma_buffer_enable()
410 iio_dma_buffer_submit_block(queue, block); in iio_dma_buffer_enable()
412 mutex_unlock(&queue->lock); in iio_dma_buffer_enable()
429 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_disable() local
431 mutex_lock(&queue->lock); in iio_dma_buffer_disable()
432 queue->active = false; in iio_dma_buffer_disable()
434 if (queue->ops && queue->ops->abort) in iio_dma_buffer_disable()
435 queue->ops->abort(queue); in iio_dma_buffer_disable()
436 mutex_unlock(&queue->lock); in iio_dma_buffer_disable()
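
Enable walks everything parked on the incoming list and pushes it to the driver via iio_dma_buffer_submit_block(); disable clears queue->active and, when the driver provides one, calls its ->abort() hook so blocks still owned by the hardware can be handed back (typically through iio_dma_buffer_block_list_abort() above).
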
442 static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue, in iio_dma_buffer_enqueue() argument
447 } else if (queue->active) { in iio_dma_buffer_enqueue()
448 iio_dma_buffer_submit_block(queue, block); in iio_dma_buffer_enqueue()
451 list_add_tail(&block->head, &queue->incoming); in iio_dma_buffer_enqueue()
456 struct iio_dma_buffer_queue *queue) in iio_dma_buffer_dequeue() argument
460 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_dequeue()
461 block = list_first_entry_or_null(&queue->outgoing, struct in iio_dma_buffer_dequeue()
467 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_dequeue()
484 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_read() local
491 mutex_lock(&queue->lock); in iio_dma_buffer_read()
493 if (!queue->fileio.active_block) { in iio_dma_buffer_read()
494 block = iio_dma_buffer_dequeue(queue); in iio_dma_buffer_read()
499 queue->fileio.pos = 0; in iio_dma_buffer_read()
500 queue->fileio.active_block = block; in iio_dma_buffer_read()
502 block = queue->fileio.active_block; in iio_dma_buffer_read()
506 if (n > block->bytes_used - queue->fileio.pos) in iio_dma_buffer_read()
507 n = block->bytes_used - queue->fileio.pos; in iio_dma_buffer_read()
509 if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) { in iio_dma_buffer_read()
514 queue->fileio.pos += n; in iio_dma_buffer_read()
516 if (queue->fileio.pos == block->bytes_used) { in iio_dma_buffer_read()
517 queue->fileio.active_block = NULL; in iio_dma_buffer_read()
518 iio_dma_buffer_enqueue(queue, block); in iio_dma_buffer_read()
524 mutex_unlock(&queue->lock); in iio_dma_buffer_read()
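
The read path above is what services a plain read() on the IIO character device. A minimal userspace sketch, assuming an already configured and enabled buffer on a hypothetical /dev/iio:device0:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/iio:device0", O_RDONLY);

	if (fd < 0)
		return 1;

	/* Each read() drains the current fileio block; once a block is
	 * empty it is re-queued so the DMA hardware can refill it. */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);

	close(fd);
	return 0;
}
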
539 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf); in iio_dma_buffer_data_available() local
550 mutex_lock(&queue->lock); in iio_dma_buffer_data_available()
551 if (queue->fileio.active_block) in iio_dma_buffer_data_available()
552 data_available += queue->fileio.active_block->size; in iio_dma_buffer_data_available()
554 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_data_available()
555 list_for_each_entry(block, &queue->outgoing, head) in iio_dma_buffer_data_available()
557 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_data_available()
558 mutex_unlock(&queue->lock); in iio_dma_buffer_data_available()
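
The amount reported here, the size of the currently active fileio block (if any) plus every block already sitting on the outgoing list, is what feeds the core's poll()/watermark handling.
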
610 int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, in iio_dma_buffer_init() argument
613 iio_buffer_init(&queue->buffer); in iio_dma_buffer_init()
614 queue->buffer.length = PAGE_SIZE; in iio_dma_buffer_init()
615 queue->buffer.watermark = queue->buffer.length / 2; in iio_dma_buffer_init()
616 queue->dev = dev; in iio_dma_buffer_init()
617 queue->ops = ops; in iio_dma_buffer_init()
619 INIT_LIST_HEAD(&queue->incoming); in iio_dma_buffer_init()
620 INIT_LIST_HEAD(&queue->outgoing); in iio_dma_buffer_init()
622 mutex_init(&queue->lock); in iio_dma_buffer_init()
623 spin_lock_init(&queue->list_lock); in iio_dma_buffer_init()
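
On the setup side, a driver embeds a struct iio_dma_buffer_queue in its own buffer state, provides submit/abort callbacks through struct iio_dma_buffer_ops, and calls iio_dma_buffer_init(). A hedged sketch of that wiring, reusing the foo_* callbacks sketched earlier; the names are illustrative and the iio_buffer access-funcs hookup (read, enable, disable, ...) is omitted:

#include <linux/slab.h>
#include <linux/iio/buffer-dma.h>

static const struct iio_dma_buffer_ops foo_dma_buffer_ops = {
	.submit = foo_buffer_submit_block,
	.abort = foo_dma_abort,
};

struct foo_buffer {
	struct iio_dma_buffer_queue queue;
	/* driver-private DMA controller state would live here */
};

static struct iio_buffer *foo_buffer_alloc(struct device *dev)
{
	struct foo_buffer *foo;

	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return NULL;

	/* Sets up the embedded iio_buffer, the incoming/outgoing lists and
	 * the queue locks, and records dev for the coherent allocations. */
	if (iio_dma_buffer_init(&foo->queue, dev, &foo_dma_buffer_ops)) {
		kfree(foo);
		return NULL;
	}

	return &foo->queue.buffer;
}
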
636 void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue) in iio_dma_buffer_exit() argument
640 mutex_lock(&queue->lock); in iio_dma_buffer_exit()
642 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_exit()
643 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_exit()
644 if (!queue->fileio.blocks[i]) in iio_dma_buffer_exit()
646 queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD; in iio_dma_buffer_exit()
648 INIT_LIST_HEAD(&queue->outgoing); in iio_dma_buffer_exit()
649 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_exit()
651 INIT_LIST_HEAD(&queue->incoming); in iio_dma_buffer_exit()
653 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_exit()
654 if (!queue->fileio.blocks[i]) in iio_dma_buffer_exit()
656 iio_buffer_block_put(queue->fileio.blocks[i]); in iio_dma_buffer_exit()
657 queue->fileio.blocks[i] = NULL; in iio_dma_buffer_exit()
659 queue->fileio.active_block = NULL; in iio_dma_buffer_exit()
660 queue->ops = NULL; in iio_dma_buffer_exit()
662 mutex_unlock(&queue->lock); in iio_dma_buffer_exit()
674 void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue) in iio_dma_buffer_release() argument
676 mutex_destroy(&queue->lock); in iio_dma_buffer_release()
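
Teardown is split in two, in the same way the dmaengine front end uses these helpers: iio_dma_buffer_exit() when the driver stops using the queue (blocks are marked dead and dropped), and iio_dma_buffer_release() only once the last buffer reference is gone, typically from the buffer's ->release() callback. A hedged sketch continuing the foo_* example:

/* Called from the buffer's ->release() once the final iio_buffer_put()
 * has dropped the last reference. */
static void foo_buffer_release(struct iio_buffer *buf)
{
	struct foo_buffer *foo =
		container_of(buf, struct foo_buffer, queue.buffer);

	iio_dma_buffer_release(&foo->queue);
	kfree(foo);
}

/* Driver teardown: stop using the queue, then drop our own reference. */
static void foo_buffer_free(struct foo_buffer *foo)
{
	iio_dma_buffer_exit(&foo->queue);
	iio_buffer_put(&foo->queue.buffer);
}
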