Lines Matching +full:block +full:- +full:size

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright 2013-2015 Analog Devices Inc.
4 * Author: Lars-Peter Clausen <lars@metafoo.de>
16 #include <linux/iio/buffer-dma.h>
17 #include <linux/dma-mapping.h>
21 * For DMA buffers the storage is sub-divided into so-called blocks. Each block
22 * has its own memory buffer. The size of the block is the granularity at which
24 * basic unit of data exchange from one sample to one block decreases the
27 * sample the overhead will be x for each sample. Whereas when using a block
35 * them with data. Blocks on the outgoing queue have been filled with data and
38 * A block can be in one of the following states:
40 * the block.
43 * * Owned by the DMA controller: The DMA controller is processing the block
48 * * Dead: A block that is dead has been marked to be freed. It might still
51 * incoming or outgoing queue the block will be freed.
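The states above correspond to per-block state constants. A sketch of the set used throughout this file (the enum itself lives in <linux/iio/buffer-dma.h>; the comments paraphrase the description above):

enum iio_block_state {
	IIO_BLOCK_STATE_DEQUEUED,	/* owned by the application / fileio reader */
	IIO_BLOCK_STATE_QUEUED,		/* on the incoming list, waiting for the DMA controller */
	IIO_BLOCK_STATE_ACTIVE,		/* owned by the DMA controller, being filled */
	IIO_BLOCK_STATE_DONE,		/* on the outgoing list, data ready to be read */
	IIO_BLOCK_STATE_DEAD,		/* marked to be freed once the last reference drops */
};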
54 * with both the block structure as well as the storage memory for the block
55 * will be freed when the last reference to the block is dropped. This means a
56 * block must not be accessed without holding a reference.
64 * converter to the memory region of the block. Once the DMA transfer has been
66 * block.
68 * Prior to this it must set the bytes_used field of the block so that it contains
70 * size of the block, but if the DMA hardware has certain alignment requirements
71 * for the transfer length it might choose to use less than the full size. In
73 * datum, i.e. the block must not contain partial samples.
75 * The driver must call iio_dma_buffer_block_done() for each block it has
77 * perform a DMA transfer for the block, e.g. because the buffer was disabled
78 * before the block transfer was started. In this case it should set bytes_used
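To make the driver-side contract concrete, here is a minimal sketch of a dmaengine-backed submit callback and its completion handler. iio_dma_buffer_block_done() and the block fields (phys_addr, size, bytes_used) are the ones shown in this file; the my_dma_queue wrapper and its chan field are hypothetical, and a real driver would also track its in-flight blocks and honour its controller's alignment rules.

#include <linux/dmaengine.h>
#include <linux/iio/buffer-dma.h>

struct my_dma_queue {
	struct iio_dma_buffer_queue queue;
	struct dma_chan *chan;
};

static void my_dma_complete(void *data)
{
	struct iio_dma_buffer_block *block = data;

	/* Transfer finished: hand ownership of the block back to the core. */
	iio_dma_buffer_block_done(block);
}

static int my_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct my_dma_queue *mq = container_of(queue, struct my_dma_queue, queue);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/*
	 * Use the full block size here; a driver with alignment constraints
	 * may round this down, but only to a multiple of the sample size.
	 */
	block->bytes_used = block->size;

	desc = dmaengine_prep_slave_single(mq->chan, block->phys_addr,
		block->bytes_used, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback = my_dma_complete;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	dma_async_issue_pending(mq->chan);

	return 0;
}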
95 struct iio_dma_buffer_block *block = container_of(kref, in iio_buffer_block_release() local
98 WARN_ON(block->state != IIO_BLOCK_STATE_DEAD); in iio_buffer_block_release()
100 dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size), in iio_buffer_block_release()
101 block->vaddr, block->phys_addr); in iio_buffer_block_release()
103 iio_buffer_put(&block->queue->buffer); in iio_buffer_block_release()
104 kfree(block); in iio_buffer_block_release()
107 static void iio_buffer_block_get(struct iio_dma_buffer_block *block) in iio_buffer_block_get() argument
109 kref_get(&block->kref); in iio_buffer_block_get()
112 static void iio_buffer_block_put(struct iio_dma_buffer_block *block) in iio_buffer_block_put() argument
114 kref_put(&block->kref, iio_buffer_block_release); in iio_buffer_block_put()
126 struct iio_dma_buffer_block *block, *_block; in iio_dma_buffer_cleanup_worker() local
133 list_for_each_entry_safe(block, _block, &block_list, head) in iio_dma_buffer_cleanup_worker()
134 iio_buffer_block_release(&block->kref); in iio_dma_buffer_cleanup_worker()
140 struct iio_dma_buffer_block *block; in iio_buffer_block_release_atomic() local
143 block = container_of(kref, struct iio_dma_buffer_block, kref); in iio_buffer_block_release_atomic()
146 list_add_tail(&block->head, &iio_dma_buffer_dead_blocks); in iio_buffer_block_release_atomic()
155 static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block) in iio_buffer_block_put_atomic() argument
157 kref_put(&block->kref, iio_buffer_block_release_atomic); in iio_buffer_block_put_atomic()
166 struct iio_dma_buffer_queue *queue, size_t size) in iio_dma_buffer_alloc_block() argument
168 struct iio_dma_buffer_block *block; in iio_dma_buffer_alloc_block() local
170 block = kzalloc(sizeof(*block), GFP_KERNEL); in iio_dma_buffer_alloc_block()
171 if (!block) in iio_dma_buffer_alloc_block()
174 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size), in iio_dma_buffer_alloc_block()
175 &block->phys_addr, GFP_KERNEL); in iio_dma_buffer_alloc_block()
176 if (!block->vaddr) { in iio_dma_buffer_alloc_block()
177 kfree(block); in iio_dma_buffer_alloc_block()
181 block->size = size; in iio_dma_buffer_alloc_block()
182 block->state = IIO_BLOCK_STATE_DEQUEUED; in iio_dma_buffer_alloc_block()
183 block->queue = queue; in iio_dma_buffer_alloc_block()
184 INIT_LIST_HEAD(&block->head); in iio_dma_buffer_alloc_block()
185 kref_init(&block->kref); in iio_dma_buffer_alloc_block()
187 iio_buffer_get(&queue->buffer); in iio_dma_buffer_alloc_block()
189 return block; in iio_dma_buffer_alloc_block()
192 static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) in _iio_dma_buffer_block_done() argument
194 struct iio_dma_buffer_queue *queue = block->queue; in _iio_dma_buffer_block_done()
200 if (block->state != IIO_BLOCK_STATE_DEAD) { in _iio_dma_buffer_block_done()
201 block->state = IIO_BLOCK_STATE_DONE; in _iio_dma_buffer_block_done()
202 list_add_tail(&block->head, &queue->outgoing); in _iio_dma_buffer_block_done()
207 * iio_dma_buffer_block_done() - Indicate that a block has been completed
208 * @block: The completed block
210 * Should be called when the DMA controller has finished handling the block to
211 * pass back ownership of the block to the queue.
213 void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) in iio_dma_buffer_block_done() argument
215 struct iio_dma_buffer_queue *queue = block->queue; in iio_dma_buffer_block_done()
218 spin_lock_irqsave(&queue->list_lock, flags); in iio_dma_buffer_block_done()
219 _iio_dma_buffer_block_done(block); in iio_dma_buffer_block_done()
220 spin_unlock_irqrestore(&queue->list_lock, flags); in iio_dma_buffer_block_done()
222 iio_buffer_block_put_atomic(block); in iio_dma_buffer_block_done()
223 wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); in iio_dma_buffer_block_done()
228 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
234 * stopped. This will set bytes_used to 0 for each block in the list and then
240 struct iio_dma_buffer_block *block, *_block; in iio_dma_buffer_block_list_abort() local
243 spin_lock_irqsave(&queue->list_lock, flags); in iio_dma_buffer_block_list_abort()
244 list_for_each_entry_safe(block, _block, list, head) { in iio_dma_buffer_block_list_abort()
245 list_del(&block->head); in iio_dma_buffer_block_list_abort()
246 block->bytes_used = 0; in iio_dma_buffer_block_list_abort()
247 _iio_dma_buffer_block_done(block); in iio_dma_buffer_block_list_abort()
248 iio_buffer_block_put_atomic(block); in iio_dma_buffer_block_list_abort()
250 spin_unlock_irqrestore(&queue->list_lock, flags); in iio_dma_buffer_block_list_abort()
252 wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); in iio_dma_buffer_block_list_abort()
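A driver's abort callback is the usual caller of this helper: it stops the DMA controller and then passes back the blocks it still holds. A short sketch, again assuming the hypothetical dmaengine-backed my_dma_queue from the earlier sketch plus a hypothetical "active" list that the submit path would populate:

static void my_abort(struct iio_dma_buffer_queue *queue)
{
	struct my_dma_queue *mq = container_of(queue, struct my_dma_queue, queue);

	/* Stop the controller synchronously so no completion callback races
	 * with the list abort below. */
	dmaengine_terminate_sync(mq->chan);

	/* Sets bytes_used to 0 for every block on the list, marks them done
	 * and wakes up any reader polling the buffer. */
	iio_dma_buffer_block_list_abort(queue, &mq->active);
}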
256 static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block) in iio_dma_block_reusable() argument
259 * If the core owns the block it can be re-used. This should be the in iio_dma_block_reusable()
261 * not support abort and has not given back the block yet. in iio_dma_block_reusable()
263 switch (block->state) { in iio_dma_block_reusable()
274 * iio_dma_buffer_request_update() - DMA buffer request_update callback
283 struct iio_dma_buffer_block *block; in iio_dma_buffer_request_update() local
285 size_t size; in iio_dma_buffer_request_update() local
291 * buffering scheme with usually one block at a time being used by the in iio_dma_buffer_request_update()
294 size = DIV_ROUND_UP(queue->buffer.bytes_per_datum * in iio_dma_buffer_request_update()
295 queue->buffer.length, 2); in iio_dma_buffer_request_update()
297 mutex_lock(&queue->lock); in iio_dma_buffer_request_update()
300 if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size)) in iio_dma_buffer_request_update()
303 queue->fileio.block_size = size; in iio_dma_buffer_request_update()
304 queue->fileio.active_block = NULL; in iio_dma_buffer_request_update()
306 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_request_update()
307 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_request_update()
308 block = queue->fileio.blocks[i]; in iio_dma_buffer_request_update()
310 /* If we can't re-use it, free it */ in iio_dma_buffer_request_update()
311 if (block && (!iio_dma_block_reusable(block) || !try_reuse)) in iio_dma_buffer_request_update()
312 block->state = IIO_BLOCK_STATE_DEAD; in iio_dma_buffer_request_update()
320 INIT_LIST_HEAD(&queue->outgoing); in iio_dma_buffer_request_update()
321 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_request_update()
323 INIT_LIST_HEAD(&queue->incoming); in iio_dma_buffer_request_update()
325 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_request_update()
326 if (queue->fileio.blocks[i]) { in iio_dma_buffer_request_update()
327 block = queue->fileio.blocks[i]; in iio_dma_buffer_request_update()
328 if (block->state == IIO_BLOCK_STATE_DEAD) { in iio_dma_buffer_request_update()
330 iio_buffer_block_put(block); in iio_dma_buffer_request_update()
331 block = NULL; in iio_dma_buffer_request_update()
333 block->size = size; in iio_dma_buffer_request_update()
336 block = NULL; in iio_dma_buffer_request_update()
339 if (!block) { in iio_dma_buffer_request_update()
340 block = iio_dma_buffer_alloc_block(queue, size); in iio_dma_buffer_request_update()
341 if (!block) { in iio_dma_buffer_request_update()
342 ret = -ENOMEM; in iio_dma_buffer_request_update()
345 queue->fileio.blocks[i] = block; in iio_dma_buffer_request_update()
348 block->state = IIO_BLOCK_STATE_QUEUED; in iio_dma_buffer_request_update()
349 list_add_tail(&block->head, &queue->incoming); in iio_dma_buffer_request_update()
353 mutex_unlock(&queue->lock); in iio_dma_buffer_request_update()
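A quick worked example of the sizing above, with illustrative numbers: for bytes_per_datum = 8 and a buffer length of 1024 samples, each of the two fileio blocks covers half of the requested buffer.

/*
 * size = DIV_ROUND_UP(bytes_per_datum * length, 2)
 *      = DIV_ROUND_UP(8 * 1024, 2)
 *      = 4096 bytes per block
 *
 * The actual allocation is PAGE_ALIGN(size), so on a system with 4 KiB
 * pages each of the two blocks occupies exactly one page in this case.
 */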
360 struct iio_dma_buffer_block *block) in iio_dma_buffer_submit_block() argument
365 * If the hardware has already been removed we put the block into in iio_dma_buffer_submit_block()
369 if (!queue->ops) in iio_dma_buffer_submit_block()
372 block->state = IIO_BLOCK_STATE_ACTIVE; in iio_dma_buffer_submit_block()
373 iio_buffer_block_get(block); in iio_dma_buffer_submit_block()
374 ret = queue->ops->submit(queue, block); in iio_dma_buffer_submit_block()
378 * other than wait for the buffer to be disabled and re-enabled in iio_dma_buffer_submit_block()
386 iio_buffer_block_put(block); in iio_dma_buffer_submit_block()
391 * iio_dma_buffer_enable() - Enable DMA buffer
404 struct iio_dma_buffer_block *block, *_block; in iio_dma_buffer_enable() local
406 mutex_lock(&queue->lock); in iio_dma_buffer_enable()
407 queue->active = true; in iio_dma_buffer_enable()
408 list_for_each_entry_safe(block, _block, &queue->incoming, head) { in iio_dma_buffer_enable()
409 list_del(&block->head); in iio_dma_buffer_enable()
410 iio_dma_buffer_submit_block(queue, block); in iio_dma_buffer_enable()
412 mutex_unlock(&queue->lock); in iio_dma_buffer_enable()
419 * iio_dma_buffer_disable() - Disable DMA buffer
431 mutex_lock(&queue->lock); in iio_dma_buffer_disable()
432 queue->active = false; in iio_dma_buffer_disable()
434 if (queue->ops && queue->ops->abort) in iio_dma_buffer_disable()
435 queue->ops->abort(queue); in iio_dma_buffer_disable()
436 mutex_unlock(&queue->lock); in iio_dma_buffer_disable()
443 struct iio_dma_buffer_block *block) in iio_dma_buffer_enqueue() argument
445 if (block->state == IIO_BLOCK_STATE_DEAD) { in iio_dma_buffer_enqueue()
446 iio_buffer_block_put(block); in iio_dma_buffer_enqueue()
447 } else if (queue->active) { in iio_dma_buffer_enqueue()
448 iio_dma_buffer_submit_block(queue, block); in iio_dma_buffer_enqueue()
450 block->state = IIO_BLOCK_STATE_QUEUED; in iio_dma_buffer_enqueue()
451 list_add_tail(&block->head, &queue->incoming); in iio_dma_buffer_enqueue()
458 struct iio_dma_buffer_block *block; in iio_dma_buffer_dequeue() local
460 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_dequeue()
461 block = list_first_entry_or_null(&queue->outgoing, struct in iio_dma_buffer_dequeue()
463 if (block != NULL) { in iio_dma_buffer_dequeue()
464 list_del(&block->head); in iio_dma_buffer_dequeue()
465 block->state = IIO_BLOCK_STATE_DEQUEUED; in iio_dma_buffer_dequeue()
467 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_dequeue()
469 return block; in iio_dma_buffer_dequeue()
473 * iio_dma_buffer_read() - DMA buffer read callback
485 struct iio_dma_buffer_block *block; in iio_dma_buffer_read() local
488 if (n < buffer->bytes_per_datum) in iio_dma_buffer_read()
489 return -EINVAL; in iio_dma_buffer_read()
491 mutex_lock(&queue->lock); in iio_dma_buffer_read()
493 if (!queue->fileio.active_block) { in iio_dma_buffer_read()
494 block = iio_dma_buffer_dequeue(queue); in iio_dma_buffer_read()
495 if (block == NULL) { in iio_dma_buffer_read()
499 queue->fileio.pos = 0; in iio_dma_buffer_read()
500 queue->fileio.active_block = block; in iio_dma_buffer_read()
502 block = queue->fileio.active_block; in iio_dma_buffer_read()
505 n = rounddown(n, buffer->bytes_per_datum); in iio_dma_buffer_read()
506 if (n > block->bytes_used - queue->fileio.pos) in iio_dma_buffer_read()
507 n = block->bytes_used - queue->fileio.pos; in iio_dma_buffer_read()
509 if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) { in iio_dma_buffer_read()
510 ret = -EFAULT; in iio_dma_buffer_read()
514 queue->fileio.pos += n; in iio_dma_buffer_read()
516 if (queue->fileio.pos == block->bytes_used) { in iio_dma_buffer_read()
517 queue->fileio.active_block = NULL; in iio_dma_buffer_read()
518 iio_dma_buffer_enqueue(queue, block); in iio_dma_buffer_read()
524 mutex_unlock(&queue->lock); in iio_dma_buffer_read()
531 * iio_dma_buffer_data_available() - DMA buffer data_available callback
540 struct iio_dma_buffer_block *block; in iio_dma_buffer_data_available() local
544 * For counting the available bytes we'll use the size of the block not in iio_dma_buffer_data_available()
545 * the number of actual bytes available in the block. Otherwise it is in iio_dma_buffer_data_available()
550 mutex_lock(&queue->lock); in iio_dma_buffer_data_available()
551 if (queue->fileio.active_block) in iio_dma_buffer_data_available()
552 data_available += queue->fileio.active_block->size; in iio_dma_buffer_data_available()
554 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_data_available()
555 list_for_each_entry(block, &queue->outgoing, head) in iio_dma_buffer_data_available()
556 data_available += block->size; in iio_dma_buffer_data_available()
557 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_data_available()
558 mutex_unlock(&queue->lock); in iio_dma_buffer_data_available()
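From userspace the data surfaced by these callbacks is consumed through the IIO character device: poll() blocks until the queue wakes the buffer's pollq (EPOLLIN | EPOLLRDNORM, as above) and read() then drains the active block. A minimal sketch; the device path is an example, and the buffer is assumed to have been configured and enabled via sysfs beforehand.

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct pollfd pfd;
	ssize_t n;

	/* Example device node; the index depends on the system. */
	pfd.fd = open("/dev/iio:device0", O_RDONLY);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLIN;

	for (;;) {
		/* Sleep until at least a watermark's worth of data is ready. */
		if (poll(&pfd, 1, -1) <= 0)
			break;

		n = read(pfd.fd, buf, sizeof(buf));
		if (n <= 0)
			break;

		/* buf now holds n bytes of whole samples. */
		printf("read %zd bytes\n", n);
	}

	close(pfd.fd);
	return 0;
}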
565 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
566 * @buffer: Buffer to set the bytes-per-datum for
567 * @bpd: The new bytes-per-datum value
574 buffer->bytes_per_datum = bpd; in iio_dma_buffer_set_bytes_per_datum()
581 * iio_dma_buffer_set_length() - DMA buffer set_length callback
593 buffer->length = length; in iio_dma_buffer_set_length()
594 buffer->watermark = length / 2; in iio_dma_buffer_set_length()
601 * iio_dma_buffer_init() - Initialize DMA buffer queue
613 iio_buffer_init(&queue->buffer); in iio_dma_buffer_init()
614 queue->buffer.length = PAGE_SIZE; in iio_dma_buffer_init()
615 queue->buffer.watermark = queue->buffer.length / 2; in iio_dma_buffer_init()
616 queue->dev = dev; in iio_dma_buffer_init()
617 queue->ops = ops; in iio_dma_buffer_init()
619 INIT_LIST_HEAD(&queue->incoming); in iio_dma_buffer_init()
620 INIT_LIST_HEAD(&queue->outgoing); in iio_dma_buffer_init()
622 mutex_init(&queue->lock); in iio_dma_buffer_init()
623 spin_lock_init(&queue->list_lock); in iio_dma_buffer_init()
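A driver glues the fileio callbacks from this file into an iio_buffer_access_funcs table and initializes the queue with its own submit/abort ops. The sketch below follows that pattern; my_dma_ops, my_submit_block, my_abort and my_buffer_probe are hypothetical names carried over from the earlier sketches, and iio_buffer_access_funcs/INDIO_BUFFER_HARDWARE come from the IIO buffer headers.

static void my_buffer_release(struct iio_buffer *buffer)
{
	struct iio_dma_buffer_queue *queue =
		container_of(buffer, struct iio_dma_buffer_queue, buffer);

	/* Final teardown once the last buffer reference is gone. */
	iio_dma_buffer_release(queue);
	/* A real driver would also free its wrapper structure here. */
}

static const struct iio_dma_buffer_ops my_dma_ops = {
	.submit = my_submit_block,
	.abort = my_abort,
};

static const struct iio_buffer_access_funcs my_buffer_access_funcs = {
	.read = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = my_buffer_release,

	.modes = INDIO_BUFFER_HARDWARE,
};

static int my_buffer_probe(struct device *dev, struct my_dma_queue *mq)
{
	mq->queue.buffer.access = &my_buffer_access_funcs;
	return iio_dma_buffer_init(&mq->queue, dev, &my_dma_ops);
}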
630 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
640 mutex_lock(&queue->lock); in iio_dma_buffer_exit()
642 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_exit()
643 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_exit()
644 if (!queue->fileio.blocks[i]) in iio_dma_buffer_exit()
646 queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD; in iio_dma_buffer_exit()
648 INIT_LIST_HEAD(&queue->outgoing); in iio_dma_buffer_exit()
649 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_exit()
651 INIT_LIST_HEAD(&queue->incoming); in iio_dma_buffer_exit()
653 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_exit()
654 if (!queue->fileio.blocks[i]) in iio_dma_buffer_exit()
656 iio_buffer_block_put(queue->fileio.blocks[i]); in iio_dma_buffer_exit()
657 queue->fileio.blocks[i] = NULL; in iio_dma_buffer_exit()
659 queue->fileio.active_block = NULL; in iio_dma_buffer_exit()
660 queue->ops = NULL; in iio_dma_buffer_exit()
662 mutex_unlock(&queue->lock); in iio_dma_buffer_exit()
667 * iio_dma_buffer_release() - Release final buffer resources
676 mutex_destroy(&queue->lock); in iio_dma_buffer_release()
680 MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");