/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#ifndef __INDUSTRIALIO_DMA_BUFFER_H__
#define __INDUSTRIALIO_DMA_BUFFER_H__

#include <linux/list.h>
#include <linux/kref.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/iio/buffer_impl.h>

struct iio_dma_buffer_queue;
struct iio_dma_buffer_ops;
struct device;

/**
 * enum iio_block_state - State of a struct iio_dma_buffer_block
 * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued
 * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
 * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
 * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
 * @IIO_BLOCK_STATE_DEAD: Block has been marked to be freed
 */
enum iio_block_state {
	IIO_BLOCK_STATE_DEQUEUED,
	IIO_BLOCK_STATE_QUEUED,
	IIO_BLOCK_STATE_ACTIVE,
	IIO_BLOCK_STATE_DONE,
	IIO_BLOCK_STATE_DEAD,
};
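
/*
 * Typical block lifecycle (a descriptive sketch inferred from the state
 * documentation above, not a normative contract):
 *
 *	DEQUEUED --> QUEUED --> ACTIVE --> DONE --> DEQUEUED --> ...
 *
 * A block moves to QUEUED when it is placed on the incoming queue, to
 * ACTIVE once it has been handed to the DMA controller, and to DONE when
 * the transfer completes and the block is placed on the outgoing queue.
 * DEAD is terminal: a block that is still owned by the DMA side when the
 * buffer is resized or torn down is marked DEAD and freed once its last
 * reference is dropped.
 */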

/**
 * struct iio_dma_buffer_block - IIO buffer block
 * @head: List head
 * @size: Total size of the block in bytes
 * @bytes_used: Number of bytes that contain valid data
 * @vaddr: Virtual address of the block's memory
 * @phys_addr: Physical address of the block's memory
 * @queue: Parent DMA buffer queue
 * @kref: kref used to manage the lifetime of the block
 * @state: Current state of the block
 */
struct iio_dma_buffer_block {
	/* May only be accessed by the owner of the block */
	struct list_head head;
	size_t bytes_used;

	/*
	 * Set during allocation, constant thereafter. May be accessed read-only
	 * by anybody holding a reference to the block.
	 */
	void *vaddr;
	dma_addr_t phys_addr;
	size_t size;
	struct iio_dma_buffer_queue *queue;

	/* Must not be accessed outside the core. */
	struct kref kref;
	/*
	 * Must not be accessed outside the core. Access needs to hold
	 * queue->list_lock if the block is not owned by the core.
	 */
	enum iio_block_state state;
};
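
/*
 * Example (an illustrative sketch, not part of this header): when the DMA
 * controller finishes a transfer, the driver records how much data the
 * block carries and hands it back to the core. The function name
 * foo_dma_complete and the assumption that the whole block was filled are
 * hypothetical; a real driver would derive bytes_used from the transfer
 * residue.
 *
 *	static void foo_dma_complete(void *data)
 *	{
 *		struct iio_dma_buffer_block *block = data;
 *
 *		// Assume a capture transfer that filled the entire block.
 *		block->bytes_used = block->size;
 *		iio_dma_buffer_block_done(block);
 *	}
 */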

/**
 * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer
 * @blocks: Buffer blocks used for fileio
 * @active_block: Block being used in read()
 * @pos: Read offset in the active block
 * @block_size: Size of each block
 */
struct iio_dma_buffer_queue_fileio {
	struct iio_dma_buffer_block *blocks[2];
	struct iio_dma_buffer_block *active_block;
	size_t pos;
	size_t block_size;
};

/**
 * struct iio_dma_buffer_queue - DMA buffer base structure
 * @buffer: IIO buffer base structure
 * @dev: Parent device
 * @ops: DMA buffer callbacks
 * @lock: Protects the incoming list, @active and the fields of the @fileio
 *   substruct
 * @list_lock: Protects lists that contain blocks which can be modified in
 *   atomic context, as well as the blocks on those lists. These are the
 *   outgoing queue list and typically also a list of active blocks in the
 *   part of the driver that handles the DMA controller
 * @incoming: List of buffers on the incoming queue
 * @outgoing: List of buffers on the outgoing queue
 * @active: Whether the buffer is currently active
 * @fileio: FileIO state
 */
struct iio_dma_buffer_queue {
	struct iio_buffer buffer;
	struct device *dev;
	const struct iio_dma_buffer_ops *ops;

	struct mutex lock;
	spinlock_t list_lock;
	struct list_head incoming;
	struct list_head outgoing;

	bool active;

	struct iio_dma_buffer_queue_fileio fileio;
};
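
/*
 * Example (an illustrative sketch, not part of this header): drivers embed
 * struct iio_dma_buffer_queue as the first member of their own queue
 * structure and recover it with container_of(). The names foo_buffer,
 * chan and to_foo_buffer() are hypothetical.
 *
 *	struct foo_buffer {
 *		struct iio_dma_buffer_queue queue;
 *		struct dma_chan *chan;
 *	};
 *
 *	static struct foo_buffer *to_foo_buffer(struct iio_buffer *buffer)
 *	{
 *		return container_of(buffer, struct foo_buffer, queue.buffer);
 *	}
 */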

/**
 * struct iio_dma_buffer_ops - DMA buffer callback operations
 * @submit: Called when a block is submitted to the DMA controller
 * @abort: Should abort all pending transfers
 */
struct iio_dma_buffer_ops {
	int (*submit)(struct iio_dma_buffer_queue *queue,
		struct iio_dma_buffer_block *block);
	void (*abort)(struct iio_dma_buffer_queue *queue);
};
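
/*
 * Example (an illustrative sketch, not part of this header): a
 * dmaengine-based implementation of the two callbacks, modelled loosely
 * on the in-tree dmaengine buffer glue. foo_buffer, to_foo_buffer() and
 * foo_dma_complete() refer to the hypothetical helpers sketched above;
 * error handling is kept minimal.
 *
 *	static int foo_submit(struct iio_dma_buffer_queue *queue,
 *			      struct iio_dma_buffer_block *block)
 *	{
 *		struct foo_buffer *foo = to_foo_buffer(&queue->buffer);
 *		struct dma_async_tx_descriptor *desc;
 *
 *		desc = dmaengine_prep_slave_single(foo->chan,
 *			block->phys_addr, block->size, DMA_DEV_TO_MEM,
 *			DMA_PREP_INTERRUPT);
 *		if (!desc)
 *			return -ENOMEM;
 *
 *		desc->callback = foo_dma_complete;
 *		desc->callback_param = block;
 *
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(foo->chan);
 *
 *		return 0;
 *	}
 *
 *	static void foo_abort(struct iio_dma_buffer_queue *queue)
 *	{
 *		struct foo_buffer *foo = to_foo_buffer(&queue->buffer);
 *
 *		dmaengine_terminate_sync(foo->chan);
 *	}
 *
 *	static const struct iio_dma_buffer_ops foo_dma_buffer_ops = {
 *		.submit = foo_submit,
 *		.abort = foo_abort,
 *	};
 */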

void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block);
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list);

int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev);
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev);
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer);
size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
int iio_dma_buffer_request_update(struct iio_buffer *buffer);
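
/*
 * Example (an illustrative sketch, not part of this header): the functions
 * above are meant to be plugged into an iio_buffer_access_funcs table. The
 * release wrapper and the table name are hypothetical; a driver that
 * allocates its queue structure dynamically frees it in the wrapper.
 *
 *	static void foo_buffer_release(struct iio_buffer *buffer)
 *	{
 *		struct foo_buffer *foo = to_foo_buffer(buffer);
 *
 *		iio_dma_buffer_release(&foo->queue);
 *		kfree(foo);
 *	}
 *
 *	static const struct iio_buffer_access_funcs foo_buffer_access_funcs = {
 *		.read = iio_dma_buffer_read,
 *		.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 *		.set_length = iio_dma_buffer_set_length,
 *		.request_update = iio_dma_buffer_request_update,
 *		.enable = iio_dma_buffer_enable,
 *		.disable = iio_dma_buffer_disable,
 *		.data_available = iio_dma_buffer_data_available,
 *		.release = foo_buffer_release,
 *		.modes = INDIO_BUFFER_HARDWARE,
 *	};
 */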

int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dma_dev, const struct iio_dma_buffer_ops *ops);
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);
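
/*
 * Example (an illustrative sketch, not part of this header): queue setup
 * and teardown in a driver, tying the pieces above together. The "rx"
 * channel name is hypothetical and error handling is omitted for brevity.
 *
 *	struct foo_buffer *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *
 *	foo->chan = dma_request_chan(dev, "rx");
 *	iio_dma_buffer_init(&foo->queue, foo->chan->device->dev,
 *			    &foo_dma_buffer_ops);
 *	foo->queue.buffer.access = &foo_buffer_access_funcs;
 *
 *	// On remove, stop the queue before releasing the channel:
 *	iio_dma_buffer_exit(&foo->queue);
 *	dma_release_channel(foo->chan);
 */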

#endif