xref: /openbmc/linux/sound/core/seq/seq_fifo.c (revision c67e8ec0)
/*
 *   ALSA sequencer FIFO
 *   Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/core.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>

#include "seq_fifo.h"
#include "seq_lock.h"


/* FIFO */

/* create new fifo */
struct snd_seq_fifo *snd_seq_fifo_new(int poolsize)
{
	struct snd_seq_fifo *f;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return NULL;

	f->pool = snd_seq_pool_new(poolsize);
	if (f->pool == NULL) {
		kfree(f);
		return NULL;
	}
	if (snd_seq_pool_init(f->pool) < 0) {
		snd_seq_pool_delete(&f->pool);
		kfree(f);
		return NULL;
	}

	spin_lock_init(&f->lock);
	snd_use_lock_init(&f->use_lock);
	init_waitqueue_head(&f->input_sleep);
	atomic_set(&f->overflow, 0);

	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;

	return f;
}
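
/*
 * Typical create/delete pairing, as a rough sketch (the real consumer
 * is the client code in seq_clients.c, which uses
 * SNDRV_SEQ_DEFAULT_CLIENT_EVENTS from <sound/seq_kernel.h> as the
 * pool size):
 *
 *	struct snd_seq_fifo *fifo;
 *
 *	fifo = snd_seq_fifo_new(SNDRV_SEQ_DEFAULT_CLIENT_EVENTS);
 *	if (!fifo)
 *		return -ENOMEM;
 *	...
 *	snd_seq_fifo_delete(&fifo);	(also resets the caller's pointer)
 */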

void snd_seq_fifo_delete(struct snd_seq_fifo **fifo)
{
	struct snd_seq_fifo *f;

	if (snd_BUG_ON(!fifo))
		return;
	f = *fifo;
	if (snd_BUG_ON(!f))
		return;
	*fifo = NULL;

	if (f->pool)
		snd_seq_pool_mark_closing(f->pool);

	snd_seq_fifo_clear(f);

	/* wake up clients if any */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	/* release the pool and the fifo itself */
	if (f->pool) {
		snd_seq_pool_done(f->pool);
		snd_seq_pool_delete(&f->pool);
	}

	kfree(f);
}
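
/*
 * The teardown order above matters: the pool is marked closing first,
 * so that no new cells are handed out while the fifo is drained; any
 * remaining sleepers are then woken before the pool is torn down and
 * the fifo freed.
 */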

static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f);

/* clear queue */
void snd_seq_fifo_clear(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;

	/* clear overflow flag */
	atomic_set(&f->overflow, 0);

	snd_use_lock_sync(&f->use_lock);
	spin_lock_irqsave(&f->lock, flags);
	/* drain the fifo */
	while ((cell = fifo_cell_out(f)) != NULL)
		snd_seq_cell_free(cell);
	spin_unlock_irqrestore(&f->lock, flags);
}
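
/*
 * Note that the snd_use_lock_sync() above waits until every concurrent
 * snd_seq_fifo_event_in() call in flight has finished, so the drain
 * that follows it sees a stable cell list.
 */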


/* enqueue event to fifo */
int snd_seq_fifo_event_in(struct snd_seq_fifo *f,
			  struct snd_seq_event *event)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	snd_use_lock_use(&f->use_lock);
	err = snd_seq_event_dup(f->pool, event, &cell, 1, NULL, NULL); /* always non-blocking */
	if (err < 0) {
		if ((err == -ENOMEM) || (err == -EAGAIN))
			atomic_inc(&f->overflow);
		snd_use_lock_free(&f->use_lock);
		return err;
	}

	/* append the new cell to the fifo */
	spin_lock_irqsave(&f->lock, flags);
	if (f->tail != NULL)
		f->tail->next = cell;
	f->tail = cell;
	if (f->head == NULL)
		f->head = cell;
	cell->next = NULL;
	f->cells++;
	spin_unlock_irqrestore(&f->lock, flags);

	/* wake up the client */
	if (waitqueue_active(&f->input_sleep))
		wake_up(&f->input_sleep);

	snd_use_lock_free(&f->use_lock);

	return 0; /* success */
}
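
/*
 * The use_lock above implements a simple use-count protocol (see
 * seq_lock.h): each writer brackets its pool access with
 *
 *	snd_use_lock_use(&f->use_lock);
 *	... allocate from and append to f->pool ...
 *	snd_use_lock_free(&f->use_lock);
 *
 * while snd_seq_fifo_clear() and snd_seq_fifo_resize() call
 * snd_use_lock_sync() to wait for all such sections to drain before
 * tearing the pool down or swapping it out.
 */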

/* dequeue cell from fifo; f->lock must be held by the caller */
static struct snd_seq_event_cell *fifo_cell_out(struct snd_seq_fifo *f)
{
	struct snd_seq_event_cell *cell;

	cell = f->head;
	if (cell) {
		f->head = cell->next;

		/* reset tail if this was the last element */
		if (f->tail == cell)
			f->tail = NULL;

		cell->next = NULL;
		f->cells--;
	}

	return cell;
}

/* dequeue a cell from the fifo for the caller to copy to user space;
 * unless nonblock is set, block until an event arrives or a signal is
 * pending
 */
int snd_seq_fifo_cell_out(struct snd_seq_fifo *f,
			  struct snd_seq_event_cell **cellp, int nonblock)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	wait_queue_entry_t wait;

	if (snd_BUG_ON(!f))
		return -EINVAL;

	*cellp = NULL;
	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&f->lock, flags);
	while ((cell = fifo_cell_out(f)) == NULL) {
		if (nonblock) {
			/* non-blocking - return immediately */
			spin_unlock_irqrestore(&f->lock, flags);
			return -EAGAIN;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&f->input_sleep, &wait);
		/* drop the lock while sleeping so writers can append */
		spin_unlock_irq(&f->lock);
		schedule();
		spin_lock_irq(&f->lock);
		remove_wait_queue(&f->input_sleep, &wait);
		if (signal_pending(current)) {
			spin_unlock_irqrestore(&f->lock, flags);
			return -ERESTARTSYS;
		}
	}
	spin_unlock_irqrestore(&f->lock, flags);
	*cellp = cell;

	return 0;
}
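
/*
 * The wait loop above follows the classic sleep/wakeup pattern: the
 * task state is set and the entry is queued on input_sleep before
 * f->lock is released, and a producer can only append (and then call
 * wake_up()) after taking that same lock, so a wakeup between the
 * empty check and schedule() cannot be lost.
 */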


/* return a dequeued cell back to the head of the fifo */
void snd_seq_fifo_cell_putback(struct snd_seq_fifo *f,
			       struct snd_seq_event_cell *cell)
{
	unsigned long flags;

	if (cell) {
		spin_lock_irqsave(&f->lock, flags);
		cell->next = f->head;
		f->head = cell;
		if (!f->tail)
			f->tail = cell;
		f->cells++;
		spin_unlock_irqrestore(&f->lock, flags);
	}
}
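
/*
 * Sketch of the cell_out/putback pairing, modeled loosely on
 * snd_seq_read() in seq_clients.c (error handling trimmed): if the
 * copy to user space fails, the cell is pushed back so the event is
 * not lost.
 *
 *	err = snd_seq_fifo_cell_out(fifo, &cell, nonblock);
 *	if (err < 0)
 *		return err;
 *	if (copy_to_user(buf, &cell->event, sizeof(cell->event))) {
 *		snd_seq_fifo_cell_putback(fifo, cell);
 *		return -EFAULT;
 *	}
 *	snd_seq_cell_free(cell);
 */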


/* polling; return non-zero if events are available to read */
int snd_seq_fifo_poll_wait(struct snd_seq_fifo *f, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &f->input_sleep, wait);
	return (f->cells > 0);
}
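
/*
 * A client poll handler would use this roughly as snd_seq_poll() in
 * seq_clients.c does (sketch; EPOLL* names per recent kernels, POLL*
 * on older ones):
 *
 *	if (snd_seq_fifo_poll_wait(client->data.user.fifo, file, wait))
 *		mask |= EPOLLIN | EPOLLRDNORM;
 */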

/* change the size of pool; all old events are removed */
int snd_seq_fifo_resize(struct snd_seq_fifo *f, int poolsize)
{
	unsigned long flags;
	struct snd_seq_pool *newpool, *oldpool;
	struct snd_seq_event_cell *cell, *next, *oldhead;

	if (snd_BUG_ON(!f || !f->pool))
		return -EINVAL;

	/* allocate new pool */
	newpool = snd_seq_pool_new(poolsize);
	if (newpool == NULL)
		return -ENOMEM;
	if (snd_seq_pool_init(newpool) < 0) {
		snd_seq_pool_delete(&newpool);
		return -ENOMEM;
	}

	spin_lock_irqsave(&f->lock, flags);
	/* remember old pool */
	oldpool = f->pool;
	oldhead = f->head;
	/* exchange pools */
	f->pool = newpool;
	f->head = NULL;
	f->tail = NULL;
	f->cells = 0;
	/* NOTE: overflow flag is not cleared */
	spin_unlock_irqrestore(&f->lock, flags);

	/* close the old pool and wait until all users are gone */
	snd_seq_pool_mark_closing(oldpool);
	snd_use_lock_sync(&f->use_lock);

	/* release cells in old pool */
	for (cell = oldhead; cell; cell = next) {
		next = cell->next;
		snd_seq_cell_free(cell);
	}
	snd_seq_pool_delete(&oldpool);

	return 0;
}
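
/*
 * The ordering in snd_seq_fifo_resize() is the delicate part: the pool
 * and list pointers are swapped under f->lock, the old pool is marked
 * closing so no new cells can be drawn from it, and snd_use_lock_sync()
 * waits for in-flight snd_seq_fifo_event_in() callers to finish before
 * the detached cells and the old pool are freed.
 */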