/*
 *  ALSA sequencer Memory Manager
 *  Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                        Jaroslav Kysela <perex@perex.cz>
 *                2000 by Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <sound/core.h>

#include <sound/seq_kernel.h>
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_info.h"
#include "seq_lock.h"

/* return the number of unused cells in the pool */
static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
	return pool->total_elements - atomic_read(&pool->counter);
}

/* return non-zero if the pool has at least "room" free cells */
static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
{
	return snd_seq_pool_available(pool) >= pool->room;
}

/*
 * Variable length event:
 * Events such as sysex use the variable length type.
 * The external data may be stored in three different formats.
 * 1) kernel space
 *    This is the normal case.
 *      ext.data.len = length
 *      ext.data.ptr = buffer pointer
 * 2) user space
 *    When an event arrives from user space via write(), the external
 *    data is kept in user space until expanded.
 *      ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
 *      ext.data.ptr = userspace pointer
 * 3) chained cells
 *    When the variable length event is enqueued (in prioq or fifo),
 *    the external data is decomposed into several cells.
 *      ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
 *      ext.data.ptr = the additional cell head
 *         -> cell.next -> cell.next -> ..
 */
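
/*
 * Illustration (not part of the original source): the same 6-byte sysex
 * payload in each of the three formats above.  The names "ev", "kbuf",
 * "ubuf" and "first_cell" are hypothetical.
 *
 *   1) kernel space:
 *        ev.data.ext.len = 6;
 *        ev.data.ext.ptr = kbuf;
 *   2) user space:
 *        ev.data.ext.len = 6 | SNDRV_SEQ_EXT_USRPTR;
 *        ev.data.ext.ptr = (char __force *)ubuf;
 *   3) chained cells:
 *        ev.data.ext.len = 6 | SNDRV_SEQ_EXT_CHAINED;
 *        ev.data.ext.ptr = first_cell;   with first_cell->next -> ..
 */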

/*
 * exported:
 * call the dump function to expand the external data.
 */

static int get_var_len(const struct snd_seq_event *event)
{
	if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
		return -EINVAL;

	return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
}

int snd_seq_dump_var_event(const struct snd_seq_event *event,
			   snd_seq_dump_func_t func, void *private_data)
{
	int len, err;
	struct snd_seq_event_cell *cell;

	if ((len = get_var_len(event)) <= 0)
		return len;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		char buf[32];
		char __user *curptr = (char __force __user *)event->data.ext.ptr;
		while (len > 0) {
			int size = sizeof(buf);
			if (len < size)
				size = len;
			if (copy_from_user(buf, curptr, size))
				return -EFAULT;
			err = func(private_data, buf, size);
			if (err < 0)
				return err;
			curptr += size;
			len -= size;
		}
		return 0;
	}
	if (!(event->data.ext.len & SNDRV_SEQ_EXT_CHAINED))
		return func(private_data, event->data.ext.ptr, len);

	cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
	for (; len > 0 && cell; cell = cell->next) {
		int size = sizeof(struct snd_seq_event);
		if (len < size)
			size = len;
		err = func(private_data, &cell->event, size);
		if (err < 0)
			return err;
		len -= size;
	}
	return 0;
}
EXPORT_SYMBOL(snd_seq_dump_var_event);
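
/*
 * Usage sketch (illustrative, not part of the original source): the dump
 * callback is invoked once per contiguous chunk of the payload.  The
 * names "count_bytes", "ev" and "total" are hypothetical.
 *
 *   static int count_bytes(void *private_data, void *buf, int len)
 *   {
 *           *(int *)private_data += len;
 *           return 0;
 *   }
 *
 *   int total = 0, err;
 *   err = snd_seq_dump_var_event(ev, count_bytes, &total);
 *   if (err < 0)
 *           return err;
 */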


/*
 * exported:
 * expand the variable length event to linear buffer space.
 */

static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
{
	memcpy(*bufptr, src, size);
	*bufptr += size;
	return 0;
}

static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
{
	if (copy_to_user(*bufptr, src, size))
		return -EFAULT;
	*bufptr += size;
	return 0;
}

int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
			     int in_kernel, int size_aligned)
{
	int len, newlen;
	int err;

	if ((len = get_var_len(event)) < 0)
		return len;
	newlen = len;
	if (size_aligned > 0)
		newlen = roundup(len, size_aligned);
	if (count < newlen)
		return -EAGAIN;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		if (! in_kernel)
			return -EINVAL;
		if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len))
			return -EFAULT;
		return newlen;
	}
	err = snd_seq_dump_var_event(event,
				     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
				     (snd_seq_dump_func_t)seq_copy_in_user,
				     &buf);
	return err < 0 ? err : newlen;
}
EXPORT_SYMBOL(snd_seq_expand_var_event);
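
/*
 * Usage sketch (illustrative, not part of the original source): expand a
 * variable-length event into a flat kernel buffer; "ev" is hypothetical.
 *
 *   char buf[256];
 *   int n = snd_seq_expand_var_event(ev, sizeof(buf), buf, 1, 0);
 *   if (n < 0)
 *           return n;   (-EINVAL, -EFAULT, or -EAGAIN if buf is too small)
 */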

/*
 * release this cell, free extended data if available
 */

/* the caller must hold pool->lock */
static inline void free_cell(struct snd_seq_pool *pool,
			     struct snd_seq_event_cell *cell)
{
	cell->next = pool->free;
	pool->free = cell;
	atomic_dec(&pool->counter);
}

void snd_seq_cell_free(struct snd_seq_event_cell * cell)
{
	unsigned long flags;
	struct snd_seq_pool *pool;

	if (snd_BUG_ON(!cell))
		return;
	pool = cell->pool;
	if (snd_BUG_ON(!pool))
		return;

	spin_lock_irqsave(&pool->lock, flags);
	free_cell(pool, cell);
	if (snd_seq_ev_is_variable(&cell->event)) {
		if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
			struct snd_seq_event_cell *curp, *nextptr;
			curp = cell->event.data.ext.ptr;
			for (; curp; curp = nextptr) {
				nextptr = curp->next;
				free_cell(pool, curp);
			}
		}
	}
	if (waitqueue_active(&pool->output_sleep)) {
		/* has enough space now? */
		if (snd_seq_output_ok(pool))
			wake_up(&pool->output_sleep);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}


/*
 * allocate an event cell.
 */
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
			      struct snd_seq_event_cell **cellp,
			      int nonblock, struct file *file,
			      struct mutex *mutexp)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err = -EAGAIN;
	wait_queue_entry_t wait;

	if (pool == NULL)
		return -EINVAL;

	*cellp = NULL;

	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr == NULL) {	/* not initialized */
		pr_debug("ALSA: seq: pool is not initialized\n");
		err = -EINVAL;
		goto __error;
	}
	while (pool->free == NULL && ! nonblock && ! pool->closing) {
		/* sleep until a cell is released; drop the pool lock and
		 * the optional caller's mutex while waiting
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&pool->output_sleep, &wait);
		spin_unlock_irqrestore(&pool->lock, flags);
		if (mutexp)
			mutex_unlock(mutexp);
		schedule();
		if (mutexp)
			mutex_lock(mutexp);
		spin_lock_irqsave(&pool->lock, flags);
		remove_wait_queue(&pool->output_sleep, &wait);
		/* interrupted? */
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto __error;
		}
	}
	if (pool->closing) { /* closing.. */
		err = -ENOMEM;
		goto __error;
	}

	cell = pool->free;
	if (cell) {
		int used;
		pool->free = cell->next;
		atomic_inc(&pool->counter);
		used = atomic_read(&pool->counter);
		if (pool->max_used < used)
			pool->max_used = used;
		pool->event_alloc_success++;
		/* clear cell pointers */
		cell->next = NULL;
		err = 0;
	} else
		pool->event_alloc_failures++;
	*cellp = cell;

__error:
	spin_unlock_irqrestore(&pool->lock, flags);
	return err;
}


/*
 * duplicate the event to a cell.
 * if the event has external data, the data is decomposed into additional
 * cells.
 */
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
		      struct snd_seq_event_cell **cellp, int nonblock,
		      struct file *file, struct mutex *mutexp)
{
	int ncells, err;
	unsigned int extlen;
	struct snd_seq_event_cell *cell;

	*cellp = NULL;

	ncells = 0;
	extlen = 0;
	if (snd_seq_ev_is_variable(event)) {
		extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
		ncells = (extlen + sizeof(struct snd_seq_event) - 1) / sizeof(struct snd_seq_event);
	}
	if (ncells >= pool->total_elements)
		return -ENOMEM;

	err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
	if (err < 0)
		return err;

	/* copy the event */
	cell->event = *event;

	/* decompose */
	if (snd_seq_ev_is_variable(event)) {
		int len = extlen;
		int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
		int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
		struct snd_seq_event_cell *src, *tmp, *tail;
		char *buf;

		cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
		cell->event.data.ext.ptr = NULL;

		src = (struct snd_seq_event_cell *)event->data.ext.ptr;
		buf = (char *)event->data.ext.ptr;
		tail = NULL;

		while (ncells-- > 0) {
			int size = sizeof(struct snd_seq_event);
			if (len < size)
				size = len;
			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
						 mutexp);
			if (err < 0)
				goto __error;
			if (cell->event.data.ext.ptr == NULL)
				cell->event.data.ext.ptr = tmp;
			if (tail)
				tail->next = tmp;
			tail = tmp;
			/* copy chunk */
			if (is_chained && src) {
				tmp->event = src->event;
				src = src->next;
			} else if (is_usrptr) {
				if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) {
					err = -EFAULT;
					goto __error;
				}
			} else {
				memcpy(&tmp->event, buf, size);
			}
			buf += size;
			len -= size;
		}
	}

	*cellp = cell;
	return 0;

__error:
	snd_seq_cell_free(cell);
	return err;
}
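
/*
 * Worked example (illustrative): struct snd_seq_event is 28 bytes in the
 * ALSA ABI, so a 100-byte sysex payload is decomposed into
 * ncells = (100 + 28 - 1) / 28 = 4 chained cells (3 full, 1 partial),
 * in addition to the head cell carrying the event itself.
 */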


/* poll wait */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &pool->output_sleep, wait);
	return snd_seq_output_ok(pool);
}
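
/*
 * Usage sketch (abridged from the caller in seq_clientmgr.c): a non-zero
 * return means at least pool->room cells are free, so the file can be
 * reported writable.
 *
 *   if (snd_seq_pool_poll_wait(client->pool, file, wait))
 *           mask |= EPOLLOUT | EPOLLWRNORM;
 */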


/* allocate room for the specified number of events */
int snd_seq_pool_init(struct snd_seq_pool *pool)
{
	int cell;
	struct snd_seq_event_cell *cellptr;

	if (snd_BUG_ON(!pool))
		return -EINVAL;

	cellptr = kvmalloc_array(pool->size, sizeof(struct snd_seq_event_cell),
				 GFP_KERNEL);
	if (!cellptr)
		return -ENOMEM;

	/* add new cells to the free cell list */
	spin_lock_irq(&pool->lock);
	if (pool->ptr) {
		spin_unlock_irq(&pool->lock);
		kvfree(cellptr);
		return 0;
	}

	pool->ptr = cellptr;
	pool->free = NULL;

	for (cell = 0; cell < pool->size; cell++) {
		cellptr = pool->ptr + cell;
		cellptr->pool = pool;
		cellptr->next = pool->free;
		pool->free = cellptr;
	}
	/* wake up output writers once half of the pool is free again */
	pool->room = (pool->size + 1) / 2;

	/* init statistics */
	pool->max_used = 0;
	pool->total_elements = pool->size;
	spin_unlock_irq(&pool->lock);
	return 0;
}

/* refuse further insertions to the pool */
void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
{
	unsigned long flags;

	if (snd_BUG_ON(!pool))
		return;
	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 1;
	spin_unlock_irqrestore(&pool->lock, flags);
}

/* remove events */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
	struct snd_seq_event_cell *ptr;

	if (snd_BUG_ON(!pool))
		return -EINVAL;

	/* wake up sleepers and wait until all cells are returned */
	if (waitqueue_active(&pool->output_sleep))
		wake_up(&pool->output_sleep);

	while (atomic_read(&pool->counter) > 0)
		schedule_timeout_uninterruptible(1);

	/* release all resources */
	spin_lock_irq(&pool->lock);
	ptr = pool->ptr;
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	spin_unlock_irq(&pool->lock);

	kvfree(ptr);

	spin_lock_irq(&pool->lock);
	pool->closing = 0;
	spin_unlock_irq(&pool->lock);

	return 0;
}


/* init new memory pool */
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
{
	struct snd_seq_pool *pool;

	/* create pool block */
	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	atomic_set(&pool->counter, 0);
	pool->closing = 0;
	init_waitqueue_head(&pool->output_sleep);

	pool->size = poolsize;

	/* init statistics */
	pool->max_used = 0;
	return pool;
}

/* remove memory pool */
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
{
	struct snd_seq_pool *pool = *ppool;

	*ppool = NULL;
	if (pool == NULL)
		return 0;
	snd_seq_pool_mark_closing(pool);
	snd_seq_pool_done(pool);
	kfree(pool);
	return 0;
}
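
/*
 * Lifecycle sketch (illustrative; error handling abridged): how a caller
 * such as seq_clientmgr.c drives a pool.  The size 500 is arbitrary.
 *
 *   struct snd_seq_pool *pool = snd_seq_pool_new(500);
 *   if (!pool)
 *           return -ENOMEM;
 *   err = snd_seq_pool_init(pool);    carve the cell array, build free list
 *   ...
 *   use the pool via snd_seq_event_dup() and snd_seq_cell_free()
 *   ...
 *   snd_seq_pool_delete(&pool);       marks closing, drains, then frees
 */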

/* exported to seq_clientmgr.c */
void snd_seq_info_pool(struct snd_info_buffer *buffer,
		       struct snd_seq_pool *pool, char *space)
{
	if (pool == NULL)
		return;
	snd_iprintf(buffer, "%sPool size          : %d\n", space, pool->total_elements);
	snd_iprintf(buffer, "%sCells in use       : %d\n", space, atomic_read(&pool->counter));
	snd_iprintf(buffer, "%sPeak cells in use  : %d\n", space, pool->max_used);
	snd_iprintf(buffer, "%sAlloc success      : %d\n", space, pool->event_alloc_success);
	snd_iprintf(buffer, "%sAlloc failures     : %d\n", space, pool->event_alloc_failures);
}