/*
 *  ALSA sequencer Memory Manager
 *  Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                        Jaroslav Kysela <perex@perex.cz>
 *                2000 by Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <sound/core.h>

#include <sound/seq_kernel.h>
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_info.h"
#include "seq_lock.h"

static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
	return pool->total_elements - atomic_read(&pool->counter);
}

static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
{
	return snd_seq_pool_available(pool) >= pool->room;
}

/*
 * Variable length event:
 * An event like sysex uses the variable length type.
 * The external data may be stored in three different formats.
 * 1) kernel space
 *    This is the normal case.
 *      ext.data.len = length
 *      ext.data.ptr = buffer pointer
 * 2) user space
 *    When an event is generated via read(), the external data is
 *    kept in user space until expanded.
 *      ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
 *      ext.data.ptr = userspace pointer
 * 3) chained cells
 *    When the variable length event is enqueued (in prioq or fifo),
 *    the external data is decomposed to several cells.
 *      ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
 *      ext.data.ptr = the additional cell head
 *         -> cell.next -> cell.next -> ..
 */

/*
 * exported:
 * call dump function to expand external data.
 */

static int get_var_len(const struct snd_seq_event *event)
{
	if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
		return -EINVAL;

	return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
}

int snd_seq_dump_var_event(const struct snd_seq_event *event,
			   snd_seq_dump_func_t func, void *private_data)
{
	int len, err;
	struct snd_seq_event_cell *cell;

	if ((len = get_var_len(event)) <= 0)
		return len;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		char buf[32];
		char __user *curptr = (char __user *)event->data.ext.ptr;
		while (len > 0) {
			int size = sizeof(buf);
			if (len < size)
				size = len;
			if (copy_from_user(buf, curptr, size))
				return -EFAULT;
			err = func(private_data, buf, size);
			if (err < 0)
				return err;
			curptr += size;
			len -= size;
		}
		return 0;
	}
	if (! (event->data.ext.len & SNDRV_SEQ_EXT_CHAINED))
		return func(private_data, event->data.ext.ptr, len);

	cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
	for (; len > 0 && cell; cell = cell->next) {
		int size = sizeof(struct snd_seq_event);
		if (len < size)
			size = len;
		err = func(private_data, &cell->event, size);
		if (err < 0)
			return err;
		len -= size;
	}
	return 0;
}

EXPORT_SYMBOL(snd_seq_dump_var_event);

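/*
 * Example (not built): a minimal sketch of a dump callback, assuming a
 * caller-defined byte counter.  snd_seq_dump_var_event() invokes the
 * callback once per chunk regardless of whether the external data lives
 * in kernel space, user space or chained cells, so the callback only
 * ever sees linear kernel buffers.
 */
#if 0
static int count_bytes(void *private_data, void *buf, int len)
{
	int *total = private_data;	/* hypothetical accumulator */
	*total += len;
	return 0;			/* a negative value aborts the dump */
}

/* usage sketch:
 *	int total = 0;
 *	err = snd_seq_dump_var_event(ev, count_bytes, &total);
 */
#endif
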
/*
 * exported:
 * expand the variable length event to linear buffer space.
 */

static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
{
	memcpy(*bufptr, src, size);
	*bufptr += size;
	return 0;
}

static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
{
	if (copy_to_user(*bufptr, src, size))
		return -EFAULT;
	*bufptr += size;
	return 0;
}

int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
			     int in_kernel, int size_aligned)
{
	int len, newlen;
	int err;

	if ((len = get_var_len(event)) < 0)
		return len;
	newlen = len;
	if (size_aligned > 0)
		newlen = roundup(len, size_aligned);
	if (count < newlen)
		return -EAGAIN;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		if (! in_kernel)
			return -EINVAL;
		if (copy_from_user(buf, (void __user *)event->data.ext.ptr, len))
			return -EFAULT;
		return newlen;
	}
	err = snd_seq_dump_var_event(event,
				     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
				     (snd_seq_dump_func_t)seq_copy_in_user,
				     &buf);
	return err < 0 ? err : newlen;
}

EXPORT_SYMBOL(snd_seq_expand_var_event);

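/*
 * Example (not built): a sketch of expanding a variable length event into
 * a local kernel buffer.  The buffer size and event pointer are
 * assumptions; in_kernel = 1 selects the memcpy-based copy helper and
 * size_aligned = 0 requests no padding.
 */
#if 0
static int example_expand(const struct snd_seq_event *ev)
{
	char buf[256];		/* hypothetical destination buffer */
	int n;

	n = snd_seq_expand_var_event(ev, sizeof(buf), buf, 1, 0);
	if (n < 0)
		return n;	/* -EINVAL, -EAGAIN (buffer too small) or -EFAULT */
	/* buf now holds n bytes of linearized external data */
	return n;
}
#endif
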
/*
 * release this cell, free extended data if available
 */

static inline void free_cell(struct snd_seq_pool *pool,
			     struct snd_seq_event_cell *cell)
{
	cell->next = pool->free;
	pool->free = cell;
	atomic_dec(&pool->counter);
}

void snd_seq_cell_free(struct snd_seq_event_cell *cell)
{
	unsigned long flags;
	struct snd_seq_pool *pool;

	snd_assert(cell != NULL, return);
	pool = cell->pool;
	snd_assert(pool != NULL, return);

	spin_lock_irqsave(&pool->lock, flags);
	free_cell(pool, cell);
	if (snd_seq_ev_is_variable(&cell->event)) {
		if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
			struct snd_seq_event_cell *curp, *nextptr;
			curp = cell->event.data.ext.ptr;
			for (; curp; curp = nextptr) {
				nextptr = curp->next;
				curp->next = pool->free;
				free_cell(pool, curp);
			}
		}
	}
	if (waitqueue_active(&pool->output_sleep)) {
		/* has enough space now? */
		if (snd_seq_output_ok(pool))
			wake_up(&pool->output_sleep);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}


/*
 * allocate an event cell.
 */
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
			      struct snd_seq_event_cell **cellp,
			      int nonblock, struct file *file)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err = -EAGAIN;
	wait_queue_t wait;

	if (pool == NULL)
		return -EINVAL;

	*cellp = NULL;

	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr == NULL) {	/* not initialized */
		snd_printd("seq: pool is not initialized\n");
		err = -EINVAL;
		goto __error;
	}
	while (pool->free == NULL && ! nonblock && ! pool->closing) {
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&pool->output_sleep, &wait);
		spin_unlock_irq(&pool->lock);
		schedule();
		spin_lock_irq(&pool->lock);
		remove_wait_queue(&pool->output_sleep, &wait);
		/* interrupted? */
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto __error;
		}
	}
	if (pool->closing) { /* closing.. */
		err = -ENOMEM;
		goto __error;
	}

	cell = pool->free;
	if (cell) {
		int used;
		pool->free = cell->next;
		atomic_inc(&pool->counter);
		used = atomic_read(&pool->counter);
		if (pool->max_used < used)
			pool->max_used = used;
		pool->event_alloc_success++;
		/* clear cell pointers */
		cell->next = NULL;
		err = 0;
	} else
		pool->event_alloc_failures++;
	*cellp = cell;

__error:
	spin_unlock_irqrestore(&pool->lock, flags);
	return err;
}


/*
 * duplicate the event to a cell.
 * if the event has external data, the data is decomposed to additional
 * cells.
 */
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
		      struct snd_seq_event_cell **cellp, int nonblock,
		      struct file *file)
{
	int ncells, err;
	unsigned int extlen;
	struct snd_seq_event_cell *cell;

	*cellp = NULL;

	ncells = 0;
	extlen = 0;
	if (snd_seq_ev_is_variable(event)) {
		extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
		ncells = (extlen + sizeof(struct snd_seq_event) - 1) / sizeof(struct snd_seq_event);
	}
	if (ncells >= pool->total_elements)
		return -ENOMEM;

	err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
	if (err < 0)
		return err;

	/* copy the event */
	cell->event = *event;

	/* decompose */
	if (snd_seq_ev_is_variable(event)) {
		int len = extlen;
		int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
		int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
		struct snd_seq_event_cell *src, *tmp, *tail;
		char *buf;

		cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
		cell->event.data.ext.ptr = NULL;

		src = (struct snd_seq_event_cell *)event->data.ext.ptr;
		buf = (char *)event->data.ext.ptr;
		tail = NULL;

		while (ncells-- > 0) {
			int size = sizeof(struct snd_seq_event);
			if (len < size)
				size = len;
			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
			if (err < 0)
				goto __error;
			if (cell->event.data.ext.ptr == NULL)
				cell->event.data.ext.ptr = tmp;
			if (tail)
				tail->next = tmp;
			tail = tmp;
			/* copy chunk */
			if (is_chained && src) {
				tmp->event = src->event;
				src = src->next;
			} else if (is_usrptr) {
				if (copy_from_user(&tmp->event, (char __user *)buf, size)) {
					err = -EFAULT;
					goto __error;
				}
			} else {
				memcpy(&tmp->event, buf, size);
			}
			buf += size;
			len -= size;
		}
	}

	*cellp = cell;
	return 0;

__error:
	snd_seq_cell_free(cell);
	return err;
}

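/*
 * Example (not built): duplicating an event into pool cells.  The pool
 * and event pointers are assumptions; nonblock = 1 makes the allocator
 * fail with -EAGAIN instead of sleeping when the pool is exhausted.
 * Cell arithmetic: assuming a 28-byte struct snd_seq_event, a 100-byte
 * sysex decomposes into (100 + 28 - 1) / 28 = 4 chained cells, plus the
 * head cell holding the event itself.
 */
#if 0
static int example_dup(struct snd_seq_pool *pool, struct snd_seq_event *ev)
{
	struct snd_seq_event_cell *cell;
	int err;

	err = snd_seq_event_dup(pool, ev, &cell, 1, NULL);
	if (err < 0)
		return err;	/* -EAGAIN, -ENOMEM or -EFAULT */
	/* ... enqueue the cell, or release it again ... */
	snd_seq_cell_free(cell);
	return 0;
}
#endif
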
/* poll wait */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &pool->output_sleep, wait);
	return snd_seq_output_ok(pool);
}


/* allocate room for the specified number of events */
int snd_seq_pool_init(struct snd_seq_pool *pool)
{
	int cell;
	struct snd_seq_event_cell *cellptr;
	unsigned long flags;

	snd_assert(pool != NULL, return -EINVAL);
	if (pool->ptr)		/* should be atomic? */
		return 0;

	pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
	if (pool->ptr == NULL) {
		snd_printd("seq: malloc for sequencer events failed\n");
		return -ENOMEM;
	}

	/* add new cells to the free cell list */
	spin_lock_irqsave(&pool->lock, flags);
	pool->free = NULL;

	for (cell = 0; cell < pool->size; cell++) {
		cellptr = pool->ptr + cell;
		cellptr->pool = pool;
		cellptr->next = pool->free;
		pool->free = cellptr;
	}
	pool->room = (pool->size + 1) / 2;

	/* init statistics */
	pool->max_used = 0;
	pool->total_elements = pool->size;
	spin_unlock_irqrestore(&pool->lock, flags);
	return 0;
}

/* remove events */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
	unsigned long flags;
	struct snd_seq_event_cell *ptr;
	int max_count = 5 * HZ;

	snd_assert(pool != NULL, return -EINVAL);

	/* wait for closing all threads */
	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 1;
	spin_unlock_irqrestore(&pool->lock, flags);

	if (waitqueue_active(&pool->output_sleep))
		wake_up(&pool->output_sleep);

	while (atomic_read(&pool->counter) > 0) {
		if (max_count == 0) {
			snd_printk(KERN_WARNING "snd_seq_pool_done timeout: %d cells remain\n",
				   atomic_read(&pool->counter));
			break;
		}
		schedule_timeout_uninterruptible(1);
		max_count--;
	}

	/* release all resources */
	spin_lock_irqsave(&pool->lock, flags);
	ptr = pool->ptr;
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	vfree(ptr);

	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	return 0;
}


/* init new memory pool */
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
{
	struct snd_seq_pool *pool;

	/* create pool block */
	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (pool == NULL) {
		snd_printd("seq: malloc failed for pool\n");
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	atomic_set(&pool->counter, 0);
	pool->closing = 0;
	init_waitqueue_head(&pool->output_sleep);

	pool->size = poolsize;

	/* init statistics */
	pool->max_used = 0;
	return pool;
}

/* remove memory pool */
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
{
	struct snd_seq_pool *pool = *ppool;

	*ppool = NULL;
	if (pool == NULL)
		return 0;
	snd_seq_pool_done(pool);
	kfree(pool);
	return 0;
}

/* initialize sequencer memory */
int __init snd_sequencer_memory_init(void)
{
	return 0;
}

/* release sequencer memory */
void __exit snd_sequencer_memory_done(void)
{
}


/* exported to seq_clientmgr.c */
void snd_seq_info_pool(struct snd_info_buffer *buffer,
		       struct snd_seq_pool *pool, char *space)
{
	if (pool == NULL)
		return;
	snd_iprintf(buffer, "%sPool size          : %d\n", space, pool->total_elements);
	snd_iprintf(buffer, "%sCells in use       : %d\n", space, atomic_read(&pool->counter));
	snd_iprintf(buffer, "%sPeak cells in use  : %d\n", space, pool->max_used);
	snd_iprintf(buffer, "%sAlloc success      : %d\n", space, pool->event_alloc_success);
	snd_iprintf(buffer, "%sAlloc failures     : %d\n", space, pool->event_alloc_failures);
}

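/*
 * Example (not built): the typical pool lifecycle.  The pool size is an
 * arbitrary assumption; snd_seq_pool_new() only records it, while
 * snd_seq_pool_init() vmallocs the cell array and builds the free list.
 */
#if 0
static int example_pool_lifecycle(void)
{
	struct snd_seq_pool *pool;
	int err;

	pool = snd_seq_pool_new(500);	/* hypothetical size */
	if (pool == NULL)
		return -ENOMEM;
	err = snd_seq_pool_init(pool);
	if (err < 0) {
		snd_seq_pool_delete(&pool);
		return err;
	}
	/* ... snd_seq_event_dup() / snd_seq_cell_free() while in use ... */
	snd_seq_pool_delete(&pool);	/* runs snd_seq_pool_done(), then kfree */
	return 0;
}
#endif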