/*
 *  ALSA sequencer Memory Manager
 *  Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                        Jaroslav Kysela <perex@suse.cz>
 *                2000 by Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <sound/driver.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <sound/core.h>

#include <sound/seq_kernel.h>
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_info.h"
#include "seq_lock.h"

static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
	return pool->total_elements - atomic_read(&pool->counter);
}

static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
{
	return snd_seq_pool_available(pool) >= pool->room;
}

/*
 * Variable length event:
 * Events such as sysex use the variable length type.
 * The external data may be stored in three different formats.
 * 1) kernel space
 *    This is the normal case.
 *      ext.data.len = length
 *      ext.data.ptr = buffer pointer
 * 2) user space
 *    When an event is generated via read(), the external data is
 *    kept in user space until expanded.
 *      ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
 *      ext.data.ptr = userspace pointer
 * 3) chained cells
 *    When the variable length event is enqueued (in prioq or fifo),
 *    the external data is decomposed into several cells.
 *      ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
 *      ext.data.ptr = the additional cell head
 *         -> cell.next -> cell.next -> ..
 */

/*
 * exported:
 * call dump function to expand external data.
 */

static int get_var_len(const struct snd_seq_event *event)
{
	if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
		return -EINVAL;

	return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
}
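/*
 * Illustrative sketch: the hypothetical helper below shows how a plain
 * kernel-space variable length event (format 1 above) would be described
 * before delivery.  It is an example only and is not used by the
 * sequencer core.
 */
static inline void example_set_kernel_ext_data(struct snd_seq_event *ev,
					       void *buf, unsigned int len)
{
	ev->flags &= ~SNDRV_SEQ_EVENT_LENGTH_MASK;
	ev->flags |= SNDRV_SEQ_EVENT_LENGTH_VARIABLE;
	ev->data.ext.len = len;		/* no SNDRV_SEQ_EXT_* flag set */
	ev->data.ext.ptr = buf;		/* plain kernel buffer pointer */
}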
int snd_seq_dump_var_event(const struct snd_seq_event *event,
			   snd_seq_dump_func_t func, void *private_data)
{
	int len, err;
	struct snd_seq_event_cell *cell;

	if ((len = get_var_len(event)) <= 0)
		return len;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		char buf[32];
		char __user *curptr = (char __user *)event->data.ext.ptr;
		while (len > 0) {
			int size = sizeof(buf);
			if (len < size)
				size = len;
			if (copy_from_user(buf, curptr, size))
				return -EFAULT;
			err = func(private_data, buf, size);
			if (err < 0)
				return err;
			curptr += size;
			len -= size;
		}
		return 0;
	}
	if (! (event->data.ext.len & SNDRV_SEQ_EXT_CHAINED)) {
		return func(private_data, event->data.ext.ptr, len);
	}

	cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
	for (; len > 0 && cell; cell = cell->next) {
		int size = sizeof(struct snd_seq_event);
		if (len < size)
			size = len;
		err = func(private_data, &cell->event, size);
		if (err < 0)
			return err;
		len -= size;
	}
	return 0;
}

EXPORT_SYMBOL(snd_seq_dump_var_event);


/*
 * exported:
 * expand the variable length event to linear buffer space.
 */

static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
{
	memcpy(*bufptr, src, size);
	*bufptr += size;
	return 0;
}

static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
{
	if (copy_to_user(*bufptr, src, size))
		return -EFAULT;
	*bufptr += size;
	return 0;
}

int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
			     int in_kernel, int size_aligned)
{
	int len, newlen;
	int err;

	if ((len = get_var_len(event)) < 0)
		return len;
	newlen = len;
	if (size_aligned > 0)
		newlen = roundup(len, size_aligned);
	if (count < newlen)
		return -EAGAIN;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		if (! in_kernel)
			return -EINVAL;
		if (copy_from_user(buf, (void __user *)event->data.ext.ptr, len))
			return -EFAULT;
		return newlen;
	}
	err = snd_seq_dump_var_event(event,
				     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
				     (snd_seq_dump_func_t)seq_copy_in_user,
				     &buf);
	return err < 0 ? err : newlen;
}

EXPORT_SYMBOL(snd_seq_expand_var_event);
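/*
 * Illustrative sketch: a hypothetical kernel-side caller expanding a
 * variable length (e.g. sysex) event into a flat buffer.  The wrapper
 * below is an example only; buffer management is up to the real caller.
 */
static inline int example_expand_to_kernel_buf(const struct snd_seq_event *ev,
					       char *buf, int bufsize)
{
	/* in_kernel = 1: 'buf' is a kernel pointer; size_aligned = 0: no padding */
	return snd_seq_expand_var_event(ev, bufsize, buf, 1, 0);
}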
/*
 * release this cell, free extended data if available
 */

static inline void free_cell(struct snd_seq_pool *pool,
			     struct snd_seq_event_cell *cell)
{
	cell->next = pool->free;
	pool->free = cell;
	atomic_dec(&pool->counter);
}

void snd_seq_cell_free(struct snd_seq_event_cell * cell)
{
	unsigned long flags;
	struct snd_seq_pool *pool;

	snd_assert(cell != NULL, return);
	pool = cell->pool;
	snd_assert(pool != NULL, return);

	spin_lock_irqsave(&pool->lock, flags);
	free_cell(pool, cell);
	if (snd_seq_ev_is_variable(&cell->event)) {
		if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
			struct snd_seq_event_cell *curp, *nextptr;
			curp = cell->event.data.ext.ptr;
			for (; curp; curp = nextptr) {
				nextptr = curp->next;
				curp->next = pool->free;
				free_cell(pool, curp);
			}
		}
	}
	if (waitqueue_active(&pool->output_sleep)) {
		/* has enough space now? */
		if (snd_seq_output_ok(pool))
			wake_up(&pool->output_sleep);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}


/*
 * allocate an event cell.
 */
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
			      struct snd_seq_event_cell **cellp,
			      int nonblock, struct file *file)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err = -EAGAIN;
	wait_queue_t wait;

	if (pool == NULL)
		return -EINVAL;

	*cellp = NULL;

	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr == NULL) {	/* not initialized */
		snd_printd("seq: pool is not initialized\n");
		err = -EINVAL;
		goto __error;
	}
	while (pool->free == NULL && ! nonblock && ! pool->closing) {

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&pool->output_sleep, &wait);
		spin_unlock_irq(&pool->lock);
		schedule();
		spin_lock_irq(&pool->lock);
		remove_wait_queue(&pool->output_sleep, &wait);
		/* interrupted? */
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto __error;
		}
	}
	if (pool->closing) { /* closing.. */
		err = -ENOMEM;
		goto __error;
	}

	cell = pool->free;
	if (cell) {
		int used;
		pool->free = cell->next;
		atomic_inc(&pool->counter);
		used = atomic_read(&pool->counter);
		if (pool->max_used < used)
			pool->max_used = used;
		pool->event_alloc_success++;
		/* clear cell pointers */
		cell->next = NULL;
		err = 0;
	} else
		pool->event_alloc_failures++;
	*cellp = cell;

__error:
	spin_unlock_irqrestore(&pool->lock, flags);
	return err;
}


/*
 * duplicate the event to a cell.
 * If the event has external data, the data is decomposed into additional
 * cells.
 */
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
		      struct snd_seq_event_cell **cellp, int nonblock,
		      struct file *file)
{
	int ncells, err;
	unsigned int extlen;
	struct snd_seq_event_cell *cell;

	*cellp = NULL;

	ncells = 0;
	extlen = 0;
	if (snd_seq_ev_is_variable(event)) {
		extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
		ncells = (extlen + sizeof(struct snd_seq_event) - 1) / sizeof(struct snd_seq_event);
	}
	if (ncells >= pool->total_elements)
		return -ENOMEM;

	err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
	if (err < 0)
		return err;

	/* copy the event */
	cell->event = *event;

	/* decompose */
	if (snd_seq_ev_is_variable(event)) {
		int len = extlen;
		int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
		int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
		struct snd_seq_event_cell *src, *tmp, *tail;
		char *buf;

		cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
		cell->event.data.ext.ptr = NULL;

		src = (struct snd_seq_event_cell *)event->data.ext.ptr;
		buf = (char *)event->data.ext.ptr;
		tail = NULL;

		while (ncells-- > 0) {
			int size = sizeof(struct snd_seq_event);
			if (len < size)
				size = len;
			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
			if (err < 0)
				goto __error;
			if (cell->event.data.ext.ptr == NULL)
				cell->event.data.ext.ptr = tmp;
			if (tail)
				tail->next = tmp;
			tail = tmp;
			/* copy chunk */
			if (is_chained && src) {
				tmp->event = src->event;
				src = src->next;
			} else if (is_usrptr) {
				if (copy_from_user(&tmp->event, (char __user *)buf, size)) {
					err = -EFAULT;
					goto __error;
				}
			} else {
				memcpy(&tmp->event, buf, size);
			}
			buf += size;
			len -= size;
		}
	}

	*cellp = cell;
	return 0;

__error:
	snd_seq_cell_free(cell);
	return err;
}
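/*
 * Illustrative sketch: the hypothetical function below shows the typical
 * duplicate/enqueue/free cycle around snd_seq_event_dup().  It is an
 * example only; the real callers live elsewhere in the sequencer core.
 */
static inline int example_event_dup_cycle(struct snd_seq_pool *pool,
					  struct snd_seq_event *ev,
					  int nonblock)
{
	struct snd_seq_event_cell *cell;
	int err;

	/* copy the event (and any external data) into pool cells */
	err = snd_seq_event_dup(pool, ev, &cell, nonblock, NULL);
	if (err < 0)
		return err;	/* -EAGAIN, -ENOMEM, -EFAULT or -ERESTARTSYS */

	/* normally the cell is enqueued here; if enqueueing fails,
	 * the cell (and its chained cells) must be given back:
	 */
	snd_seq_cell_free(cell);
	return 0;
}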
/* poll wait */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &pool->output_sleep, wait);
	return snd_seq_output_ok(pool);
}


/* allocate room for the specified number of events */
int snd_seq_pool_init(struct snd_seq_pool *pool)
{
	int cell;
	struct snd_seq_event_cell *cellptr;
	unsigned long flags;

	snd_assert(pool != NULL, return -EINVAL);
	if (pool->ptr)		/* should be atomic? */
		return 0;

	pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
	if (pool->ptr == NULL) {
		snd_printd("seq: malloc for sequencer events failed\n");
		return -ENOMEM;
	}

	/* add new cells to the free cell list */
	spin_lock_irqsave(&pool->lock, flags);
	pool->free = NULL;

	for (cell = 0; cell < pool->size; cell++) {
		cellptr = pool->ptr + cell;
		cellptr->pool = pool;
		cellptr->next = pool->free;
		pool->free = cellptr;
	}
	pool->room = (pool->size + 1) / 2;

	/* init statistics */
	pool->max_used = 0;
	pool->total_elements = pool->size;
	spin_unlock_irqrestore(&pool->lock, flags);
	return 0;
}

/* remove events */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
	unsigned long flags;
	struct snd_seq_event_cell *ptr;
	int max_count = 5 * HZ;

	snd_assert(pool != NULL, return -EINVAL);

	/* wait for closing all threads */
	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 1;
	spin_unlock_irqrestore(&pool->lock, flags);

	if (waitqueue_active(&pool->output_sleep))
		wake_up(&pool->output_sleep);

	while (atomic_read(&pool->counter) > 0) {
		if (max_count == 0) {
			snd_printk(KERN_WARNING "snd_seq_pool_done timeout: %d cells remain\n",
				   atomic_read(&pool->counter));
			break;
		}
		schedule_timeout_uninterruptible(1);
		max_count--;
	}

	/* release all resources */
	spin_lock_irqsave(&pool->lock, flags);
	ptr = pool->ptr;
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	vfree(ptr);

	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	return 0;
}


/* init new memory pool */
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
{
	struct snd_seq_pool *pool;

	/* create pool block */
	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (pool == NULL) {
		snd_printd("seq: malloc failed for pool\n");
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	atomic_set(&pool->counter, 0);
	pool->closing = 0;
	init_waitqueue_head(&pool->output_sleep);

	pool->size = poolsize;

	/* init statistics */
	pool->max_used = 0;
	return pool;
}

/* remove memory pool */
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
{
	struct snd_seq_pool *pool = *ppool;

	*ppool = NULL;
	if (pool == NULL)
		return 0;
	snd_seq_pool_done(pool);
	kfree(pool);
	return 0;
}

/* initialize sequencer memory */
int __init snd_sequencer_memory_init(void)
{
	return 0;
}

/* release sequencer memory */
void __exit snd_sequencer_memory_done(void)
{
}


/* exported to seq_clientmgr.c */
void snd_seq_info_pool(struct snd_info_buffer *buffer,
		       struct snd_seq_pool *pool, char *space)
{
	if (pool == NULL)
		return;
	snd_iprintf(buffer, "%sPool size          : %d\n", space, pool->total_elements);
	snd_iprintf(buffer, "%sCells in use       : %d\n", space, atomic_read(&pool->counter));
	snd_iprintf(buffer, "%sPeak cells in use  : %d\n", space, pool->max_used);
	snd_iprintf(buffer, "%sAlloc success      : %d\n", space, pool->event_alloc_success);
	snd_iprintf(buffer, "%sAlloc failures     : %d\n", space, pool->event_alloc_failures);
}
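/*
 * Illustrative sketch: the hypothetical function below shows the intended
 * pool life cycle (create, initialize, use, destroy).  The pool size is an
 * example value; real clients size the pool from their own configuration.
 */
static inline int example_pool_lifecycle(void)
{
	struct snd_seq_pool *pool = snd_seq_pool_new(500);

	if (pool == NULL)
		return -ENOMEM;
	if (snd_seq_pool_init(pool) < 0) {
		snd_seq_pool_delete(&pool);
		return -ENOMEM;
	}
	/* ... allocate cells with snd_seq_event_dup(), release them with
	 * snd_seq_cell_free() ...
	 */
	return snd_seq_pool_delete(&pool);	/* implies snd_seq_pool_done() */
}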