/*
 *  ALSA sequencer Memory Manager
 *  Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                        Jaroslav Kysela <perex@perex.cz>
 *                2000 by Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <sound/core.h>

#include <sound/seq_kernel.h>
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_info.h"
#include "seq_lock.h"

/* number of cells currently unallocated in the pool */
static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
	return pool->total_elements - atomic_read(&pool->counter);
}

/* true when enough cells are left to accept further output events */
static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
{
	return snd_seq_pool_available(pool) >= pool->room;
}

/*
 * Variable length event:
 * The event like sysex uses variable length type.
 * The external data may be stored in three different formats.
 * 1) kernel space
 *    This is the normal case.
 *      ext.data.len = length
 *      ext.data.ptr = buffer pointer
 * 2) user space
 *    When an event is generated via read(), the external data is
 *    kept in user space until expanded.
 *      ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
 *      ext.data.ptr = userspace pointer
 * 3) chained cells
 *    When the variable length event is enqueued (in prioq or fifo),
 *    the external data is decomposed to several cells.
 *      ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
 *      ext.data.ptr = the additional cell head
 *         -> cell.next -> cell.next -> ..
 */
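
/*
 * Illustration (sketch, not a function in this file): what the three
 * encodings above look like for a 1000-byte sysex body.  "ev", "data",
 * "ubuf" and "head_cell" are placeholder names, not symbols defined here.
 *
 *	ev.data.ext.len = 1000;                          kernel buffer
 *	ev.data.ext.ptr = data;
 *
 *	ev.data.ext.len = 1000 | SNDRV_SEQ_EXT_USRPTR;   user-space buffer
 *	ev.data.ext.ptr = ubuf;
 *
 *	ev.data.ext.len = 1000 | SNDRV_SEQ_EXT_CHAINED;  chained cells
 *	ev.data.ext.ptr = head_cell;
 *
 * get_var_len() below recovers the plain length with
 * "len & ~SNDRV_SEQ_EXT_MASK".
 */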

/*
 * exported:
 *  call dump function to expand external data.
 */

static int get_var_len(const struct snd_seq_event *event)
{
	if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
		return -EINVAL;

	return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
}

int snd_seq_dump_var_event(const struct snd_seq_event *event,
			   snd_seq_dump_func_t func, void *private_data)
{
	int len, err;
	struct snd_seq_event_cell *cell;

	if ((len = get_var_len(event)) <= 0)
		return len;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		char buf[32];
		char __user *curptr = (char __force __user *)event->data.ext.ptr;
		while (len > 0) {
			int size = sizeof(buf);
			if (len < size)
				size = len;
			if (copy_from_user(buf, curptr, size))
				return -EFAULT;
			err = func(private_data, buf, size);
			if (err < 0)
				return err;
			curptr += size;
			len -= size;
		}
		return 0;
	}
	if (!(event->data.ext.len & SNDRV_SEQ_EXT_CHAINED))
		return func(private_data, event->data.ext.ptr, len);

	cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
	for (; len > 0 && cell; cell = cell->next) {
		int size = sizeof(struct snd_seq_event);
		if (len < size)
			size = len;
		err = func(private_data, &cell->event, size);
		if (err < 0)
			return err;
		len -= size;
	}
	return 0;
}

EXPORT_SYMBOL(snd_seq_dump_var_event);
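
/*
 * Example (sketch) of calling snd_seq_dump_var_event(): a caller can pass a
 * callback that is invoked once per contiguous chunk of the external data,
 * regardless of which of the three storage formats is in use.  "my_port" and
 * "my_send_bytes" are hypothetical names, not part of this file.
 *
 *	static int dump_sysex(void *ptr, void *buf, int count)
 *	{
 *		struct my_port *port = ptr;
 *		return my_send_bytes(port, buf, count);
 *	}
 *	...
 *	err = snd_seq_dump_var_event(ev, dump_sysex, port);
 */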


/*
 * exported:
 *  expand the variable length event to linear buffer space.
 */

static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
{
	memcpy(*bufptr, src, size);
	*bufptr += size;
	return 0;
}

static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
{
	if (copy_to_user(*bufptr, src, size))
		return -EFAULT;
	*bufptr += size;
	return 0;
}

int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
			     int in_kernel, int size_aligned)
{
	int len, newlen;
	int err;

	if ((len = get_var_len(event)) < 0)
		return len;
	newlen = len;
	if (size_aligned > 0)
		newlen = roundup(len, size_aligned);
	if (count < newlen)
		return -EAGAIN;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		if (! in_kernel)
			return -EINVAL;
		if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len))
			return -EFAULT;
		return newlen;
	}
	err = snd_seq_dump_var_event(event,
				     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
				     (snd_seq_dump_func_t)seq_copy_in_user,
				     &buf);
	return err < 0 ? err : newlen;
}

EXPORT_SYMBOL(snd_seq_expand_var_event);

/*
 * release this cell, free extended data if available
 */

/* pool->lock must be held by the caller */
static inline void free_cell(struct snd_seq_pool *pool,
			     struct snd_seq_event_cell *cell)
{
	cell->next = pool->free;
	pool->free = cell;
	atomic_dec(&pool->counter);
}

void snd_seq_cell_free(struct snd_seq_event_cell * cell)
{
	unsigned long flags;
	struct snd_seq_pool *pool;

	if (snd_BUG_ON(!cell))
		return;
	pool = cell->pool;
	if (snd_BUG_ON(!pool))
		return;

	spin_lock_irqsave(&pool->lock, flags);
	free_cell(pool, cell);
	if (snd_seq_ev_is_variable(&cell->event)) {
		if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
			struct snd_seq_event_cell *curp, *nextptr;
			curp = cell->event.data.ext.ptr;
			for (; curp; curp = nextptr) {
				nextptr = curp->next;
				curp->next = pool->free;
				free_cell(pool, curp);
			}
		}
	}
	if (waitqueue_active(&pool->output_sleep)) {
		/* has enough space now? */
		if (snd_seq_output_ok(pool))
			wake_up(&pool->output_sleep);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}


/*
 * allocate an event cell.
 */
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
			      struct snd_seq_event_cell **cellp,
			      int nonblock, struct file *file)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err = -EAGAIN;
	wait_queue_t wait;

	if (pool == NULL)
		return -EINVAL;

	*cellp = NULL;

	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr == NULL) {	/* not initialized */
		pr_debug("ALSA: seq: pool is not initialized\n");
		err = -EINVAL;
		goto __error;
	}
	while (pool->free == NULL && ! nonblock && ! pool->closing) {

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&pool->output_sleep, &wait);
		spin_unlock_irq(&pool->lock);
		schedule();
		spin_lock_irq(&pool->lock);
		remove_wait_queue(&pool->output_sleep, &wait);
		/* interrupted? */
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto __error;
		}
	}
	if (pool->closing) { /* closing.. */
		err = -ENOMEM;
		goto __error;
	}

	cell = pool->free;
	if (cell) {
		int used;
		pool->free = cell->next;
		atomic_inc(&pool->counter);
		used = atomic_read(&pool->counter);
		if (pool->max_used < used)
			pool->max_used = used;
		pool->event_alloc_success++;
		/* clear cell pointers */
		cell->next = NULL;
		err = 0;
	} else
		pool->event_alloc_failures++;
	*cellp = cell;

__error:
	spin_unlock_irqrestore(&pool->lock, flags);
	return err;
}


/*
 * duplicate the event to a cell.
 * if the event has external data, the data is decomposed to additional
 * cells.
 */
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
		      struct snd_seq_event_cell **cellp, int nonblock,
		      struct file *file)
{
	int ncells, err;
	unsigned int extlen;
	struct snd_seq_event_cell *cell;

	*cellp = NULL;

	ncells = 0;
	extlen = 0;
	if (snd_seq_ev_is_variable(event)) {
		extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
		ncells = (extlen + sizeof(struct snd_seq_event) - 1) / sizeof(struct snd_seq_event);
	}
	if (ncells >= pool->total_elements)
		return -ENOMEM;

	err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
	if (err < 0)
		return err;

	/* copy the event */
	cell->event = *event;

	/* decompose */
	if (snd_seq_ev_is_variable(event)) {
		int len = extlen;
		int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
		int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
		struct snd_seq_event_cell *src, *tmp, *tail;
		char *buf;

		cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
		cell->event.data.ext.ptr = NULL;

		src = (struct snd_seq_event_cell *)event->data.ext.ptr;
		buf = (char *)event->data.ext.ptr;
		tail = NULL;

		while (ncells-- > 0) {
			int size = sizeof(struct snd_seq_event);
			if (len < size)
				size = len;
			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
			if (err < 0)
				goto __error;
			if (cell->event.data.ext.ptr == NULL)
				cell->event.data.ext.ptr = tmp;
			if (tail)
				tail->next = tmp;
			tail = tmp;
			/* copy chunk */
			if (is_chained && src) {
				tmp->event = src->event;
				src = src->next;
			} else if (is_usrptr) {
				if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) {
					err = -EFAULT;
					goto __error;
				}
			} else {
				memcpy(&tmp->event, buf, size);
			}
			buf += size;
			len -= size;
		}
	}

	*cellp = cell;
	return 0;

__error:
	snd_seq_cell_free(cell);
	return err;
}


/* poll wait */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &pool->output_sleep, wait);
	return snd_seq_output_ok(pool);
}


/* allocate room for the specified number of events */
int snd_seq_pool_init(struct snd_seq_pool *pool)
{
	int cell;
	struct snd_seq_event_cell *cellptr;
	unsigned long flags;

	if (snd_BUG_ON(!pool))
		return -EINVAL;

	cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
	if (!cellptr)
		return -ENOMEM;

	/* add new cells to the free cell list */
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		vfree(cellptr);
		return 0;
	}

	pool->ptr = cellptr;
	pool->free = NULL;

	for (cell = 0; cell < pool->size; cell++) {
		cellptr = pool->ptr + cell;
		cellptr->pool = pool;
		cellptr->next = pool->free;
		pool->free = cellptr;
	}
	pool->room = (pool->size + 1) / 2;

	/* init statistics */
	pool->max_used = 0;
	pool->total_elements = pool->size;
	spin_unlock_irqrestore(&pool->lock, flags);
	return 0;
}

/* refuse further insertion to the pool */
void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
{
	unsigned long flags;

	if (snd_BUG_ON(!pool))
		return;
	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 1;
	spin_unlock_irqrestore(&pool->lock, flags);
}

/* remove events */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
	unsigned long flags;
	struct snd_seq_event_cell *ptr;

	if (snd_BUG_ON(!pool))
		return -EINVAL;

	/* wait for closing all threads */
	if (waitqueue_active(&pool->output_sleep))
		wake_up(&pool->output_sleep);

	while (atomic_read(&pool->counter) > 0)
		schedule_timeout_uninterruptible(1);

	/* release all resources */
	spin_lock_irqsave(&pool->lock, flags);
	ptr = pool->ptr;
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	vfree(ptr);

	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	return 0;
}


/* init new memory pool */
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
{
	struct snd_seq_pool *pool;

	/* create pool block */
	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;
	spin_lock_init(&pool->lock);
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	atomic_set(&pool->counter, 0);
	pool->closing = 0;
	init_waitqueue_head(&pool->output_sleep);

	pool->size = poolsize;

	/* init statistics */
	pool->max_used = 0;
	return pool;
}

/* remove memory pool */
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
{
	struct snd_seq_pool *pool = *ppool;

	*ppool = NULL;
	if (pool == NULL)
		return 0;
	snd_seq_pool_mark_closing(pool);
	snd_seq_pool_done(pool);
	kfree(pool);
	return 0;
}

/* initialize sequencer memory */
int __init snd_sequencer_memory_init(void)
{
	return 0;
}

/* release sequencer memory */
void __exit snd_sequencer_memory_done(void)
{
}


/* exported to seq_clientmgr.c */
void snd_seq_info_pool(struct snd_info_buffer *buffer,
		       struct snd_seq_pool *pool, char *space)
{
	if (pool == NULL)
		return;
	snd_iprintf(buffer, "%sPool size          : %d\n", space, pool->total_elements);
	snd_iprintf(buffer, "%sCells in use       : %d\n", space, atomic_read(&pool->counter));
	snd_iprintf(buffer, "%sPeak cells in use  : %d\n", space, pool->max_used);
	snd_iprintf(buffer, "%sAlloc success      : %d\n", space, pool->event_alloc_success);
	snd_iprintf(buffer, "%sAlloc failures     : %d\n", space, pool->event_alloc_failures);
}
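
/*
 * Usage sketch: a plausible lifecycle of a pool for a kernel-side client of
 * this API, pieced together from the functions above.  "events", "ev" and
 * "cell" are placeholder variables, and error handling is omitted.
 *
 *	pool = snd_seq_pool_new(events);              allocate the control block
 *	snd_seq_pool_init(pool);                      vmalloc the cell array
 *
 *	snd_seq_event_dup(pool, ev, &cell, 0, NULL);  copy an event into cell(s)
 *	...deliver cell->event, then...
 *	snd_seq_cell_free(cell);                      put the cell(s) back
 *
 *	snd_seq_pool_mark_closing(pool);              refuse further allocation
 *	snd_seq_pool_done(pool);                      wait for cells, free array
 *	snd_seq_pool_delete(&pool);                   mark_closing + done + kfree
 */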