// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  ALSA sequencer Memory Manager
 *  Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                        Jaroslav Kysela <perex@perex.cz>
 *                2000 by Takashi Iwai <tiwai@suse.de>
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <sound/core.h>

#include <sound/seq_kernel.h>
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_info.h"
#include "seq_lock.h"

static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
        return pool->total_elements - atomic_read(&pool->counter);
}

static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
{
        return snd_seq_pool_available(pool) >= pool->room;
}

/*
 * Variable length event:
 * Events such as sysex use the variable-length type.
 * The external data may be stored in three different formats.
 * 1) kernel space
 *    This is the normal case.
 *      ext.data.len = length
 *      ext.data.ptr = buffer pointer
 * 2) user space
 *    When an event is generated via read(), the external data is
 *    kept in user space until expanded.
 *      ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
 *      ext.data.ptr = userspace pointer
 * 3) chained cells
 *    When the variable length event is enqueued (in prioq or fifo),
 *    the external data is decomposed to several cells.
 *      ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
 *      ext.data.ptr = the additional cell head
 *         -> cell.next -> cell.next -> ..
 */

/*
 * exported:
 * call dump function to expand external data.
 */

static int get_var_len(const struct snd_seq_event *event)
{
        if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
                return -EINVAL;

        return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
}

int snd_seq_dump_var_event(const struct snd_seq_event *event,
                           snd_seq_dump_func_t func, void *private_data)
{
        int len, err;
        struct snd_seq_event_cell *cell;

        len = get_var_len(event);
        if (len <= 0)
                return len;

        if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
                char buf[32];
                char __user *curptr = (char __force __user *)event->data.ext.ptr;
                while (len > 0) {
                        int size = sizeof(buf);
                        if (len < size)
                                size = len;
                        if (copy_from_user(buf, curptr, size))
                                return -EFAULT;
                        err = func(private_data, buf, size);
                        if (err < 0)
                                return err;
                        curptr += size;
                        len -= size;
                }
                return 0;
        }
        if (!(event->data.ext.len & SNDRV_SEQ_EXT_CHAINED))
                return func(private_data, event->data.ext.ptr, len);

        cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
        for (; len > 0 && cell; cell = cell->next) {
                int size = sizeof(struct snd_seq_event);
                if (len < size)
                        size = len;
                err = func(private_data, &cell->event, size);
                if (err < 0)
                        return err;
                len -= size;
        }
        return 0;
}
EXPORT_SYMBOL(snd_seq_dump_var_event);
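
/*
 * Illustrative sketch (not part of the driver): a caller-supplied dump
 * callback that merely counts the bytes of a variable-length event,
 * regardless of which of the three storage formats above is used.
 * The callback name count_bytes() and the variables are hypothetical;
 * the signature follows snd_seq_dump_func_t.
 *
 *      static int count_bytes(void *private_data, void *buf, int size)
 *      {
 *              *(int *)private_data += size;
 *              return 0;       // non-negative return keeps the dump going
 *      }
 *
 *      int total = 0;
 *      int err = snd_seq_dump_var_event(ev, count_bytes, &total);
 */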

/*
 * exported:
 * expand the variable length event to linear buffer space.
 */

static int seq_copy_in_kernel(void *ptr, void *src, int size)
{
        char **bufptr = ptr;

        memcpy(*bufptr, src, size);
        *bufptr += size;
        return 0;
}

static int seq_copy_in_user(void *ptr, void *src, int size)
{
        char __user **bufptr = ptr;

        if (copy_to_user(*bufptr, src, size))
                return -EFAULT;
        *bufptr += size;
        return 0;
}

int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
                             int in_kernel, int size_aligned)
{
        int len, newlen;
        int err;

        len = get_var_len(event);
        if (len < 0)
                return len;
        newlen = len;
        if (size_aligned > 0)
                newlen = roundup(len, size_aligned);
        if (count < newlen)
                return -EAGAIN;

        if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
                if (! in_kernel)
                        return -EINVAL;
                if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len))
                        return -EFAULT;
                return newlen;
        }
        err = snd_seq_dump_var_event(event,
                                     in_kernel ? seq_copy_in_kernel : seq_copy_in_user,
                                     &buf);
        return err < 0 ? err : newlen;
}
EXPORT_SYMBOL(snd_seq_expand_var_event);

/*
 * release this cell, free extended data if available
 */

static inline void free_cell(struct snd_seq_pool *pool,
                             struct snd_seq_event_cell *cell)
{
        cell->next = pool->free;
        pool->free = cell;
        atomic_dec(&pool->counter);
}

void snd_seq_cell_free(struct snd_seq_event_cell * cell)
{
        unsigned long flags;
        struct snd_seq_pool *pool;

        if (snd_BUG_ON(!cell))
                return;
        pool = cell->pool;
        if (snd_BUG_ON(!pool))
                return;

        spin_lock_irqsave(&pool->lock, flags);
        free_cell(pool, cell);
        if (snd_seq_ev_is_variable(&cell->event)) {
                if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
                        struct snd_seq_event_cell *curp, *nextptr;
                        curp = cell->event.data.ext.ptr;
                        for (; curp; curp = nextptr) {
                                nextptr = curp->next;
                                curp->next = pool->free;
                                free_cell(pool, curp);
                        }
                }
        }
        if (waitqueue_active(&pool->output_sleep)) {
                /* has enough space now? */
                if (snd_seq_output_ok(pool))
                        wake_up(&pool->output_sleep);
        }
        spin_unlock_irqrestore(&pool->lock, flags);
}
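
/*
 * Illustrative sketch (not part of the driver): expanding a variable-length
 * event into a linear kernel buffer.  The buffer and its size are
 * hypothetical; in_kernel is 1 because buf is a kernel pointer, and
 * size_aligned 0 requests the raw, unpadded length.
 *
 *      char buf[256];
 *      int n;
 *
 *      n = snd_seq_expand_var_event(ev, sizeof(buf), buf, 1, 0);
 *      if (n < 0)
 *              return n;       // -EINVAL, -EFAULT, or -EAGAIN if buf is too small
 *      // on success n is the expanded length in bytes
 */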

/*
 * allocate an event cell.
 */
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
                              struct snd_seq_event_cell **cellp,
                              int nonblock, struct file *file,
                              struct mutex *mutexp)
{
        struct snd_seq_event_cell *cell;
        unsigned long flags;
        int err = -EAGAIN;
        wait_queue_entry_t wait;

        if (pool == NULL)
                return -EINVAL;

        *cellp = NULL;

        init_waitqueue_entry(&wait, current);
        spin_lock_irqsave(&pool->lock, flags);
        if (pool->ptr == NULL) {        /* not initialized */
                pr_debug("ALSA: seq: pool is not initialized\n");
                err = -EINVAL;
                goto __error;
        }
        while (pool->free == NULL && ! nonblock && ! pool->closing) {

                set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&pool->output_sleep, &wait);
                spin_unlock_irqrestore(&pool->lock, flags);
                if (mutexp)
                        mutex_unlock(mutexp);
                schedule();
                if (mutexp)
                        mutex_lock(mutexp);
                spin_lock_irqsave(&pool->lock, flags);
                remove_wait_queue(&pool->output_sleep, &wait);
                /* interrupted? */
                if (signal_pending(current)) {
                        err = -ERESTARTSYS;
                        goto __error;
                }
        }
        if (pool->closing) { /* closing.. */
                err = -ENOMEM;
                goto __error;
        }

        cell = pool->free;
        if (cell) {
                int used;
                pool->free = cell->next;
                atomic_inc(&pool->counter);
                used = atomic_read(&pool->counter);
                if (pool->max_used < used)
                        pool->max_used = used;
                pool->event_alloc_success++;
                /* clear cell pointers */
                cell->next = NULL;
                err = 0;
        } else
                pool->event_alloc_failures++;
        *cellp = cell;

__error:
        spin_unlock_irqrestore(&pool->lock, flags);
        return err;
}


/*
 * duplicate the event to a cell.
 * if the event has external data, the data is decomposed to additional
 * cells.
 */
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
                      struct snd_seq_event_cell **cellp, int nonblock,
                      struct file *file, struct mutex *mutexp)
{
        int ncells, err;
        unsigned int extlen;
        struct snd_seq_event_cell *cell;

        *cellp = NULL;

        ncells = 0;
        extlen = 0;
        if (snd_seq_ev_is_variable(event)) {
                extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
                ncells = DIV_ROUND_UP(extlen, sizeof(struct snd_seq_event));
        }
        if (ncells >= pool->total_elements)
                return -ENOMEM;

        err = snd_seq_cell_alloc(pool, &cell, nonblock, file, mutexp);
        if (err < 0)
                return err;

        /* copy the event */
        cell->event = *event;

        /* decompose */
        if (snd_seq_ev_is_variable(event)) {
                int len = extlen;
                int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
                int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
                struct snd_seq_event_cell *src, *tmp, *tail;
                char *buf;

                cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
                cell->event.data.ext.ptr = NULL;

                src = (struct snd_seq_event_cell *)event->data.ext.ptr;
                buf = (char *)event->data.ext.ptr;
                tail = NULL;

                while (ncells-- > 0) {
                        int size = sizeof(struct snd_seq_event);
                        if (len < size)
                                size = len;
                        err = snd_seq_cell_alloc(pool, &tmp, nonblock, file,
                                                 mutexp);
                        if (err < 0)
                                goto __error;
                        if (cell->event.data.ext.ptr == NULL)
                                cell->event.data.ext.ptr = tmp;
                        if (tail)
                                tail->next = tmp;
                        tail = tmp;
                        /* copy chunk */
                        if (is_chained && src) {
                                tmp->event = src->event;
                                src = src->next;
                        } else if (is_usrptr) {
                                if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) {
                                        err = -EFAULT;
                                        goto __error;
                                }
                        } else {
                                memcpy(&tmp->event, buf, size);
                        }
                        buf += size;
                        len -= size;
                }
        }

        *cellp = cell;
        return 0;

__error:
        snd_seq_cell_free(cell);
        return err;
}


/* poll wait */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
                           poll_table *wait)
{
        poll_wait(file, &pool->output_sleep, wait);
        return snd_seq_output_ok(pool);
}
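
/*
 * Illustrative sketch (not part of the driver): duplicating an event into
 * pool cells before it is queued.  "pool", "ev" and "file" stand for the
 * caller's context and are hypothetical here; passing nonblock=1 makes the
 * call fail with -EAGAIN instead of sleeping when the pool is exhausted.
 *
 *      struct snd_seq_event_cell *cell;
 *      int err;
 *
 *      err = snd_seq_event_dup(pool, ev, &cell, 1, file, NULL);
 *      if (err < 0)
 *              return err;     // e.g. -EAGAIN, -ENOMEM or -EINVAL
 *      // ... enqueue "cell"; snd_seq_cell_free(cell) releases it again
 */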

/* allocate room for the specified number of events */
int snd_seq_pool_init(struct snd_seq_pool *pool)
{
        int cell;
        struct snd_seq_event_cell *cellptr;

        if (snd_BUG_ON(!pool))
                return -EINVAL;

        cellptr = kvmalloc_array(sizeof(struct snd_seq_event_cell), pool->size,
                                 GFP_KERNEL);
        if (!cellptr)
                return -ENOMEM;

        /* add new cells to the free cell list */
        spin_lock_irq(&pool->lock);
        if (pool->ptr) {
                spin_unlock_irq(&pool->lock);
                kvfree(cellptr);
                return 0;
        }

        pool->ptr = cellptr;
        pool->free = NULL;

        for (cell = 0; cell < pool->size; cell++) {
                cellptr = pool->ptr + cell;
                cellptr->pool = pool;
                cellptr->next = pool->free;
                pool->free = cellptr;
        }
        pool->room = (pool->size + 1) / 2;

        /* init statistics */
        pool->max_used = 0;
        pool->total_elements = pool->size;
        spin_unlock_irq(&pool->lock);
        return 0;
}

/* refuse further insertion into the pool */
void snd_seq_pool_mark_closing(struct snd_seq_pool *pool)
{
        unsigned long flags;

        if (snd_BUG_ON(!pool))
                return;
        spin_lock_irqsave(&pool->lock, flags);
        pool->closing = 1;
        spin_unlock_irqrestore(&pool->lock, flags);
}

/* remove events */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
        struct snd_seq_event_cell *ptr;

        if (snd_BUG_ON(!pool))
                return -EINVAL;

        /* wait for closing all threads */
        if (waitqueue_active(&pool->output_sleep))
                wake_up(&pool->output_sleep);

        while (atomic_read(&pool->counter) > 0)
                schedule_timeout_uninterruptible(1);

        /* release all resources */
        spin_lock_irq(&pool->lock);
        ptr = pool->ptr;
        pool->ptr = NULL;
        pool->free = NULL;
        pool->total_elements = 0;
        spin_unlock_irq(&pool->lock);

        kvfree(ptr);

        spin_lock_irq(&pool->lock);
        pool->closing = 0;
        spin_unlock_irq(&pool->lock);

        return 0;
}


/* init new memory pool */
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
{
        struct snd_seq_pool *pool;

        /* create pool block */
        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return NULL;
        spin_lock_init(&pool->lock);
        pool->ptr = NULL;
        pool->free = NULL;
        pool->total_elements = 0;
        atomic_set(&pool->counter, 0);
        pool->closing = 0;
        init_waitqueue_head(&pool->output_sleep);

        pool->size = poolsize;

        /* init statistics */
        pool->max_used = 0;
        return pool;
}

/* remove memory pool */
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
{
        struct snd_seq_pool *pool = *ppool;

        *ppool = NULL;
        if (pool == NULL)
                return 0;
        snd_seq_pool_mark_closing(pool);
        snd_seq_pool_done(pool);
        kfree(pool);
        return 0;
}

/* exported to seq_clientmgr.c */
void snd_seq_info_pool(struct snd_info_buffer *buffer,
                       struct snd_seq_pool *pool, char *space)
{
        if (pool == NULL)
                return;
        snd_iprintf(buffer, "%sPool size : %d\n", space, pool->total_elements);
        snd_iprintf(buffer, "%sCells in use : %d\n", space, atomic_read(&pool->counter));
        snd_iprintf(buffer, "%sPeak cells in use : %d\n", space, pool->max_used);
        snd_iprintf(buffer, "%sAlloc success : %d\n", space, pool->event_alloc_success);
        snd_iprintf(buffer, "%sAlloc failures : %d\n", space, pool->event_alloc_failures);
}
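
/*
 * Illustrative sketch (not part of the driver): typical life cycle of a
 * pool as a client would use it.  The pool size of 500 cells is an
 * arbitrary example value and error handling is abbreviated.
 *
 *      struct snd_seq_pool *pool;
 *      int err;
 *
 *      pool = snd_seq_pool_new(500);
 *      if (!pool)
 *              return -ENOMEM;
 *      err = snd_seq_pool_init(pool);          // allocate and chain the cells
 *      if (err < 0)
 *              goto error;
 *      // ... allocate cells with snd_seq_event_dup(), release them with
 *      // snd_seq_cell_free() ...
 *      snd_seq_pool_mark_closing(pool);        // refuse further allocations
 *      snd_seq_pool_delete(&pool);             // drain, free cells and the pool
 */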