xref: /openbmc/linux/sound/core/seq/seq_clientmgr.c (revision 9144f784f852f9a125cabe9927b986d909bfa439)
1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*
3   *  ALSA sequencer Client Manager
4   *  Copyright (c) 1998-2001 by Frank van de Pol <fvdpol@coil.demon.nl>
5   *                             Jaroslav Kysela <perex@perex.cz>
6   *                             Takashi Iwai <tiwai@suse.de>
7   */
8  
9  #include <linux/init.h>
10  #include <linux/export.h>
11  #include <linux/slab.h>
12  #include <sound/core.h>
13  #include <sound/minors.h>
14  #include <linux/kmod.h>
15  
16  #include <sound/seq_kernel.h>
17  #include <sound/ump.h>
18  #include "seq_clientmgr.h"
19  #include "seq_memory.h"
20  #include "seq_queue.h"
21  #include "seq_timer.h"
22  #include "seq_info.h"
23  #include "seq_system.h"
24  #include "seq_ump_convert.h"
25  #include <sound/seq_device.h>
26  #ifdef CONFIG_COMPAT
27  #include <linux/compat.h>
28  #endif
29  
30  /* Client Manager
31  
32   * this module handles the connections of userland and kernel clients
33   *
34   */
35  
36  /*
37   * There are four ranges of client numbers (last two shared):
38   * 0..15: global clients
39   * 16..127: statically allocated client numbers for cards 0..27
40   * 128..191: dynamically allocated client numbers for cards 28..31
41   * 128..191: dynamically allocated client numbers for applications
42   */
43  
44  /* number of kernel non-card clients */
45  #define SNDRV_SEQ_GLOBAL_CLIENTS	16
46  /* clients per cards, for static clients */
47  #define SNDRV_SEQ_CLIENTS_PER_CARD	4
48  /* dynamically allocated client numbers (both kernel drivers and user space) */
49  #define SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN	128
50  
51  #define SNDRV_SEQ_LFLG_INPUT	0x0001
52  #define SNDRV_SEQ_LFLG_OUTPUT	0x0002
53  #define SNDRV_SEQ_LFLG_OPEN	(SNDRV_SEQ_LFLG_INPUT|SNDRV_SEQ_LFLG_OUTPUT)
54  
55  static DEFINE_SPINLOCK(clients_lock);
56  static DEFINE_MUTEX(register_mutex);
57  
58  /*
59   * client table
60   */
61  static char clienttablock[SNDRV_SEQ_MAX_CLIENTS];
62  static struct snd_seq_client *clienttab[SNDRV_SEQ_MAX_CLIENTS];
63  static struct snd_seq_usage client_usage;
64  
65  /*
66   * prototypes
67   */
68  static int bounce_error_event(struct snd_seq_client *client,
69  			      struct snd_seq_event *event,
70  			      int err, int atomic, int hop);
71  static int snd_seq_deliver_single_event(struct snd_seq_client *client,
72  					struct snd_seq_event *event,
73  					int filter, int atomic, int hop);
74  
75  #if IS_ENABLED(CONFIG_SND_SEQ_UMP)
76  static void free_ump_info(struct snd_seq_client *client);
77  #endif
78  
79  /*
80   */
snd_seq_file_flags(struct file * file)81  static inline unsigned short snd_seq_file_flags(struct file *file)
82  {
83          switch (file->f_mode & (FMODE_READ | FMODE_WRITE)) {
84          case FMODE_WRITE:
85                  return SNDRV_SEQ_LFLG_OUTPUT;
86          case FMODE_READ:
87                  return SNDRV_SEQ_LFLG_INPUT;
88          default:
89                  return SNDRV_SEQ_LFLG_OPEN;
90          }
91  }
92  
snd_seq_write_pool_allocated(struct snd_seq_client * client)93  static inline int snd_seq_write_pool_allocated(struct snd_seq_client *client)
94  {
95  	return snd_seq_total_cells(client->pool) > 0;
96  }
97  
98  /* return pointer to client structure for specified id */
clientptr(int clientid)99  static struct snd_seq_client *clientptr(int clientid)
100  {
101  	if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
102  		pr_debug("ALSA: seq: oops. Trying to get pointer to client %d\n",
103  			   clientid);
104  		return NULL;
105  	}
106  	return clienttab[clientid];
107  }
108  
snd_seq_client_use_ptr(int clientid)109  struct snd_seq_client *snd_seq_client_use_ptr(int clientid)
110  {
111  	unsigned long flags;
112  	struct snd_seq_client *client;
113  
114  	if (clientid < 0 || clientid >= SNDRV_SEQ_MAX_CLIENTS) {
115  		pr_debug("ALSA: seq: oops. Trying to get pointer to client %d\n",
116  			   clientid);
117  		return NULL;
118  	}
119  	spin_lock_irqsave(&clients_lock, flags);
120  	client = clientptr(clientid);
121  	if (client)
122  		goto __lock;
123  	if (clienttablock[clientid]) {
124  		spin_unlock_irqrestore(&clients_lock, flags);
125  		return NULL;
126  	}
127  	spin_unlock_irqrestore(&clients_lock, flags);
128  #ifdef CONFIG_MODULES
129  	if (!in_interrupt()) {
130  		static DECLARE_BITMAP(client_requested, SNDRV_SEQ_GLOBAL_CLIENTS);
131  		static DECLARE_BITMAP(card_requested, SNDRV_CARDS);
132  
133  		if (clientid < SNDRV_SEQ_GLOBAL_CLIENTS) {
134  			int idx;
135  
136  			if (!test_and_set_bit(clientid, client_requested)) {
137  				for (idx = 0; idx < 15; idx++) {
138  					if (seq_client_load[idx] < 0)
139  						break;
140  					if (seq_client_load[idx] == clientid) {
141  						request_module("snd-seq-client-%i",
142  							       clientid);
143  						break;
144  					}
145  				}
146  			}
147  		} else if (clientid < SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN) {
148  			int card = (clientid - SNDRV_SEQ_GLOBAL_CLIENTS) /
149  				SNDRV_SEQ_CLIENTS_PER_CARD;
150  			if (card < snd_ecards_limit) {
151  				if (!test_and_set_bit(card, card_requested))
152  					snd_request_card(card);
153  				snd_seq_device_load_drivers();
154  			}
155  		}
156  		spin_lock_irqsave(&clients_lock, flags);
157  		client = clientptr(clientid);
158  		if (client)
159  			goto __lock;
160  		spin_unlock_irqrestore(&clients_lock, flags);
161  	}
162  #endif
163  	return NULL;
164  
165        __lock:
166  	snd_use_lock_use(&client->use_lock);
167  	spin_unlock_irqrestore(&clients_lock, flags);
168  	return client;
169  }
170  
171  /* Take refcount and perform ioctl_mutex lock on the given client;
172   * used only for OSS sequencer
173   * Unlock via snd_seq_client_ioctl_unlock() below
174   */
snd_seq_client_ioctl_lock(int clientid)175  bool snd_seq_client_ioctl_lock(int clientid)
176  {
177  	struct snd_seq_client *client;
178  
179  	client = snd_seq_client_use_ptr(clientid);
180  	if (!client)
181  		return false;
182  	mutex_lock(&client->ioctl_mutex);
183  	/* The client isn't unrefed here; see snd_seq_client_ioctl_unlock() */
184  	return true;
185  }
186  EXPORT_SYMBOL_GPL(snd_seq_client_ioctl_lock);
187  
188  /* Unlock and unref the given client; for OSS sequencer use only */
snd_seq_client_ioctl_unlock(int clientid)189  void snd_seq_client_ioctl_unlock(int clientid)
190  {
191  	struct snd_seq_client *client;
192  
193  	client = snd_seq_client_use_ptr(clientid);
194  	if (WARN_ON(!client))
195  		return;
196  	mutex_unlock(&client->ioctl_mutex);
197  	/* The doubly unrefs below are intentional; the first one releases the
198  	 * leftover from snd_seq_client_ioctl_lock() above, and the second one
199  	 * is for releasing snd_seq_client_use_ptr() in this function
200  	 */
201  	snd_seq_client_unlock(client);
202  	snd_seq_client_unlock(client);
203  }
204  EXPORT_SYMBOL_GPL(snd_seq_client_ioctl_unlock);
205  
usage_alloc(struct snd_seq_usage * res,int num)206  static void usage_alloc(struct snd_seq_usage *res, int num)
207  {
208  	res->cur += num;
209  	if (res->cur > res->peak)
210  		res->peak = res->cur;
211  }
212  
usage_free(struct snd_seq_usage * res,int num)213  static void usage_free(struct snd_seq_usage *res, int num)
214  {
215  	res->cur -= num;
216  }
217  
218  /* initialise data structures */
client_init_data(void)219  int __init client_init_data(void)
220  {
221  	/* zap out the client table */
222  	memset(&clienttablock, 0, sizeof(clienttablock));
223  	memset(&clienttab, 0, sizeof(clienttab));
224  	return 0;
225  }
226  
227  
seq_create_client1(int client_index,int poolsize)228  static struct snd_seq_client *seq_create_client1(int client_index, int poolsize)
229  {
230  	int c;
231  	struct snd_seq_client *client;
232  
233  	/* init client data */
234  	client = kzalloc(sizeof(*client), GFP_KERNEL);
235  	if (client == NULL)
236  		return NULL;
237  	client->pool = snd_seq_pool_new(poolsize);
238  	if (client->pool == NULL) {
239  		kfree(client);
240  		return NULL;
241  	}
242  	client->type = NO_CLIENT;
243  	snd_use_lock_init(&client->use_lock);
244  	rwlock_init(&client->ports_lock);
245  	mutex_init(&client->ports_mutex);
246  	INIT_LIST_HEAD(&client->ports_list_head);
247  	mutex_init(&client->ioctl_mutex);
248  	client->ump_endpoint_port = -1;
249  
250  	/* find free slot in the client table */
251  	spin_lock_irq(&clients_lock);
252  	if (client_index < 0) {
253  		for (c = SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN;
254  		     c < SNDRV_SEQ_MAX_CLIENTS;
255  		     c++) {
256  			if (clienttab[c] || clienttablock[c])
257  				continue;
258  			clienttab[client->number = c] = client;
259  			spin_unlock_irq(&clients_lock);
260  			return client;
261  		}
262  	} else {
263  		if (clienttab[client_index] == NULL && !clienttablock[client_index]) {
264  			clienttab[client->number = client_index] = client;
265  			spin_unlock_irq(&clients_lock);
266  			return client;
267  		}
268  	}
269  	spin_unlock_irq(&clients_lock);
270  	snd_seq_pool_delete(&client->pool);
271  	kfree(client);
272  	return NULL;	/* no free slot found or busy, return failure code */
273  }
274  
275  
seq_free_client1(struct snd_seq_client * client)276  static int seq_free_client1(struct snd_seq_client *client)
277  {
278  	if (!client)
279  		return 0;
280  	spin_lock_irq(&clients_lock);
281  	clienttablock[client->number] = 1;
282  	clienttab[client->number] = NULL;
283  	spin_unlock_irq(&clients_lock);
284  	snd_seq_delete_all_ports(client);
285  	snd_seq_queue_client_leave(client->number);
286  	snd_use_lock_sync(&client->use_lock);
287  	if (client->pool)
288  		snd_seq_pool_delete(&client->pool);
289  	spin_lock_irq(&clients_lock);
290  	clienttablock[client->number] = 0;
291  	spin_unlock_irq(&clients_lock);
292  	return 0;
293  }
294  
295  
seq_free_client(struct snd_seq_client * client)296  static void seq_free_client(struct snd_seq_client * client)
297  {
298  	mutex_lock(&register_mutex);
299  	switch (client->type) {
300  	case NO_CLIENT:
301  		pr_warn("ALSA: seq: Trying to free unused client %d\n",
302  			client->number);
303  		break;
304  	case USER_CLIENT:
305  	case KERNEL_CLIENT:
306  		seq_free_client1(client);
307  		usage_free(&client_usage, 1);
308  		break;
309  
310  	default:
311  		pr_err("ALSA: seq: Trying to free client %d with undefined type = %d\n",
312  			   client->number, client->type);
313  	}
314  	mutex_unlock(&register_mutex);
315  
316  	snd_seq_system_client_ev_client_exit(client->number);
317  }
318  
319  
320  
321  /* -------------------------------------------------------- */
322  
323  /* create a user client */
snd_seq_open(struct inode * inode,struct file * file)324  static int snd_seq_open(struct inode *inode, struct file *file)
325  {
326  	int c, mode;			/* client id */
327  	struct snd_seq_client *client;
328  	struct snd_seq_user_client *user;
329  	int err;
330  
331  	err = stream_open(inode, file);
332  	if (err < 0)
333  		return err;
334  
335  	mutex_lock(&register_mutex);
336  	client = seq_create_client1(-1, SNDRV_SEQ_DEFAULT_EVENTS);
337  	if (!client) {
338  		mutex_unlock(&register_mutex);
339  		return -ENOMEM;	/* failure code */
340  	}
341  
342  	mode = snd_seq_file_flags(file);
343  	if (mode & SNDRV_SEQ_LFLG_INPUT)
344  		client->accept_input = 1;
345  	if (mode & SNDRV_SEQ_LFLG_OUTPUT)
346  		client->accept_output = 1;
347  
348  	user = &client->data.user;
349  	user->fifo = NULL;
350  	user->fifo_pool_size = 0;
351  
352  	if (mode & SNDRV_SEQ_LFLG_INPUT) {
353  		user->fifo_pool_size = SNDRV_SEQ_DEFAULT_CLIENT_EVENTS;
354  		user->fifo = snd_seq_fifo_new(user->fifo_pool_size);
355  		if (user->fifo == NULL) {
356  			seq_free_client1(client);
357  			kfree(client);
358  			mutex_unlock(&register_mutex);
359  			return -ENOMEM;
360  		}
361  	}
362  
363  	usage_alloc(&client_usage, 1);
364  	client->type = USER_CLIENT;
365  	mutex_unlock(&register_mutex);
366  
367  	c = client->number;
368  	file->private_data = client;
369  
370  	/* fill client data */
371  	user->file = file;
372  	sprintf(client->name, "Client-%d", c);
373  	client->data.user.owner = get_pid(task_pid(current));
374  
375  	/* make others aware this new client */
376  	snd_seq_system_client_ev_client_start(c);
377  
378  	return 0;
379  }
380  
381  /* delete a user client */
snd_seq_release(struct inode * inode,struct file * file)382  static int snd_seq_release(struct inode *inode, struct file *file)
383  {
384  	struct snd_seq_client *client = file->private_data;
385  
386  	if (client) {
387  		seq_free_client(client);
388  		if (client->data.user.fifo)
389  			snd_seq_fifo_delete(&client->data.user.fifo);
390  #if IS_ENABLED(CONFIG_SND_SEQ_UMP)
391  		free_ump_info(client);
392  #endif
393  		put_pid(client->data.user.owner);
394  		kfree(client);
395  	}
396  
397  	return 0;
398  }
399  
event_is_compatible(const struct snd_seq_client * client,const struct snd_seq_event * ev)400  static bool event_is_compatible(const struct snd_seq_client *client,
401  				const struct snd_seq_event *ev)
402  {
403  	if (snd_seq_ev_is_ump(ev) && !client->midi_version)
404  		return false;
405  	if (snd_seq_ev_is_ump(ev) && snd_seq_ev_is_variable(ev))
406  		return false;
407  	return true;
408  }
409  
410  /* handle client read() */
411  /* possible error values:
412   *	-ENXIO	invalid client or file open mode
413   *	-ENOSPC	FIFO overflow (the flag is cleared after this error report)
414   *	-EINVAL	no enough user-space buffer to write the whole event
415   *	-EFAULT	seg. fault during copy to user space
416   */
snd_seq_read(struct file * file,char __user * buf,size_t count,loff_t * offset)417  static ssize_t snd_seq_read(struct file *file, char __user *buf, size_t count,
418  			    loff_t *offset)
419  {
420  	struct snd_seq_client *client = file->private_data;
421  	struct snd_seq_fifo *fifo;
422  	size_t aligned_size;
423  	int err;
424  	long result = 0;
425  	struct snd_seq_event_cell *cell;
426  
427  	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT))
428  		return -ENXIO;
429  
430  	if (!access_ok(buf, count))
431  		return -EFAULT;
432  
433  	/* check client structures are in place */
434  	if (snd_BUG_ON(!client))
435  		return -ENXIO;
436  
437  	if (!client->accept_input)
438  		return -ENXIO;
439  	fifo = client->data.user.fifo;
440  	if (!fifo)
441  		return -ENXIO;
442  
443  	if (atomic_read(&fifo->overflow) > 0) {
444  		/* buffer overflow is detected */
445  		snd_seq_fifo_clear(fifo);
446  		/* return error code */
447  		return -ENOSPC;
448  	}
449  
450  	cell = NULL;
451  	err = 0;
452  	snd_seq_fifo_lock(fifo);
453  
454  	if (IS_ENABLED(CONFIG_SND_SEQ_UMP) && client->midi_version > 0)
455  		aligned_size = sizeof(struct snd_seq_ump_event);
456  	else
457  		aligned_size = sizeof(struct snd_seq_event);
458  
459  	/* while data available in queue */
460  	while (count >= aligned_size) {
461  		int nonblock;
462  
463  		nonblock = (file->f_flags & O_NONBLOCK) || result > 0;
464  		err = snd_seq_fifo_cell_out(fifo, &cell, nonblock);
465  		if (err < 0)
466  			break;
467  		if (!event_is_compatible(client, &cell->event)) {
468  			snd_seq_cell_free(cell);
469  			cell = NULL;
470  			continue;
471  		}
472  		if (snd_seq_ev_is_variable(&cell->event)) {
473  			struct snd_seq_ump_event tmpev;
474  
475  			memcpy(&tmpev, &cell->event, aligned_size);
476  			tmpev.data.ext.len &= ~SNDRV_SEQ_EXT_MASK;
477  			if (copy_to_user(buf, &tmpev, aligned_size)) {
478  				err = -EFAULT;
479  				break;
480  			}
481  			count -= aligned_size;
482  			buf += aligned_size;
483  			err = snd_seq_expand_var_event(&cell->event, count,
484  						       (char __force *)buf, 0,
485  						       aligned_size);
486  			if (err < 0)
487  				break;
488  			result += err;
489  			count -= err;
490  			buf += err;
491  		} else {
492  			if (copy_to_user(buf, &cell->event, aligned_size)) {
493  				err = -EFAULT;
494  				break;
495  			}
496  			count -= aligned_size;
497  			buf += aligned_size;
498  		}
499  		snd_seq_cell_free(cell);
500  		cell = NULL; /* to be sure */
501  		result += aligned_size;
502  	}
503  
504  	if (err < 0) {
505  		if (cell)
506  			snd_seq_fifo_cell_putback(fifo, cell);
507  		if (err == -EAGAIN && result > 0)
508  			err = 0;
509  	}
510  	snd_seq_fifo_unlock(fifo);
511  
512  	return (err < 0) ? err : result;
513  }
514  
515  
516  /*
517   * check access permission to the port
518   */
check_port_perm(struct snd_seq_client_port * port,unsigned int flags)519  static int check_port_perm(struct snd_seq_client_port *port, unsigned int flags)
520  {
521  	if ((port->capability & flags) != flags)
522  		return 0;
523  	return flags;
524  }
525  
526  /*
527   * check if the destination client is available, and return the pointer
528   * if filter is non-zero, client filter bitmap is tested.
529   */
get_event_dest_client(struct snd_seq_event * event,int filter)530  static struct snd_seq_client *get_event_dest_client(struct snd_seq_event *event,
531  						    int filter)
532  {
533  	struct snd_seq_client *dest;
534  
535  	dest = snd_seq_client_use_ptr(event->dest.client);
536  	if (dest == NULL)
537  		return NULL;
538  	if (! dest->accept_input)
539  		goto __not_avail;
540  	if (snd_seq_ev_is_ump(event))
541  		return dest; /* ok - no filter checks */
542  
543  	if ((dest->filter & SNDRV_SEQ_FILTER_USE_EVENT) &&
544  	    ! test_bit(event->type, dest->event_filter))
545  		goto __not_avail;
546  	if (filter && !(dest->filter & filter))
547  		goto __not_avail;
548  
549  	return dest; /* ok - accessible */
550  __not_avail:
551  	snd_seq_client_unlock(dest);
552  	return NULL;
553  }
554  
555  
556  /*
557   * Return the error event.
558   *
559   * If the receiver client is a user client, the original event is
560   * encapsulated in SNDRV_SEQ_EVENT_BOUNCE as variable length event.  If
561   * the original event is also variable length, the external data is
562   * copied after the event record.
563   * If the receiver client is a kernel client, the original event is
564   * quoted in SNDRV_SEQ_EVENT_KERNEL_ERROR, since this requires no extra
565   * kmalloc.
566   */
bounce_error_event(struct snd_seq_client * client,struct snd_seq_event * event,int err,int atomic,int hop)567  static int bounce_error_event(struct snd_seq_client *client,
568  			      struct snd_seq_event *event,
569  			      int err, int atomic, int hop)
570  {
571  	struct snd_seq_event bounce_ev;
572  	int result;
573  
574  	if (client == NULL ||
575  	    ! (client->filter & SNDRV_SEQ_FILTER_BOUNCE) ||
576  	    ! client->accept_input)
577  		return 0; /* ignored */
578  
579  	/* set up quoted error */
580  	memset(&bounce_ev, 0, sizeof(bounce_ev));
581  	bounce_ev.type = SNDRV_SEQ_EVENT_KERNEL_ERROR;
582  	bounce_ev.flags = SNDRV_SEQ_EVENT_LENGTH_FIXED;
583  	bounce_ev.queue = SNDRV_SEQ_QUEUE_DIRECT;
584  	bounce_ev.source.client = SNDRV_SEQ_CLIENT_SYSTEM;
585  	bounce_ev.source.port = SNDRV_SEQ_PORT_SYSTEM_ANNOUNCE;
586  	bounce_ev.dest.client = client->number;
587  	bounce_ev.dest.port = event->source.port;
588  	bounce_ev.data.quote.origin = event->dest;
589  	bounce_ev.data.quote.event = event;
590  	bounce_ev.data.quote.value = -err; /* use positive value */
591  	result = snd_seq_deliver_single_event(NULL, &bounce_ev, 0, atomic, hop + 1);
592  	if (result < 0) {
593  		client->event_lost++;
594  		return result;
595  	}
596  
597  	return result;
598  }
599  
600  
601  /*
602   * rewrite the time-stamp of the event record with the curren time
603   * of the given queue.
604   * return non-zero if updated.
605   */
update_timestamp_of_queue(struct snd_seq_event * event,int queue,int real_time)606  static int update_timestamp_of_queue(struct snd_seq_event *event,
607  				     int queue, int real_time)
608  {
609  	struct snd_seq_queue *q;
610  
611  	q = queueptr(queue);
612  	if (! q)
613  		return 0;
614  	event->queue = queue;
615  	event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
616  	if (real_time) {
617  		event->time.time = snd_seq_timer_get_cur_time(q->timer, true);
618  		event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
619  	} else {
620  		event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
621  		event->flags |= SNDRV_SEQ_TIME_STAMP_TICK;
622  	}
623  	queuefree(q);
624  	return 1;
625  }
626  
627  /* deliver a single event; called from below and UMP converter */
__snd_seq_deliver_single_event(struct snd_seq_client * dest,struct snd_seq_client_port * dest_port,struct snd_seq_event * event,int atomic,int hop)628  int __snd_seq_deliver_single_event(struct snd_seq_client *dest,
629  				   struct snd_seq_client_port *dest_port,
630  				   struct snd_seq_event *event,
631  				   int atomic, int hop)
632  {
633  	switch (dest->type) {
634  	case USER_CLIENT:
635  		if (!dest->data.user.fifo)
636  			return 0;
637  		return snd_seq_fifo_event_in(dest->data.user.fifo, event);
638  	case KERNEL_CLIENT:
639  		if (!dest_port->event_input)
640  			return 0;
641  		return dest_port->event_input(event,
642  					      snd_seq_ev_is_direct(event),
643  					      dest_port->private_data,
644  					      atomic, hop);
645  	}
646  	return 0;
647  }
648  
649  /*
650   * deliver an event to the specified destination.
651   * if filter is non-zero, client filter bitmap is tested.
652   *
653   *  RETURN VALUE: 0 : if succeeded
654   *		 <0 : error
655   */
snd_seq_deliver_single_event(struct snd_seq_client * client,struct snd_seq_event * event,int filter,int atomic,int hop)656  static int snd_seq_deliver_single_event(struct snd_seq_client *client,
657  					struct snd_seq_event *event,
658  					int filter, int atomic, int hop)
659  {
660  	struct snd_seq_client *dest = NULL;
661  	struct snd_seq_client_port *dest_port = NULL;
662  	int result = -ENOENT;
663  	int direct;
664  
665  	direct = snd_seq_ev_is_direct(event);
666  
667  	dest = get_event_dest_client(event, filter);
668  	if (dest == NULL)
669  		goto __skip;
670  	dest_port = snd_seq_port_use_ptr(dest, event->dest.port);
671  	if (dest_port == NULL)
672  		goto __skip;
673  
674  	/* check permission */
675  	if (! check_port_perm(dest_port, SNDRV_SEQ_PORT_CAP_WRITE)) {
676  		result = -EPERM;
677  		goto __skip;
678  	}
679  
680  	if (dest_port->timestamping)
681  		update_timestamp_of_queue(event, dest_port->time_queue,
682  					  dest_port->time_real);
683  
684  #if IS_ENABLED(CONFIG_SND_SEQ_UMP)
685  	if (!(dest->filter & SNDRV_SEQ_FILTER_NO_CONVERT)) {
686  		if (snd_seq_ev_is_ump(event)) {
687  			result = snd_seq_deliver_from_ump(client, dest, dest_port,
688  							  event, atomic, hop);
689  			goto __skip;
690  		} else if (snd_seq_client_is_ump(dest)) {
691  			result = snd_seq_deliver_to_ump(client, dest, dest_port,
692  							event, atomic, hop);
693  			goto __skip;
694  		}
695  	}
696  #endif /* CONFIG_SND_SEQ_UMP */
697  
698  	result = __snd_seq_deliver_single_event(dest, dest_port, event,
699  						atomic, hop);
700  
701    __skip:
702  	if (dest_port)
703  		snd_seq_port_unlock(dest_port);
704  	if (dest)
705  		snd_seq_client_unlock(dest);
706  
707  	if (result < 0 && !direct) {
708  		result = bounce_error_event(client, event, result, atomic, hop);
709  	}
710  	return result;
711  }
712  
713  
714  /*
715   * send the event to all subscribers:
716   */
__deliver_to_subscribers(struct snd_seq_client * client,struct snd_seq_event * event,struct snd_seq_client_port * src_port,int atomic,int hop)717  static int __deliver_to_subscribers(struct snd_seq_client *client,
718  				    struct snd_seq_event *event,
719  				    struct snd_seq_client_port *src_port,
720  				    int atomic, int hop)
721  {
722  	struct snd_seq_subscribers *subs;
723  	int err, result = 0, num_ev = 0;
724  	union __snd_seq_event event_saved;
725  	size_t saved_size;
726  	struct snd_seq_port_subs_info *grp;
727  
728  	/* save original event record */
729  	saved_size = snd_seq_event_packet_size(event);
730  	memcpy(&event_saved, event, saved_size);
731  	grp = &src_port->c_src;
732  
733  	/* lock list */
734  	if (atomic)
735  		read_lock(&grp->list_lock);
736  	else
737  		down_read_nested(&grp->list_mutex, hop);
738  	list_for_each_entry(subs, &grp->list_head, src_list) {
739  		/* both ports ready? */
740  		if (atomic_read(&subs->ref_count) != 2)
741  			continue;
742  		event->dest = subs->info.dest;
743  		if (subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
744  			/* convert time according to flag with subscription */
745  			update_timestamp_of_queue(event, subs->info.queue,
746  						  subs->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL);
747  		err = snd_seq_deliver_single_event(client, event,
748  						   0, atomic, hop);
749  		if (err < 0) {
750  			/* save first error that occurs and continue */
751  			if (!result)
752  				result = err;
753  			continue;
754  		}
755  		num_ev++;
756  		/* restore original event record */
757  		memcpy(event, &event_saved, saved_size);
758  	}
759  	if (atomic)
760  		read_unlock(&grp->list_lock);
761  	else
762  		up_read(&grp->list_mutex);
763  	memcpy(event, &event_saved, saved_size);
764  	return (result < 0) ? result : num_ev;
765  }
766  
deliver_to_subscribers(struct snd_seq_client * client,struct snd_seq_event * event,int atomic,int hop)767  static int deliver_to_subscribers(struct snd_seq_client *client,
768  				  struct snd_seq_event *event,
769  				  int atomic, int hop)
770  {
771  	struct snd_seq_client_port *src_port;
772  	int ret = 0, ret2;
773  
774  	src_port = snd_seq_port_use_ptr(client, event->source.port);
775  	if (src_port) {
776  		ret = __deliver_to_subscribers(client, event, src_port, atomic, hop);
777  		snd_seq_port_unlock(src_port);
778  	}
779  
780  	if (client->ump_endpoint_port < 0 ||
781  	    event->source.port == client->ump_endpoint_port)
782  		return ret;
783  
784  	src_port = snd_seq_port_use_ptr(client, client->ump_endpoint_port);
785  	if (!src_port)
786  		return ret;
787  	ret2 = __deliver_to_subscribers(client, event, src_port, atomic, hop);
788  	snd_seq_port_unlock(src_port);
789  	return ret2 < 0 ? ret2 : ret;
790  }
791  
792  /* deliver an event to the destination port(s).
793   * if the event is to subscribers or broadcast, the event is dispatched
794   * to multiple targets.
795   *
796   * RETURN VALUE: n > 0  : the number of delivered events.
797   *               n == 0 : the event was not passed to any client.
798   *               n < 0  : error - event was not processed.
799   */
snd_seq_deliver_event(struct snd_seq_client * client,struct snd_seq_event * event,int atomic,int hop)800  static int snd_seq_deliver_event(struct snd_seq_client *client, struct snd_seq_event *event,
801  				 int atomic, int hop)
802  {
803  	int result;
804  
805  	hop++;
806  	if (hop >= SNDRV_SEQ_MAX_HOPS) {
807  		pr_debug("ALSA: seq: too long delivery path (%d:%d->%d:%d)\n",
808  			   event->source.client, event->source.port,
809  			   event->dest.client, event->dest.port);
810  		return -EMLINK;
811  	}
812  
813  	if (snd_seq_ev_is_variable(event) &&
814  	    snd_BUG_ON(atomic && (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR)))
815  		return -EINVAL;
816  
817  	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS ||
818  	    event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS)
819  		result = deliver_to_subscribers(client, event, atomic, hop);
820  	else
821  		result = snd_seq_deliver_single_event(client, event, 0, atomic, hop);
822  
823  	return result;
824  }
825  
826  /*
827   * dispatch an event cell:
828   * This function is called only from queue check routines in timer
829   * interrupts or after enqueued.
830   * The event cell shall be released or re-queued in this function.
831   *
832   * RETURN VALUE: n > 0  : the number of delivered events.
833   *		 n == 0 : the event was not passed to any client.
834   *		 n < 0  : error - event was not processed.
835   */
snd_seq_dispatch_event(struct snd_seq_event_cell * cell,int atomic,int hop)836  int snd_seq_dispatch_event(struct snd_seq_event_cell *cell, int atomic, int hop)
837  {
838  	struct snd_seq_client *client;
839  	int result;
840  
841  	if (snd_BUG_ON(!cell))
842  		return -EINVAL;
843  
844  	client = snd_seq_client_use_ptr(cell->event.source.client);
845  	if (client == NULL) {
846  		snd_seq_cell_free(cell); /* release this cell */
847  		return -EINVAL;
848  	}
849  
850  	if (!snd_seq_ev_is_ump(&cell->event) &&
851  	    cell->event.type == SNDRV_SEQ_EVENT_NOTE) {
852  		/* NOTE event:
853  		 * the event cell is re-used as a NOTE-OFF event and
854  		 * enqueued again.
855  		 */
856  		struct snd_seq_event tmpev, *ev;
857  
858  		/* reserve this event to enqueue note-off later */
859  		tmpev = cell->event;
860  		tmpev.type = SNDRV_SEQ_EVENT_NOTEON;
861  		result = snd_seq_deliver_event(client, &tmpev, atomic, hop);
862  
863  		/*
864  		 * This was originally a note event.  We now re-use the
865  		 * cell for the note-off event.
866  		 */
867  
868  		ev = &cell->event;
869  		ev->type = SNDRV_SEQ_EVENT_NOTEOFF;
870  		ev->flags |= SNDRV_SEQ_PRIORITY_HIGH;
871  
872  		/* add the duration time */
873  		switch (ev->flags & SNDRV_SEQ_TIME_STAMP_MASK) {
874  		case SNDRV_SEQ_TIME_STAMP_TICK:
875  			cell->event.time.tick += ev->data.note.duration;
876  			break;
877  		case SNDRV_SEQ_TIME_STAMP_REAL:
878  			/* unit for duration is ms */
879  			ev->time.time.tv_nsec += 1000000 * (ev->data.note.duration % 1000);
880  			ev->time.time.tv_sec += ev->data.note.duration / 1000 +
881  						ev->time.time.tv_nsec / 1000000000;
882  			ev->time.time.tv_nsec %= 1000000000;
883  			break;
884  		}
885  		ev->data.note.velocity = ev->data.note.off_velocity;
886  
887  		/* Now queue this cell as the note off event */
888  		if (snd_seq_enqueue_event(cell, atomic, hop) < 0)
889  			snd_seq_cell_free(cell); /* release this cell */
890  
891  	} else {
892  		/* Normal events:
893  		 * event cell is freed after processing the event
894  		 */
895  
896  		result = snd_seq_deliver_event(client, &cell->event, atomic, hop);
897  		snd_seq_cell_free(cell);
898  	}
899  
900  	snd_seq_client_unlock(client);
901  	return result;
902  }
903  
904  
905  /* Allocate a cell from client pool and enqueue it to queue:
906   * if pool is empty and blocking is TRUE, sleep until a new cell is
907   * available.
908   */
snd_seq_client_enqueue_event(struct snd_seq_client * client,struct snd_seq_event * event,struct file * file,int blocking,int atomic,int hop,struct mutex * mutexp)909  static int snd_seq_client_enqueue_event(struct snd_seq_client *client,
910  					struct snd_seq_event *event,
911  					struct file *file, int blocking,
912  					int atomic, int hop,
913  					struct mutex *mutexp)
914  {
915  	struct snd_seq_event_cell *cell;
916  	int err;
917  
918  	/* special queue values - force direct passing */
919  	if (event->queue == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
920  		event->dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
921  		event->queue = SNDRV_SEQ_QUEUE_DIRECT;
922  	} else if (event->dest.client == SNDRV_SEQ_ADDRESS_SUBSCRIBERS) {
923  		/* check presence of source port */
924  		struct snd_seq_client_port *src_port = snd_seq_port_use_ptr(client, event->source.port);
925  		if (src_port == NULL)
926  			return -EINVAL;
927  		snd_seq_port_unlock(src_port);
928  	}
929  
930  	/* direct event processing without enqueued */
931  	if (snd_seq_ev_is_direct(event)) {
932  		if (!snd_seq_ev_is_ump(event) &&
933  		    event->type == SNDRV_SEQ_EVENT_NOTE)
934  			return -EINVAL; /* this event must be enqueued! */
935  		return snd_seq_deliver_event(client, event, atomic, hop);
936  	}
937  
938  	/* Not direct, normal queuing */
939  	if (snd_seq_queue_is_used(event->queue, client->number) <= 0)
940  		return -EINVAL;  /* invalid queue */
941  	if (! snd_seq_write_pool_allocated(client))
942  		return -ENXIO; /* queue is not allocated */
943  
944  	/* allocate an event cell */
945  	err = snd_seq_event_dup(client->pool, event, &cell, !blocking || atomic,
946  				file, mutexp);
947  	if (err < 0)
948  		return err;
949  
950  	/* we got a cell. enqueue it. */
951  	err = snd_seq_enqueue_event(cell, atomic, hop);
952  	if (err < 0) {
953  		snd_seq_cell_free(cell);
954  		return err;
955  	}
956  
957  	return 0;
958  }
959  
960  
961  /*
962   * check validity of event type and data length.
963   * return non-zero if invalid.
964   */
check_event_type_and_length(struct snd_seq_event * ev)965  static int check_event_type_and_length(struct snd_seq_event *ev)
966  {
967  	switch (snd_seq_ev_length_type(ev)) {
968  	case SNDRV_SEQ_EVENT_LENGTH_FIXED:
969  		if (snd_seq_ev_is_variable_type(ev))
970  			return -EINVAL;
971  		break;
972  	case SNDRV_SEQ_EVENT_LENGTH_VARIABLE:
973  		if (! snd_seq_ev_is_variable_type(ev) ||
974  		    (ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK) >= SNDRV_SEQ_MAX_EVENT_LEN)
975  			return -EINVAL;
976  		break;
977  	case SNDRV_SEQ_EVENT_LENGTH_VARUSR:
978  		if (! snd_seq_ev_is_direct(ev))
979  			return -EINVAL;
980  		break;
981  	}
982  	return 0;
983  }
984  
985  
986  /* handle write() */
987  /* possible error values:
988   *	-ENXIO	invalid client or file open mode
989   *	-ENOMEM	malloc failed
990   *	-EFAULT	seg. fault during copy from user space
991   *	-EINVAL	invalid event
992   *	-EAGAIN	no space in output pool
993   *	-EINTR	interrupts while sleep
994   *	-EMLINK	too many hops
995   *	others	depends on return value from driver callback
996   */
snd_seq_write(struct file * file,const char __user * buf,size_t count,loff_t * offset)997  static ssize_t snd_seq_write(struct file *file, const char __user *buf,
998  			     size_t count, loff_t *offset)
999  {
1000  	struct snd_seq_client *client = file->private_data;
1001  	int written = 0, len;
1002  	int err, handled;
1003  	union __snd_seq_event __event;
1004  	struct snd_seq_event *ev = &__event.legacy;
1005  
1006  	if (!(snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT))
1007  		return -ENXIO;
1008  
1009  	/* check client structures are in place */
1010  	if (snd_BUG_ON(!client))
1011  		return -ENXIO;
1012  
1013  	if (!client->accept_output || client->pool == NULL)
1014  		return -ENXIO;
1015  
1016   repeat:
1017  	handled = 0;
1018  	/* allocate the pool now if the pool is not allocated yet */
1019  	mutex_lock(&client->ioctl_mutex);
1020  	if (client->pool->size > 0 && !snd_seq_write_pool_allocated(client)) {
1021  		err = snd_seq_pool_init(client->pool);
1022  		if (err < 0)
1023  			goto out;
1024  	}
1025  
1026  	/* only process whole events */
1027  	err = -EINVAL;
1028  	while (count >= sizeof(struct snd_seq_event)) {
1029  		/* Read in the event header from the user */
1030  		len = sizeof(struct snd_seq_event);
1031  		if (copy_from_user(ev, buf, len)) {
1032  			err = -EFAULT;
1033  			break;
1034  		}
1035  		/* read in the rest bytes for UMP events */
1036  		if (snd_seq_ev_is_ump(ev)) {
1037  			if (count < sizeof(struct snd_seq_ump_event))
1038  				break;
1039  			if (copy_from_user((char *)ev + len, buf + len,
1040  					   sizeof(struct snd_seq_ump_event) - len)) {
1041  				err = -EFAULT;
1042  				break;
1043  			}
1044  			len = sizeof(struct snd_seq_ump_event);
1045  		}
1046  
1047  		ev->source.client = client->number;	/* fill in client number */
1048  		/* Check for extension data length */
1049  		if (check_event_type_and_length(ev)) {
1050  			err = -EINVAL;
1051  			break;
1052  		}
1053  
1054  		if (!event_is_compatible(client, ev)) {
1055  			err = -EINVAL;
1056  			break;
1057  		}
1058  
1059  		/* check for special events */
1060  		if (!snd_seq_ev_is_ump(ev)) {
1061  			if (ev->type == SNDRV_SEQ_EVENT_NONE)
1062  				goto __skip_event;
1063  			else if (snd_seq_ev_is_reserved(ev)) {
1064  				err = -EINVAL;
1065  				break;
1066  			}
1067  		}
1068  
1069  		if (snd_seq_ev_is_variable(ev)) {
1070  			int extlen = ev->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
1071  			if ((size_t)(extlen + len) > count) {
1072  				/* back out, will get an error this time or next */
1073  				err = -EINVAL;
1074  				break;
1075  			}
1076  			/* set user space pointer */
1077  			ev->data.ext.len = extlen | SNDRV_SEQ_EXT_USRPTR;
1078  			ev->data.ext.ptr = (char __force *)buf + len;
1079  			len += extlen; /* increment data length */
1080  		} else {
1081  #ifdef CONFIG_COMPAT
1082  			if (client->convert32 && snd_seq_ev_is_varusr(ev))
1083  				ev->data.ext.ptr =
1084  					(void __force *)compat_ptr(ev->data.raw32.d[1]);
1085  #endif
1086  		}
1087  
1088  		/* ok, enqueue it */
1089  		err = snd_seq_client_enqueue_event(client, ev, file,
1090  						   !(file->f_flags & O_NONBLOCK),
1091  						   0, 0, &client->ioctl_mutex);
1092  		if (err < 0)
1093  			break;
1094  		handled++;
1095  
1096  	__skip_event:
1097  		/* Update pointers and counts */
1098  		count -= len;
1099  		buf += len;
1100  		written += len;
1101  
1102  		/* let's have a coffee break if too many events are queued */
1103  		if (++handled >= 200) {
1104  			mutex_unlock(&client->ioctl_mutex);
1105  			goto repeat;
1106  		}
1107  	}
1108  
1109   out:
1110  	mutex_unlock(&client->ioctl_mutex);
1111  	return written ? written : err;
1112  }
1113  
1114  
1115  /*
1116   * handle polling
1117   */
snd_seq_poll(struct file * file,poll_table * wait)1118  static __poll_t snd_seq_poll(struct file *file, poll_table * wait)
1119  {
1120  	struct snd_seq_client *client = file->private_data;
1121  	__poll_t mask = 0;
1122  
1123  	/* check client structures are in place */
1124  	if (snd_BUG_ON(!client))
1125  		return EPOLLERR;
1126  
1127  	if ((snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_INPUT) &&
1128  	    client->data.user.fifo) {
1129  
1130  		/* check if data is available in the outqueue */
1131  		if (snd_seq_fifo_poll_wait(client->data.user.fifo, file, wait))
1132  			mask |= EPOLLIN | EPOLLRDNORM;
1133  	}
1134  
1135  	if (snd_seq_file_flags(file) & SNDRV_SEQ_LFLG_OUTPUT) {
1136  
1137  		/* check if data is available in the pool */
1138  		if (!snd_seq_write_pool_allocated(client) ||
1139  		    snd_seq_pool_poll_wait(client->pool, file, wait))
1140  			mask |= EPOLLOUT | EPOLLWRNORM;
1141  	}
1142  
1143  	return mask;
1144  }
1145  
1146  
1147  /*-----------------------------------------------------*/
1148  
snd_seq_ioctl_pversion(struct snd_seq_client * client,void * arg)1149  static int snd_seq_ioctl_pversion(struct snd_seq_client *client, void *arg)
1150  {
1151  	int *pversion = arg;
1152  
1153  	*pversion = SNDRV_SEQ_VERSION;
1154  	return 0;
1155  }
1156  
snd_seq_ioctl_user_pversion(struct snd_seq_client * client,void * arg)1157  static int snd_seq_ioctl_user_pversion(struct snd_seq_client *client, void *arg)
1158  {
1159  	client->user_pversion = *(unsigned int *)arg;
1160  	return 0;
1161  }
1162  
snd_seq_ioctl_client_id(struct snd_seq_client * client,void * arg)1163  static int snd_seq_ioctl_client_id(struct snd_seq_client *client, void *arg)
1164  {
1165  	int *client_id = arg;
1166  
1167  	*client_id = client->number;
1168  	return 0;
1169  }
1170  
1171  /* SYSTEM_INFO ioctl() */
snd_seq_ioctl_system_info(struct snd_seq_client * client,void * arg)1172  static int snd_seq_ioctl_system_info(struct snd_seq_client *client, void *arg)
1173  {
1174  	struct snd_seq_system_info *info = arg;
1175  
1176  	memset(info, 0, sizeof(*info));
1177  	/* fill the info fields */
1178  	info->queues = SNDRV_SEQ_MAX_QUEUES;
1179  	info->clients = SNDRV_SEQ_MAX_CLIENTS;
1180  	info->ports = SNDRV_SEQ_MAX_PORTS;
1181  	info->channels = 256;	/* fixed limit */
1182  	info->cur_clients = client_usage.cur;
1183  	info->cur_queues = snd_seq_queue_get_cur_queues();
1184  
1185  	return 0;
1186  }
1187  
1188  
1189  /* RUNNING_MODE ioctl() */
snd_seq_ioctl_running_mode(struct snd_seq_client * client,void * arg)1190  static int snd_seq_ioctl_running_mode(struct snd_seq_client *client, void  *arg)
1191  {
1192  	struct snd_seq_running_info *info = arg;
1193  	struct snd_seq_client *cptr;
1194  	int err = 0;
1195  
1196  	/* requested client number */
1197  	cptr = snd_seq_client_use_ptr(info->client);
1198  	if (cptr == NULL)
1199  		return -ENOENT;		/* don't change !!! */
1200  
1201  #ifdef SNDRV_BIG_ENDIAN
1202  	if (!info->big_endian) {
1203  		err = -EINVAL;
1204  		goto __err;
1205  	}
1206  #else
1207  	if (info->big_endian) {
1208  		err = -EINVAL;
1209  		goto __err;
1210  	}
1211  
1212  #endif
1213  	if (info->cpu_mode > sizeof(long)) {
1214  		err = -EINVAL;
1215  		goto __err;
1216  	}
1217  	cptr->convert32 = (info->cpu_mode < sizeof(long));
1218   __err:
1219  	snd_seq_client_unlock(cptr);
1220  	return err;
1221  }
1222  
1223  /* CLIENT_INFO ioctl() */
get_client_info(struct snd_seq_client * cptr,struct snd_seq_client_info * info)1224  static void get_client_info(struct snd_seq_client *cptr,
1225  			    struct snd_seq_client_info *info)
1226  {
1227  	info->client = cptr->number;
1228  
1229  	/* fill the info fields */
1230  	info->type = cptr->type;
1231  	strcpy(info->name, cptr->name);
1232  	info->filter = cptr->filter;
1233  	info->event_lost = cptr->event_lost;
1234  	memcpy(info->event_filter, cptr->event_filter, 32);
1235  	info->group_filter = cptr->group_filter;
1236  	info->num_ports = cptr->num_ports;
1237  
1238  	if (cptr->type == USER_CLIENT)
1239  		info->pid = pid_vnr(cptr->data.user.owner);
1240  	else
1241  		info->pid = -1;
1242  
1243  	if (cptr->type == KERNEL_CLIENT)
1244  		info->card = cptr->data.kernel.card ? cptr->data.kernel.card->number : -1;
1245  	else
1246  		info->card = -1;
1247  
1248  	info->midi_version = cptr->midi_version;
1249  	memset(info->reserved, 0, sizeof(info->reserved));
1250  }
1251  
snd_seq_ioctl_get_client_info(struct snd_seq_client * client,void * arg)1252  static int snd_seq_ioctl_get_client_info(struct snd_seq_client *client,
1253  					 void *arg)
1254  {
1255  	struct snd_seq_client_info *client_info = arg;
1256  	struct snd_seq_client *cptr;
1257  
1258  	/* requested client number */
1259  	cptr = snd_seq_client_use_ptr(client_info->client);
1260  	if (cptr == NULL)
1261  		return -ENOENT;		/* don't change !!! */
1262  
1263  	get_client_info(cptr, client_info);
1264  	snd_seq_client_unlock(cptr);
1265  
1266  	return 0;
1267  }
1268  
1269  
1270  /* CLIENT_INFO ioctl() */
snd_seq_ioctl_set_client_info(struct snd_seq_client * client,void * arg)1271  static int snd_seq_ioctl_set_client_info(struct snd_seq_client *client,
1272  					 void *arg)
1273  {
1274  	struct snd_seq_client_info *client_info = arg;
1275  
1276  	/* it is not allowed to set the info fields for an another client */
1277  	if (client->number != client_info->client)
1278  		return -EPERM;
1279  	/* also client type must be set now */
1280  	if (client->type != client_info->type)
1281  		return -EINVAL;
1282  
1283  	if (client->user_pversion >= SNDRV_PROTOCOL_VERSION(1, 0, 3)) {
1284  		/* check validity of midi_version field */
1285  		if (client_info->midi_version > SNDRV_SEQ_CLIENT_UMP_MIDI_2_0)
1286  			return -EINVAL;
1287  
1288  		/* check if UMP is supported in kernel */
1289  		if (!IS_ENABLED(CONFIG_SND_SEQ_UMP) &&
1290  		    client_info->midi_version > 0)
1291  			return -EINVAL;
1292  	}
1293  
1294  	/* fill the info fields */
1295  	if (client_info->name[0])
1296  		strscpy(client->name, client_info->name, sizeof(client->name));
1297  
1298  	client->filter = client_info->filter;
1299  	client->event_lost = client_info->event_lost;
1300  	if (client->user_pversion >= SNDRV_PROTOCOL_VERSION(1, 0, 3))
1301  		client->midi_version = client_info->midi_version;
1302  	memcpy(client->event_filter, client_info->event_filter, 32);
1303  	client->group_filter = client_info->group_filter;
1304  	return 0;
1305  }
1306  
1307  
1308  /*
1309   * CREATE PORT ioctl()
1310   */
snd_seq_ioctl_create_port(struct snd_seq_client * client,void * arg)1311  static int snd_seq_ioctl_create_port(struct snd_seq_client *client, void *arg)
1312  {
1313  	struct snd_seq_port_info *info = arg;
1314  	struct snd_seq_client_port *port;
1315  	struct snd_seq_port_callback *callback;
1316  	int port_idx, err;
1317  
1318  	/* it is not allowed to create the port for an another client */
1319  	if (info->addr.client != client->number)
1320  		return -EPERM;
1321  	if (client->type == USER_CLIENT && info->kernel)
1322  		return -EINVAL;
1323  	if ((info->capability & SNDRV_SEQ_PORT_CAP_UMP_ENDPOINT) &&
1324  	    client->ump_endpoint_port >= 0)
1325  		return -EBUSY;
1326  
1327  	if (info->flags & SNDRV_SEQ_PORT_FLG_GIVEN_PORT)
1328  		port_idx = info->addr.port;
1329  	else
1330  		port_idx = -1;
1331  	if (port_idx >= SNDRV_SEQ_ADDRESS_UNKNOWN)
1332  		return -EINVAL;
1333  	err = snd_seq_create_port(client, port_idx, &port);
1334  	if (err < 0)
1335  		return err;
1336  
1337  	if (client->type == KERNEL_CLIENT) {
1338  		callback = info->kernel;
1339  		if (callback) {
1340  			if (callback->owner)
1341  				port->owner = callback->owner;
1342  			port->private_data = callback->private_data;
1343  			port->private_free = callback->private_free;
1344  			port->event_input = callback->event_input;
1345  			port->c_src.open = callback->subscribe;
1346  			port->c_src.close = callback->unsubscribe;
1347  			port->c_dest.open = callback->use;
1348  			port->c_dest.close = callback->unuse;
1349  		}
1350  	}
1351  
1352  	info->addr = port->addr;
1353  
1354  	snd_seq_set_port_info(port, info);
1355  	if (info->capability & SNDRV_SEQ_PORT_CAP_UMP_ENDPOINT)
1356  		client->ump_endpoint_port = port->addr.port;
1357  	snd_seq_system_client_ev_port_start(port->addr.client, port->addr.port);
1358  	snd_seq_port_unlock(port);
1359  
1360  	return 0;
1361  }
1362  
1363  /*
1364   * DELETE PORT ioctl()
1365   */
snd_seq_ioctl_delete_port(struct snd_seq_client * client,void * arg)1366  static int snd_seq_ioctl_delete_port(struct snd_seq_client *client, void *arg)
1367  {
1368  	struct snd_seq_port_info *info = arg;
1369  	int err;
1370  
1371  	/* it is not allowed to remove the port for an another client */
1372  	if (info->addr.client != client->number)
1373  		return -EPERM;
1374  
1375  	err = snd_seq_delete_port(client, info->addr.port);
1376  	if (err >= 0) {
1377  		if (client->ump_endpoint_port == info->addr.port)
1378  			client->ump_endpoint_port = -1;
1379  		snd_seq_system_client_ev_port_exit(client->number, info->addr.port);
1380  	}
1381  	return err;
1382  }
1383  
1384  
1385  /*
1386   * GET_PORT_INFO ioctl() (on any client)
1387   */
snd_seq_ioctl_get_port_info(struct snd_seq_client * client,void * arg)1388  static int snd_seq_ioctl_get_port_info(struct snd_seq_client *client, void *arg)
1389  {
1390  	struct snd_seq_port_info *info = arg;
1391  	struct snd_seq_client *cptr;
1392  	struct snd_seq_client_port *port;
1393  
1394  	cptr = snd_seq_client_use_ptr(info->addr.client);
1395  	if (cptr == NULL)
1396  		return -ENXIO;
1397  
1398  	port = snd_seq_port_use_ptr(cptr, info->addr.port);
1399  	if (port == NULL) {
1400  		snd_seq_client_unlock(cptr);
1401  		return -ENOENT;			/* don't change */
1402  	}
1403  
1404  	/* get port info */
1405  	snd_seq_get_port_info(port, info);
1406  	snd_seq_port_unlock(port);
1407  	snd_seq_client_unlock(cptr);
1408  
1409  	return 0;
1410  }
1411  
1412  
1413  /*
1414   * SET_PORT_INFO ioctl() (only ports on this/own client)
1415   */
snd_seq_ioctl_set_port_info(struct snd_seq_client * client,void * arg)1416  static int snd_seq_ioctl_set_port_info(struct snd_seq_client *client, void *arg)
1417  {
1418  	struct snd_seq_port_info *info = arg;
1419  	struct snd_seq_client_port *port;
1420  
1421  	if (info->addr.client != client->number) /* only set our own ports ! */
1422  		return -EPERM;
1423  	port = snd_seq_port_use_ptr(client, info->addr.port);
1424  	if (port) {
1425  		snd_seq_set_port_info(port, info);
1426  		snd_seq_port_unlock(port);
1427  	}
1428  	return 0;
1429  }
1430  
1431  
1432  /*
1433   * port subscription (connection)
1434   */
1435  #define PERM_RD		(SNDRV_SEQ_PORT_CAP_READ|SNDRV_SEQ_PORT_CAP_SUBS_READ)
1436  #define PERM_WR		(SNDRV_SEQ_PORT_CAP_WRITE|SNDRV_SEQ_PORT_CAP_SUBS_WRITE)
1437  
check_subscription_permission(struct snd_seq_client * client,struct snd_seq_client_port * sport,struct snd_seq_client_port * dport,struct snd_seq_port_subscribe * subs)1438  static int check_subscription_permission(struct snd_seq_client *client,
1439  					 struct snd_seq_client_port *sport,
1440  					 struct snd_seq_client_port *dport,
1441  					 struct snd_seq_port_subscribe *subs)
1442  {
1443  	if (client->number != subs->sender.client &&
1444  	    client->number != subs->dest.client) {
1445  		/* connection by third client - check export permission */
1446  		if (check_port_perm(sport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
1447  			return -EPERM;
1448  		if (check_port_perm(dport, SNDRV_SEQ_PORT_CAP_NO_EXPORT))
1449  			return -EPERM;
1450  	}
1451  
1452  	/* check read permission */
1453  	/* if sender or receiver is the subscribing client itself,
1454  	 * no permission check is necessary
1455  	 */
1456  	if (client->number != subs->sender.client) {
1457  		if (! check_port_perm(sport, PERM_RD))
1458  			return -EPERM;
1459  	}
1460  	/* check write permission */
1461  	if (client->number != subs->dest.client) {
1462  		if (! check_port_perm(dport, PERM_WR))
1463  			return -EPERM;
1464  	}
1465  	return 0;
1466  }
1467  
1468  /*
1469   * send an subscription notify event to user client:
1470   * client must be user client.
1471   */
snd_seq_client_notify_subscription(int client,int port,struct snd_seq_port_subscribe * info,int evtype)1472  int snd_seq_client_notify_subscription(int client, int port,
1473  				       struct snd_seq_port_subscribe *info,
1474  				       int evtype)
1475  {
1476  	struct snd_seq_event event;
1477  
1478  	memset(&event, 0, sizeof(event));
1479  	event.type = evtype;
1480  	event.data.connect.dest = info->dest;
1481  	event.data.connect.sender = info->sender;
1482  
1483  	return snd_seq_system_notify(client, port, &event);  /* non-atomic */
1484  }
1485  
1486  
1487  /*
1488   * add to port's subscription list IOCTL interface
1489   */
snd_seq_ioctl_subscribe_port(struct snd_seq_client * client,void * arg)1490  static int snd_seq_ioctl_subscribe_port(struct snd_seq_client *client,
1491  					void *arg)
1492  {
1493  	struct snd_seq_port_subscribe *subs = arg;
1494  	int result = -EINVAL;
1495  	struct snd_seq_client *receiver = NULL, *sender = NULL;
1496  	struct snd_seq_client_port *sport = NULL, *dport = NULL;
1497  
1498  	receiver = snd_seq_client_use_ptr(subs->dest.client);
1499  	if (!receiver)
1500  		goto __end;
1501  	sender = snd_seq_client_use_ptr(subs->sender.client);
1502  	if (!sender)
1503  		goto __end;
1504  	sport = snd_seq_port_use_ptr(sender, subs->sender.port);
1505  	if (!sport)
1506  		goto __end;
1507  	dport = snd_seq_port_use_ptr(receiver, subs->dest.port);
1508  	if (!dport)
1509  		goto __end;
1510  
1511  	result = check_subscription_permission(client, sport, dport, subs);
1512  	if (result < 0)
1513  		goto __end;
1514  
1515  	/* connect them */
1516  	result = snd_seq_port_connect(client, sender, sport, receiver, dport, subs);
1517  	if (! result) /* broadcast announce */
1518  		snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
1519  						   subs, SNDRV_SEQ_EVENT_PORT_SUBSCRIBED);
1520        __end:
1521        	if (sport)
1522  		snd_seq_port_unlock(sport);
1523  	if (dport)
1524  		snd_seq_port_unlock(dport);
1525  	if (sender)
1526  		snd_seq_client_unlock(sender);
1527  	if (receiver)
1528  		snd_seq_client_unlock(receiver);
1529  	return result;
1530  }
1531  
1532  
1533  /*
1534   * remove from port's subscription list
1535   */
snd_seq_ioctl_unsubscribe_port(struct snd_seq_client * client,void * arg)1536  static int snd_seq_ioctl_unsubscribe_port(struct snd_seq_client *client,
1537  					  void *arg)
1538  {
1539  	struct snd_seq_port_subscribe *subs = arg;
1540  	int result = -ENXIO;
1541  	struct snd_seq_client *receiver = NULL, *sender = NULL;
1542  	struct snd_seq_client_port *sport = NULL, *dport = NULL;
1543  
1544  	receiver = snd_seq_client_use_ptr(subs->dest.client);
1545  	if (!receiver)
1546  		goto __end;
1547  	sender = snd_seq_client_use_ptr(subs->sender.client);
1548  	if (!sender)
1549  		goto __end;
1550  	sport = snd_seq_port_use_ptr(sender, subs->sender.port);
1551  	if (!sport)
1552  		goto __end;
1553  	dport = snd_seq_port_use_ptr(receiver, subs->dest.port);
1554  	if (!dport)
1555  		goto __end;
1556  
1557  	result = check_subscription_permission(client, sport, dport, subs);
1558  	if (result < 0)
1559  		goto __end;
1560  
1561  	result = snd_seq_port_disconnect(client, sender, sport, receiver, dport, subs);
1562  	if (! result) /* broadcast announce */
1563  		snd_seq_client_notify_subscription(SNDRV_SEQ_ADDRESS_SUBSCRIBERS, 0,
1564  						   subs, SNDRV_SEQ_EVENT_PORT_UNSUBSCRIBED);
1565        __end:
1566        	if (sport)
1567  		snd_seq_port_unlock(sport);
1568  	if (dport)
1569  		snd_seq_port_unlock(dport);
1570  	if (sender)
1571  		snd_seq_client_unlock(sender);
1572  	if (receiver)
1573  		snd_seq_client_unlock(receiver);
1574  	return result;
1575  }
1576  
1577  
1578  /* CREATE_QUEUE ioctl() */
snd_seq_ioctl_create_queue(struct snd_seq_client * client,void * arg)1579  static int snd_seq_ioctl_create_queue(struct snd_seq_client *client, void *arg)
1580  {
1581  	struct snd_seq_queue_info *info = arg;
1582  	struct snd_seq_queue *q;
1583  
1584  	q = snd_seq_queue_alloc(client->number, info->locked, info->flags);
1585  	if (IS_ERR(q))
1586  		return PTR_ERR(q);
1587  
1588  	info->queue = q->queue;
1589  	info->locked = q->locked;
1590  	info->owner = q->owner;
1591  
1592  	/* set queue name */
1593  	if (!info->name[0])
1594  		snprintf(info->name, sizeof(info->name), "Queue-%d", q->queue);
1595  	strscpy(q->name, info->name, sizeof(q->name));
1596  	snd_use_lock_free(&q->use_lock);
1597  
1598  	return 0;
1599  }
1600  
1601  /* DELETE_QUEUE ioctl() */
snd_seq_ioctl_delete_queue(struct snd_seq_client * client,void * arg)1602  static int snd_seq_ioctl_delete_queue(struct snd_seq_client *client, void *arg)
1603  {
1604  	struct snd_seq_queue_info *info = arg;
1605  
1606  	return snd_seq_queue_delete(client->number, info->queue);
1607  }
1608  
1609  /* GET_QUEUE_INFO ioctl() */
snd_seq_ioctl_get_queue_info(struct snd_seq_client * client,void * arg)1610  static int snd_seq_ioctl_get_queue_info(struct snd_seq_client *client,
1611  					void *arg)
1612  {
1613  	struct snd_seq_queue_info *info = arg;
1614  	struct snd_seq_queue *q;
1615  
1616  	q = queueptr(info->queue);
1617  	if (q == NULL)
1618  		return -EINVAL;
1619  
1620  	memset(info, 0, sizeof(*info));
1621  	info->queue = q->queue;
1622  	info->owner = q->owner;
1623  	info->locked = q->locked;
1624  	strscpy(info->name, q->name, sizeof(info->name));
1625  	queuefree(q);
1626  
1627  	return 0;
1628  }
1629  
1630  /* SET_QUEUE_INFO ioctl() */
snd_seq_ioctl_set_queue_info(struct snd_seq_client * client,void * arg)1631  static int snd_seq_ioctl_set_queue_info(struct snd_seq_client *client,
1632  					void *arg)
1633  {
1634  	struct snd_seq_queue_info *info = arg;
1635  	struct snd_seq_queue *q;
1636  
1637  	if (info->owner != client->number)
1638  		return -EINVAL;
1639  
1640  	/* change owner/locked permission */
1641  	if (snd_seq_queue_check_access(info->queue, client->number)) {
1642  		if (snd_seq_queue_set_owner(info->queue, client->number, info->locked) < 0)
1643  			return -EPERM;
1644  		if (info->locked)
1645  			snd_seq_queue_use(info->queue, client->number, 1);
1646  	} else {
1647  		return -EPERM;
1648  	}
1649  
1650  	q = queueptr(info->queue);
1651  	if (! q)
1652  		return -EINVAL;
1653  	if (q->owner != client->number) {
1654  		queuefree(q);
1655  		return -EPERM;
1656  	}
1657  	strscpy(q->name, info->name, sizeof(q->name));
1658  	queuefree(q);
1659  
1660  	return 0;
1661  }
1662  
1663  /* GET_NAMED_QUEUE ioctl() */
snd_seq_ioctl_get_named_queue(struct snd_seq_client * client,void * arg)1664  static int snd_seq_ioctl_get_named_queue(struct snd_seq_client *client,
1665  					 void *arg)
1666  {
1667  	struct snd_seq_queue_info *info = arg;
1668  	struct snd_seq_queue *q;
1669  
1670  	q = snd_seq_queue_find_name(info->name);
1671  	if (q == NULL)
1672  		return -EINVAL;
1673  	info->queue = q->queue;
1674  	info->owner = q->owner;
1675  	info->locked = q->locked;
1676  	queuefree(q);
1677  
1678  	return 0;
1679  }
1680  
1681  /* GET_QUEUE_STATUS ioctl() */
snd_seq_ioctl_get_queue_status(struct snd_seq_client * client,void * arg)1682  static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
1683  					  void *arg)
1684  {
1685  	struct snd_seq_queue_status *status = arg;
1686  	struct snd_seq_queue *queue;
1687  	struct snd_seq_timer *tmr;
1688  
1689  	queue = queueptr(status->queue);
1690  	if (queue == NULL)
1691  		return -EINVAL;
1692  	memset(status, 0, sizeof(*status));
1693  	status->queue = queue->queue;
1694  
1695  	tmr = queue->timer;
1696  	status->events = queue->tickq->cells + queue->timeq->cells;
1697  
1698  	status->time = snd_seq_timer_get_cur_time(tmr, true);
1699  	status->tick = snd_seq_timer_get_cur_tick(tmr);
1700  
1701  	status->running = tmr->running;
1702  
1703  	status->flags = queue->flags;
1704  	queuefree(queue);
1705  
1706  	return 0;
1707  }
1708  
1709  
1710  /* GET_QUEUE_TEMPO ioctl() */
snd_seq_ioctl_get_queue_tempo(struct snd_seq_client * client,void * arg)1711  static int snd_seq_ioctl_get_queue_tempo(struct snd_seq_client *client,
1712  					 void *arg)
1713  {
1714  	struct snd_seq_queue_tempo *tempo = arg;
1715  	struct snd_seq_queue *queue;
1716  	struct snd_seq_timer *tmr;
1717  
1718  	queue = queueptr(tempo->queue);
1719  	if (queue == NULL)
1720  		return -EINVAL;
1721  	memset(tempo, 0, sizeof(*tempo));
1722  	tempo->queue = queue->queue;
1723  
1724  	tmr = queue->timer;
1725  
1726  	tempo->tempo = tmr->tempo;
1727  	tempo->ppq = tmr->ppq;
1728  	tempo->skew_value = tmr->skew;
1729  	tempo->skew_base = tmr->skew_base;
1730  	queuefree(queue);
1731  
1732  	return 0;
1733  }
1734  
1735  
1736  /* SET_QUEUE_TEMPO ioctl() */
snd_seq_set_queue_tempo(int client,struct snd_seq_queue_tempo * tempo)1737  int snd_seq_set_queue_tempo(int client, struct snd_seq_queue_tempo *tempo)
1738  {
1739  	if (!snd_seq_queue_check_access(tempo->queue, client))
1740  		return -EPERM;
1741  	return snd_seq_queue_timer_set_tempo(tempo->queue, client, tempo);
1742  }
1743  EXPORT_SYMBOL(snd_seq_set_queue_tempo);
1744  
snd_seq_ioctl_set_queue_tempo(struct snd_seq_client * client,void * arg)1745  static int snd_seq_ioctl_set_queue_tempo(struct snd_seq_client *client,
1746  					 void *arg)
1747  {
1748  	struct snd_seq_queue_tempo *tempo = arg;
1749  	int result;
1750  
1751  	result = snd_seq_set_queue_tempo(client->number, tempo);
1752  	return result < 0 ? result : 0;
1753  }
1754  
1755  
1756  /* GET_QUEUE_TIMER ioctl() */
snd_seq_ioctl_get_queue_timer(struct snd_seq_client * client,void * arg)1757  static int snd_seq_ioctl_get_queue_timer(struct snd_seq_client *client,
1758  					 void *arg)
1759  {
1760  	struct snd_seq_queue_timer *timer = arg;
1761  	struct snd_seq_queue *queue;
1762  	struct snd_seq_timer *tmr;
1763  
1764  	queue = queueptr(timer->queue);
1765  	if (queue == NULL)
1766  		return -EINVAL;
1767  
1768  	mutex_lock(&queue->timer_mutex);
1769  	tmr = queue->timer;
1770  	memset(timer, 0, sizeof(*timer));
1771  	timer->queue = queue->queue;
1772  
1773  	timer->type = tmr->type;
1774  	if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
1775  		timer->u.alsa.id = tmr->alsa_id;
1776  		timer->u.alsa.resolution = tmr->preferred_resolution;
1777  	}
1778  	mutex_unlock(&queue->timer_mutex);
1779  	queuefree(queue);
1780  
1781  	return 0;
1782  }
1783  
1784  
1785  /* SET_QUEUE_TIMER ioctl() */
snd_seq_ioctl_set_queue_timer(struct snd_seq_client * client,void * arg)1786  static int snd_seq_ioctl_set_queue_timer(struct snd_seq_client *client,
1787  					 void *arg)
1788  {
1789  	struct snd_seq_queue_timer *timer = arg;
1790  	int result = 0;
1791  
1792  	if (timer->type != SNDRV_SEQ_TIMER_ALSA)
1793  		return -EINVAL;
1794  
1795  	if (snd_seq_queue_check_access(timer->queue, client->number)) {
1796  		struct snd_seq_queue *q;
1797  		struct snd_seq_timer *tmr;
1798  
1799  		q = queueptr(timer->queue);
1800  		if (q == NULL)
1801  			return -ENXIO;
1802  		mutex_lock(&q->timer_mutex);
1803  		tmr = q->timer;
1804  		snd_seq_queue_timer_close(timer->queue);
1805  		tmr->type = timer->type;
1806  		if (tmr->type == SNDRV_SEQ_TIMER_ALSA) {
1807  			tmr->alsa_id = timer->u.alsa.id;
1808  			tmr->preferred_resolution = timer->u.alsa.resolution;
1809  		}
1810  		result = snd_seq_queue_timer_open(timer->queue);
1811  		mutex_unlock(&q->timer_mutex);
1812  		queuefree(q);
1813  	} else {
1814  		return -EPERM;
1815  	}
1816  
1817  	return result;
1818  }
1819  
1820  
1821  /* GET_QUEUE_CLIENT ioctl() */
snd_seq_ioctl_get_queue_client(struct snd_seq_client * client,void * arg)1822  static int snd_seq_ioctl_get_queue_client(struct snd_seq_client *client,
1823  					  void *arg)
1824  {
1825  	struct snd_seq_queue_client *info = arg;
1826  	int used;
1827  
1828  	used = snd_seq_queue_is_used(info->queue, client->number);
1829  	if (used < 0)
1830  		return -EINVAL;
1831  	info->used = used;
1832  	info->client = client->number;
1833  
1834  	return 0;
1835  }
1836  
1837  
1838  /* SET_QUEUE_CLIENT ioctl() */
snd_seq_ioctl_set_queue_client(struct snd_seq_client * client,void * arg)1839  static int snd_seq_ioctl_set_queue_client(struct snd_seq_client *client,
1840  					  void *arg)
1841  {
1842  	struct snd_seq_queue_client *info = arg;
1843  	int err;
1844  
1845  	if (info->used >= 0) {
1846  		err = snd_seq_queue_use(info->queue, client->number, info->used);
1847  		if (err < 0)
1848  			return err;
1849  	}
1850  
1851  	return snd_seq_ioctl_get_queue_client(client, arg);
1852  }
1853  
1854  
1855  /* GET_CLIENT_POOL ioctl() */
snd_seq_ioctl_get_client_pool(struct snd_seq_client * client,void * arg)1856  static int snd_seq_ioctl_get_client_pool(struct snd_seq_client *client,
1857  					 void *arg)
1858  {
1859  	struct snd_seq_client_pool *info = arg;
1860  	struct snd_seq_client *cptr;
1861  
1862  	cptr = snd_seq_client_use_ptr(info->client);
1863  	if (cptr == NULL)
1864  		return -ENOENT;
1865  	memset(info, 0, sizeof(*info));
1866  	info->client = cptr->number;
1867  	info->output_pool = cptr->pool->size;
1868  	info->output_room = cptr->pool->room;
1869  	info->output_free = info->output_pool;
1870  	info->output_free = snd_seq_unused_cells(cptr->pool);
1871  	if (cptr->type == USER_CLIENT) {
1872  		info->input_pool = cptr->data.user.fifo_pool_size;
1873  		info->input_free = info->input_pool;
1874  		info->input_free = snd_seq_fifo_unused_cells(cptr->data.user.fifo);
1875  	} else {
1876  		info->input_pool = 0;
1877  		info->input_free = 0;
1878  	}
1879  	snd_seq_client_unlock(cptr);
1880  
1881  	return 0;
1882  }
1883  
1884  /* SET_CLIENT_POOL ioctl() */
snd_seq_ioctl_set_client_pool(struct snd_seq_client * client,void * arg)1885  static int snd_seq_ioctl_set_client_pool(struct snd_seq_client *client,
1886  					 void *arg)
1887  {
1888  	struct snd_seq_client_pool *info = arg;
1889  	int rc;
1890  
1891  	if (client->number != info->client)
1892  		return -EINVAL; /* can't change other clients */
1893  
1894  	if (info->output_pool >= 1 && info->output_pool <= SNDRV_SEQ_MAX_EVENTS &&
1895  	    (! snd_seq_write_pool_allocated(client) ||
1896  	     info->output_pool != client->pool->size)) {
1897  		if (snd_seq_write_pool_allocated(client)) {
1898  			/* is the pool in use? */
1899  			if (atomic_read(&client->pool->counter))
1900  				return -EBUSY;
1901  			/* remove all existing cells */
1902  			snd_seq_pool_mark_closing(client->pool);
1903  			snd_seq_pool_done(client->pool);
1904  		}
1905  		client->pool->size = info->output_pool;
1906  		rc = snd_seq_pool_init(client->pool);
1907  		if (rc < 0)
1908  			return rc;
1909  	}
1910  	if (client->type == USER_CLIENT && client->data.user.fifo != NULL &&
1911  	    info->input_pool >= 1 &&
1912  	    info->input_pool <= SNDRV_SEQ_MAX_CLIENT_EVENTS &&
1913  	    info->input_pool != client->data.user.fifo_pool_size) {
1914  		/* change pool size */
1915  		rc = snd_seq_fifo_resize(client->data.user.fifo, info->input_pool);
1916  		if (rc < 0)
1917  			return rc;
1918  		client->data.user.fifo_pool_size = info->input_pool;
1919  	}
1920  	if (info->output_room >= 1 &&
1921  	    info->output_room <= client->pool->size) {
1922  		client->pool->room  = info->output_room;
1923  	}
1924  
1925  	return snd_seq_ioctl_get_client_pool(client, arg);
1926  }
1927  
1928  
1929  /* REMOVE_EVENTS ioctl() */
snd_seq_ioctl_remove_events(struct snd_seq_client * client,void * arg)1930  static int snd_seq_ioctl_remove_events(struct snd_seq_client *client,
1931  				       void *arg)
1932  {
1933  	struct snd_seq_remove_events *info = arg;
1934  
1935  	/*
1936  	 * Input mostly not implemented XXX.
1937  	 */
1938  	if (info->remove_mode & SNDRV_SEQ_REMOVE_INPUT) {
1939  		/*
1940  		 * No restrictions so for a user client we can clear
1941  		 * the whole fifo
1942  		 */
1943  		if (client->type == USER_CLIENT && client->data.user.fifo)
1944  			snd_seq_fifo_clear(client->data.user.fifo);
1945  	}
1946  
1947  	if (info->remove_mode & SNDRV_SEQ_REMOVE_OUTPUT)
1948  		snd_seq_queue_remove_cells(client->number, info);
1949  
1950  	return 0;
1951  }
1952  
1953  
1954  /*
1955   * get subscription info
1956   */
snd_seq_ioctl_get_subscription(struct snd_seq_client * client,void * arg)1957  static int snd_seq_ioctl_get_subscription(struct snd_seq_client *client,
1958  					  void *arg)
1959  {
1960  	struct snd_seq_port_subscribe *subs = arg;
1961  	int result;
1962  	struct snd_seq_client *sender = NULL;
1963  	struct snd_seq_client_port *sport = NULL;
1964  
1965  	result = -EINVAL;
1966  	sender = snd_seq_client_use_ptr(subs->sender.client);
1967  	if (!sender)
1968  		goto __end;
1969  	sport = snd_seq_port_use_ptr(sender, subs->sender.port);
1970  	if (!sport)
1971  		goto __end;
1972  	result = snd_seq_port_get_subscription(&sport->c_src, &subs->dest,
1973  					       subs);
1974        __end:
1975        	if (sport)
1976  		snd_seq_port_unlock(sport);
1977  	if (sender)
1978  		snd_seq_client_unlock(sender);
1979  
1980  	return result;
1981  }
1982  
1983  
1984  /*
1985   * get subscription info - check only its presence
1986   */
snd_seq_ioctl_query_subs(struct snd_seq_client * client,void * arg)1987  static int snd_seq_ioctl_query_subs(struct snd_seq_client *client, void *arg)
1988  {
1989  	struct snd_seq_query_subs *subs = arg;
1990  	int result = -ENXIO;
1991  	struct snd_seq_client *cptr = NULL;
1992  	struct snd_seq_client_port *port = NULL;
1993  	struct snd_seq_port_subs_info *group;
1994  	struct list_head *p;
1995  	int i;
1996  
1997  	cptr = snd_seq_client_use_ptr(subs->root.client);
1998  	if (!cptr)
1999  		goto __end;
2000  	port = snd_seq_port_use_ptr(cptr, subs->root.port);
2001  	if (!port)
2002  		goto __end;
2003  
2004  	switch (subs->type) {
2005  	case SNDRV_SEQ_QUERY_SUBS_READ:
2006  		group = &port->c_src;
2007  		break;
2008  	case SNDRV_SEQ_QUERY_SUBS_WRITE:
2009  		group = &port->c_dest;
2010  		break;
2011  	default:
2012  		goto __end;
2013  	}
2014  
2015  	down_read(&group->list_mutex);
2016  	/* search for the subscriber */
2017  	subs->num_subs = group->count;
2018  	i = 0;
2019  	result = -ENOENT;
2020  	list_for_each(p, &group->list_head) {
2021  		if (i++ == subs->index) {
2022  			/* found! */
2023  			struct snd_seq_subscribers *s;
2024  			if (subs->type == SNDRV_SEQ_QUERY_SUBS_READ) {
2025  				s = list_entry(p, struct snd_seq_subscribers, src_list);
2026  				subs->addr = s->info.dest;
2027  			} else {
2028  				s = list_entry(p, struct snd_seq_subscribers, dest_list);
2029  				subs->addr = s->info.sender;
2030  			}
2031  			subs->flags = s->info.flags;
2032  			subs->queue = s->info.queue;
2033  			result = 0;
2034  			break;
2035  		}
2036  	}
2037  	up_read(&group->list_mutex);
2038  
2039        __end:
2040     	if (port)
2041  		snd_seq_port_unlock(port);
2042  	if (cptr)
2043  		snd_seq_client_unlock(cptr);
2044  
2045  	return result;
2046  }
2047  
2048  
2049  /*
2050   * query next client
2051   */
snd_seq_ioctl_query_next_client(struct snd_seq_client * client,void * arg)2052  static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
2053  					   void *arg)
2054  {
2055  	struct snd_seq_client_info *info = arg;
2056  	struct snd_seq_client *cptr = NULL;
2057  
2058  	/* search for next client */
2059  	if (info->client < INT_MAX)
2060  		info->client++;
2061  	if (info->client < 0)
2062  		info->client = 0;
2063  	for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) {
2064  		cptr = snd_seq_client_use_ptr(info->client);
2065  		if (cptr)
2066  			break; /* found */
2067  	}
2068  	if (cptr == NULL)
2069  		return -ENOENT;
2070  
2071  	get_client_info(cptr, info);
2072  	snd_seq_client_unlock(cptr);
2073  
2074  	return 0;
2075  }
2076  
2077  /*
2078   * query next port
2079   */
snd_seq_ioctl_query_next_port(struct snd_seq_client * client,void * arg)2080  static int snd_seq_ioctl_query_next_port(struct snd_seq_client *client,
2081  					 void *arg)
2082  {
2083  	struct snd_seq_port_info *info = arg;
2084  	struct snd_seq_client *cptr;
2085  	struct snd_seq_client_port *port = NULL;
2086  
2087  	cptr = snd_seq_client_use_ptr(info->addr.client);
2088  	if (cptr == NULL)
2089  		return -ENXIO;
2090  
2091  	/* search for next port */
2092  	info->addr.port++;
2093  	port = snd_seq_port_query_nearest(cptr, info);
2094  	if (port == NULL) {
2095  		snd_seq_client_unlock(cptr);
2096  		return -ENOENT;
2097  	}
2098  
2099  	/* get port info */
2100  	info->addr = port->addr;
2101  	snd_seq_get_port_info(port, info);
2102  	snd_seq_port_unlock(port);
2103  	snd_seq_client_unlock(cptr);
2104  
2105  	return 0;
2106  }
2107  
2108  #if IS_ENABLED(CONFIG_SND_SEQ_UMP)
2109  #define NUM_UMP_INFOS (SNDRV_UMP_MAX_BLOCKS + 1)
2110  
free_ump_info(struct snd_seq_client * client)2111  static void free_ump_info(struct snd_seq_client *client)
2112  {
2113  	int i;
2114  
2115  	if (!client->ump_info)
2116  		return;
2117  	for (i = 0; i < NUM_UMP_INFOS; i++)
2118  		kfree(client->ump_info[i]);
2119  	kfree(client->ump_info);
2120  	client->ump_info = NULL;
2121  }
2122  
terminate_ump_info_strings(void * p,int type)2123  static void terminate_ump_info_strings(void *p, int type)
2124  {
2125  	if (type == SNDRV_SEQ_CLIENT_UMP_INFO_ENDPOINT) {
2126  		struct snd_ump_endpoint_info *ep = p;
2127  		ep->name[sizeof(ep->name) - 1] = 0;
2128  	} else {
2129  		struct snd_ump_block_info *bp = p;
2130  		bp->name[sizeof(bp->name) - 1] = 0;
2131  	}
2132  }
2133  
2134  #ifdef CONFIG_SND_PROC_FS
dump_ump_info(struct snd_info_buffer * buffer,struct snd_seq_client * client)2135  static void dump_ump_info(struct snd_info_buffer *buffer,
2136  			  struct snd_seq_client *client)
2137  {
2138  	struct snd_ump_endpoint_info *ep;
2139  	struct snd_ump_block_info *bp;
2140  	int i;
2141  
2142  	if (!client->ump_info)
2143  		return;
2144  	ep = client->ump_info[SNDRV_SEQ_CLIENT_UMP_INFO_ENDPOINT];
2145  	if (ep && *ep->name)
2146  		snd_iprintf(buffer, "  UMP Endpoint: \"%s\"\n", ep->name);
2147  	for (i = 0; i < SNDRV_UMP_MAX_BLOCKS; i++) {
2148  		bp = client->ump_info[i + 1];
2149  		if (bp && *bp->name) {
2150  			snd_iprintf(buffer, "  UMP Block %d: \"%s\" [%s]\n",
2151  				    i, bp->name,
2152  				    bp->active ? "Active" : "Inactive");
2153  			snd_iprintf(buffer, "    Groups: %d-%d\n",
2154  				    bp->first_group + 1,
2155  				    bp->first_group + bp->num_groups);
2156  		}
2157  	}
2158  }
2159  #endif
2160  
2161  /* UMP-specific ioctls -- called directly without data copy */
snd_seq_ioctl_client_ump_info(struct snd_seq_client * caller,unsigned int cmd,unsigned long arg)2162  static int snd_seq_ioctl_client_ump_info(struct snd_seq_client *caller,
2163  					 unsigned int cmd,
2164  					 unsigned long arg)
2165  {
2166  	struct snd_seq_client_ump_info __user *argp =
2167  		(struct snd_seq_client_ump_info __user *)arg;
2168  	struct snd_seq_client *cptr;
2169  	int client, type, err = 0;
2170  	size_t size;
2171  	void *p;
2172  
2173  	if (get_user(client, &argp->client) || get_user(type, &argp->type))
2174  		return -EFAULT;
2175  	if (cmd == SNDRV_SEQ_IOCTL_SET_CLIENT_UMP_INFO &&
2176  	    caller->number != client)
2177  		return -EPERM;
2178  	if (type < 0 || type >= NUM_UMP_INFOS)
2179  		return -EINVAL;
2180  	if (type == SNDRV_SEQ_CLIENT_UMP_INFO_ENDPOINT)
2181  		size = sizeof(struct snd_ump_endpoint_info);
2182  	else
2183  		size = sizeof(struct snd_ump_block_info);
2184  	cptr = snd_seq_client_use_ptr(client);
2185  	if (!cptr)
2186  		return -ENOENT;
2187  
2188  	mutex_lock(&cptr->ioctl_mutex);
2189  	if (!cptr->midi_version) {
2190  		err = -EBADFD;
2191  		goto error;
2192  	}
2193  
2194  	if (cmd == SNDRV_SEQ_IOCTL_GET_CLIENT_UMP_INFO) {
2195  		if (!cptr->ump_info)
2196  			p = NULL;
2197  		else
2198  			p = cptr->ump_info[type];
2199  		if (!p) {
2200  			err = -ENODEV;
2201  			goto error;
2202  		}
2203  		if (copy_to_user(argp->info, p, size)) {
2204  			err = -EFAULT;
2205  			goto error;
2206  		}
2207  	} else {
2208  		if (cptr->type != USER_CLIENT) {
2209  			err = -EBADFD;
2210  			goto error;
2211  		}
2212  		if (!cptr->ump_info) {
2213  			cptr->ump_info = kcalloc(NUM_UMP_INFOS,
2214  						 sizeof(void *), GFP_KERNEL);
2215  			if (!cptr->ump_info) {
2216  				err = -ENOMEM;
2217  				goto error;
2218  			}
2219  		}
2220  		p = memdup_user(argp->info, size);
2221  		if (IS_ERR(p)) {
2222  			err = PTR_ERR(p);
2223  			goto error;
2224  		}
2225  		kfree(cptr->ump_info[type]);
2226  		terminate_ump_info_strings(p, type);
2227  		cptr->ump_info[type] = p;
2228  	}
2229  
2230   error:
2231  	mutex_unlock(&cptr->ioctl_mutex);
2232  	snd_seq_client_unlock(cptr);
2233  	return err;
2234  }
2235  #endif
2236  
2237  /* -------------------------------------------------------- */
2238  
2239  static const struct ioctl_handler {
2240  	unsigned int cmd;
2241  	int (*func)(struct snd_seq_client *client, void *arg);
2242  } ioctl_handlers[] = {
2243  	{ SNDRV_SEQ_IOCTL_PVERSION, snd_seq_ioctl_pversion },
2244  	{ SNDRV_SEQ_IOCTL_USER_PVERSION, snd_seq_ioctl_user_pversion },
2245  	{ SNDRV_SEQ_IOCTL_CLIENT_ID, snd_seq_ioctl_client_id },
2246  	{ SNDRV_SEQ_IOCTL_SYSTEM_INFO, snd_seq_ioctl_system_info },
2247  	{ SNDRV_SEQ_IOCTL_RUNNING_MODE, snd_seq_ioctl_running_mode },
2248  	{ SNDRV_SEQ_IOCTL_GET_CLIENT_INFO, snd_seq_ioctl_get_client_info },
2249  	{ SNDRV_SEQ_IOCTL_SET_CLIENT_INFO, snd_seq_ioctl_set_client_info },
2250  	{ SNDRV_SEQ_IOCTL_CREATE_PORT, snd_seq_ioctl_create_port },
2251  	{ SNDRV_SEQ_IOCTL_DELETE_PORT, snd_seq_ioctl_delete_port },
2252  	{ SNDRV_SEQ_IOCTL_GET_PORT_INFO, snd_seq_ioctl_get_port_info },
2253  	{ SNDRV_SEQ_IOCTL_SET_PORT_INFO, snd_seq_ioctl_set_port_info },
2254  	{ SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT, snd_seq_ioctl_subscribe_port },
2255  	{ SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT, snd_seq_ioctl_unsubscribe_port },
2256  	{ SNDRV_SEQ_IOCTL_CREATE_QUEUE, snd_seq_ioctl_create_queue },
2257  	{ SNDRV_SEQ_IOCTL_DELETE_QUEUE, snd_seq_ioctl_delete_queue },
2258  	{ SNDRV_SEQ_IOCTL_GET_QUEUE_INFO, snd_seq_ioctl_get_queue_info },
2259  	{ SNDRV_SEQ_IOCTL_SET_QUEUE_INFO, snd_seq_ioctl_set_queue_info },
2260  	{ SNDRV_SEQ_IOCTL_GET_NAMED_QUEUE, snd_seq_ioctl_get_named_queue },
2261  	{ SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS, snd_seq_ioctl_get_queue_status },
2262  	{ SNDRV_SEQ_IOCTL_GET_QUEUE_TEMPO, snd_seq_ioctl_get_queue_tempo },
2263  	{ SNDRV_SEQ_IOCTL_SET_QUEUE_TEMPO, snd_seq_ioctl_set_queue_tempo },
2264  	{ SNDRV_SEQ_IOCTL_GET_QUEUE_TIMER, snd_seq_ioctl_get_queue_timer },
2265  	{ SNDRV_SEQ_IOCTL_SET_QUEUE_TIMER, snd_seq_ioctl_set_queue_timer },
2266  	{ SNDRV_SEQ_IOCTL_GET_QUEUE_CLIENT, snd_seq_ioctl_get_queue_client },
2267  	{ SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT, snd_seq_ioctl_set_queue_client },
2268  	{ SNDRV_SEQ_IOCTL_GET_CLIENT_POOL, snd_seq_ioctl_get_client_pool },
2269  	{ SNDRV_SEQ_IOCTL_SET_CLIENT_POOL, snd_seq_ioctl_set_client_pool },
2270  	{ SNDRV_SEQ_IOCTL_GET_SUBSCRIPTION, snd_seq_ioctl_get_subscription },
2271  	{ SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT, snd_seq_ioctl_query_next_client },
2272  	{ SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT, snd_seq_ioctl_query_next_port },
2273  	{ SNDRV_SEQ_IOCTL_REMOVE_EVENTS, snd_seq_ioctl_remove_events },
2274  	{ SNDRV_SEQ_IOCTL_QUERY_SUBS, snd_seq_ioctl_query_subs },
2275  	{ 0, NULL },
2276  };
2277  
snd_seq_ioctl(struct file * file,unsigned int cmd,unsigned long arg)2278  static long snd_seq_ioctl(struct file *file, unsigned int cmd,
2279  			  unsigned long arg)
2280  {
2281  	struct snd_seq_client *client = file->private_data;
2282  	/* To use kernel stack for ioctl data. */
2283  	union {
2284  		int pversion;
2285  		int client_id;
2286  		struct snd_seq_system_info	system_info;
2287  		struct snd_seq_running_info	running_info;
2288  		struct snd_seq_client_info	client_info;
2289  		struct snd_seq_port_info	port_info;
2290  		struct snd_seq_port_subscribe	port_subscribe;
2291  		struct snd_seq_queue_info	queue_info;
2292  		struct snd_seq_queue_status	queue_status;
2293  		struct snd_seq_queue_tempo	tempo;
2294  		struct snd_seq_queue_timer	queue_timer;
2295  		struct snd_seq_queue_client	queue_client;
2296  		struct snd_seq_client_pool	client_pool;
2297  		struct snd_seq_remove_events	remove_events;
2298  		struct snd_seq_query_subs	query_subs;
2299  	} buf;
2300  	const struct ioctl_handler *handler;
2301  	unsigned long size;
2302  	int err;
2303  
2304  	if (snd_BUG_ON(!client))
2305  		return -ENXIO;
2306  
2307  #if IS_ENABLED(CONFIG_SND_SEQ_UMP)
2308  	/* exception - handling large data */
2309  	switch (cmd) {
2310  	case SNDRV_SEQ_IOCTL_GET_CLIENT_UMP_INFO:
2311  	case SNDRV_SEQ_IOCTL_SET_CLIENT_UMP_INFO:
2312  		return snd_seq_ioctl_client_ump_info(client, cmd, arg);
2313  	}
2314  #endif
2315  
2316  	for (handler = ioctl_handlers; handler->cmd > 0; ++handler) {
2317  		if (handler->cmd == cmd)
2318  			break;
2319  	}
2320  	if (handler->cmd == 0)
2321  		return -ENOTTY;
2322  
2323  	memset(&buf, 0, sizeof(buf));
2324  
2325  	/*
2326  	 * All of ioctl commands for ALSA sequencer get an argument of size
2327  	 * within 13 bits. We can safely pick up the size from the command.
2328  	 */
2329  	size = _IOC_SIZE(handler->cmd);
2330  	if (handler->cmd & IOC_IN) {
2331  		if (copy_from_user(&buf, (const void __user *)arg, size))
2332  			return -EFAULT;
2333  	}
2334  
2335  	mutex_lock(&client->ioctl_mutex);
2336  	err = handler->func(client, &buf);
2337  	mutex_unlock(&client->ioctl_mutex);
2338  	if (err >= 0) {
2339  		/* Some commands includes a bug in 'dir' field. */
2340  		if (handler->cmd == SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT ||
2341  		    handler->cmd == SNDRV_SEQ_IOCTL_SET_CLIENT_POOL ||
2342  		    (handler->cmd & IOC_OUT))
2343  			if (copy_to_user((void __user *)arg, &buf, size))
2344  				return -EFAULT;
2345  	}
2346  
2347  	return err;
2348  }
2349  
2350  #ifdef CONFIG_COMPAT
2351  #include "seq_compat.c"
2352  #else
2353  #define snd_seq_ioctl_compat	NULL
2354  #endif
2355  
2356  /* -------------------------------------------------------- */
2357  
2358  
2359  /* exported to kernel modules */
snd_seq_create_kernel_client(struct snd_card * card,int client_index,const char * name_fmt,...)2360  int snd_seq_create_kernel_client(struct snd_card *card, int client_index,
2361  				 const char *name_fmt, ...)
2362  {
2363  	struct snd_seq_client *client;
2364  	va_list args;
2365  
2366  	if (snd_BUG_ON(in_interrupt()))
2367  		return -EBUSY;
2368  
2369  	if (card && client_index >= SNDRV_SEQ_CLIENTS_PER_CARD)
2370  		return -EINVAL;
2371  	if (card == NULL && client_index >= SNDRV_SEQ_GLOBAL_CLIENTS)
2372  		return -EINVAL;
2373  
2374  	mutex_lock(&register_mutex);
2375  
2376  	if (card) {
2377  		client_index += SNDRV_SEQ_GLOBAL_CLIENTS
2378  			+ card->number * SNDRV_SEQ_CLIENTS_PER_CARD;
2379  		if (client_index >= SNDRV_SEQ_DYNAMIC_CLIENTS_BEGIN)
2380  			client_index = -1;
2381  	}
2382  
2383  	/* empty write queue as default */
2384  	client = seq_create_client1(client_index, 0);
2385  	if (client == NULL) {
2386  		mutex_unlock(&register_mutex);
2387  		return -EBUSY;	/* failure code */
2388  	}
2389  	usage_alloc(&client_usage, 1);
2390  
2391  	client->accept_input = 1;
2392  	client->accept_output = 1;
2393  	client->data.kernel.card = card;
2394  	client->user_pversion = SNDRV_SEQ_VERSION;
2395  
2396  	va_start(args, name_fmt);
2397  	vsnprintf(client->name, sizeof(client->name), name_fmt, args);
2398  	va_end(args);
2399  
2400  	client->type = KERNEL_CLIENT;
2401  	mutex_unlock(&register_mutex);
2402  
2403  	/* make others aware this new client */
2404  	snd_seq_system_client_ev_client_start(client->number);
2405  
2406  	/* return client number to caller */
2407  	return client->number;
2408  }
2409  EXPORT_SYMBOL(snd_seq_create_kernel_client);
2410  
2411  /* exported to kernel modules */
snd_seq_delete_kernel_client(int client)2412  int snd_seq_delete_kernel_client(int client)
2413  {
2414  	struct snd_seq_client *ptr;
2415  
2416  	if (snd_BUG_ON(in_interrupt()))
2417  		return -EBUSY;
2418  
2419  	ptr = clientptr(client);
2420  	if (ptr == NULL)
2421  		return -EINVAL;
2422  
2423  	seq_free_client(ptr);
2424  	kfree(ptr);
2425  	return 0;
2426  }
2427  EXPORT_SYMBOL(snd_seq_delete_kernel_client);
2428  
2429  /*
2430   * exported, called by kernel clients to enqueue events (w/o blocking)
2431   *
2432   * RETURN VALUE: zero if succeed, negative if error
2433   */
snd_seq_kernel_client_enqueue(int client,struct snd_seq_event * ev,struct file * file,bool blocking)2434  int snd_seq_kernel_client_enqueue(int client, struct snd_seq_event *ev,
2435  				  struct file *file, bool blocking)
2436  {
2437  	struct snd_seq_client *cptr;
2438  	int result;
2439  
2440  	if (snd_BUG_ON(!ev))
2441  		return -EINVAL;
2442  
2443  	if (!snd_seq_ev_is_ump(ev)) {
2444  		if (ev->type == SNDRV_SEQ_EVENT_NONE)
2445  			return 0; /* ignore this */
2446  		if (ev->type == SNDRV_SEQ_EVENT_KERNEL_ERROR)
2447  			return -EINVAL; /* quoted events can't be enqueued */
2448  	}
2449  
2450  	/* fill in client number */
2451  	ev->source.client = client;
2452  
2453  	if (check_event_type_and_length(ev))
2454  		return -EINVAL;
2455  
2456  	cptr = snd_seq_client_use_ptr(client);
2457  	if (cptr == NULL)
2458  		return -EINVAL;
2459  
2460  	if (!cptr->accept_output) {
2461  		result = -EPERM;
2462  	} else { /* send it */
2463  		mutex_lock(&cptr->ioctl_mutex);
2464  		result = snd_seq_client_enqueue_event(cptr, ev, file, blocking,
2465  						      false, 0,
2466  						      &cptr->ioctl_mutex);
2467  		mutex_unlock(&cptr->ioctl_mutex);
2468  	}
2469  
2470  	snd_seq_client_unlock(cptr);
2471  	return result;
2472  }
2473  EXPORT_SYMBOL(snd_seq_kernel_client_enqueue);
2474  
2475  /*
2476   * exported, called by kernel clients to dispatch events directly to other
2477   * clients, bypassing the queues.  Event time-stamp will be updated.
2478   *
2479   * RETURN VALUE: negative = delivery failed,
2480   *		 zero, or positive: the number of delivered events
2481   */
snd_seq_kernel_client_dispatch(int client,struct snd_seq_event * ev,int atomic,int hop)2482  int snd_seq_kernel_client_dispatch(int client, struct snd_seq_event * ev,
2483  				   int atomic, int hop)
2484  {
2485  	struct snd_seq_client *cptr;
2486  	int result;
2487  
2488  	if (snd_BUG_ON(!ev))
2489  		return -EINVAL;
2490  
2491  	/* fill in client number */
2492  	ev->queue = SNDRV_SEQ_QUEUE_DIRECT;
2493  	ev->source.client = client;
2494  
2495  	if (check_event_type_and_length(ev))
2496  		return -EINVAL;
2497  
2498  	cptr = snd_seq_client_use_ptr(client);
2499  	if (cptr == NULL)
2500  		return -EINVAL;
2501  
2502  	if (!cptr->accept_output)
2503  		result = -EPERM;
2504  	else
2505  		result = snd_seq_deliver_event(cptr, ev, atomic, hop);
2506  
2507  	snd_seq_client_unlock(cptr);
2508  	return result;
2509  }
2510  EXPORT_SYMBOL(snd_seq_kernel_client_dispatch);
2511  
2512  /**
2513   * snd_seq_kernel_client_ctl - operate a command for a client with data in
2514   *			       kernel space.
2515   * @clientid:	A numerical ID for a client.
2516   * @cmd:	An ioctl(2) command for ALSA sequencer operation.
2517   * @arg:	A pointer to data in kernel space.
2518   *
2519   * Against its name, both kernel/application client can be handled by this
2520   * kernel API. A pointer of 'arg' argument should be in kernel space.
2521   *
2522   * Return: 0 at success. Negative error code at failure.
2523   */
snd_seq_kernel_client_ctl(int clientid,unsigned int cmd,void * arg)2524  int snd_seq_kernel_client_ctl(int clientid, unsigned int cmd, void *arg)
2525  {
2526  	const struct ioctl_handler *handler;
2527  	struct snd_seq_client *client;
2528  
2529  	client = clientptr(clientid);
2530  	if (client == NULL)
2531  		return -ENXIO;
2532  
2533  	for (handler = ioctl_handlers; handler->cmd > 0; ++handler) {
2534  		if (handler->cmd == cmd)
2535  			return handler->func(client, arg);
2536  	}
2537  
2538  	pr_debug("ALSA: seq unknown ioctl() 0x%x (type='%c', number=0x%02x)\n",
2539  		 cmd, _IOC_TYPE(cmd), _IOC_NR(cmd));
2540  	return -ENOTTY;
2541  }
2542  EXPORT_SYMBOL(snd_seq_kernel_client_ctl);
2543  
2544  /* exported (for OSS emulator) */
snd_seq_kernel_client_write_poll(int clientid,struct file * file,poll_table * wait)2545  int snd_seq_kernel_client_write_poll(int clientid, struct file *file, poll_table *wait)
2546  {
2547  	struct snd_seq_client *client;
2548  
2549  	client = clientptr(clientid);
2550  	if (client == NULL)
2551  		return -ENXIO;
2552  
2553  	if (! snd_seq_write_pool_allocated(client))
2554  		return 1;
2555  	if (snd_seq_pool_poll_wait(client->pool, file, wait))
2556  		return 1;
2557  	return 0;
2558  }
2559  EXPORT_SYMBOL(snd_seq_kernel_client_write_poll);
2560  
2561  /* get a sequencer client object; for internal use from a kernel client */
snd_seq_kernel_client_get(int id)2562  struct snd_seq_client *snd_seq_kernel_client_get(int id)
2563  {
2564  	return snd_seq_client_use_ptr(id);
2565  }
2566  EXPORT_SYMBOL_GPL(snd_seq_kernel_client_get);
2567  
2568  /* put a sequencer client object; for internal use from a kernel client */
snd_seq_kernel_client_put(struct snd_seq_client * cptr)2569  void snd_seq_kernel_client_put(struct snd_seq_client *cptr)
2570  {
2571  	if (cptr)
2572  		snd_seq_client_unlock(cptr);
2573  }
2574  EXPORT_SYMBOL_GPL(snd_seq_kernel_client_put);
2575  
2576  /*---------------------------------------------------------------------------*/
2577  
2578  #ifdef CONFIG_SND_PROC_FS
2579  /*
2580   *  /proc interface
2581   */
snd_seq_info_dump_subscribers(struct snd_info_buffer * buffer,struct snd_seq_port_subs_info * group,int is_src,char * msg)2582  static void snd_seq_info_dump_subscribers(struct snd_info_buffer *buffer,
2583  					  struct snd_seq_port_subs_info *group,
2584  					  int is_src, char *msg)
2585  {
2586  	struct list_head *p;
2587  	struct snd_seq_subscribers *s;
2588  	int count = 0;
2589  
2590  	down_read(&group->list_mutex);
2591  	if (list_empty(&group->list_head)) {
2592  		up_read(&group->list_mutex);
2593  		return;
2594  	}
2595  	snd_iprintf(buffer, msg);
2596  	list_for_each(p, &group->list_head) {
2597  		if (is_src)
2598  			s = list_entry(p, struct snd_seq_subscribers, src_list);
2599  		else
2600  			s = list_entry(p, struct snd_seq_subscribers, dest_list);
2601  		if (count++)
2602  			snd_iprintf(buffer, ", ");
2603  		snd_iprintf(buffer, "%d:%d",
2604  			    is_src ? s->info.dest.client : s->info.sender.client,
2605  			    is_src ? s->info.dest.port : s->info.sender.port);
2606  		if (s->info.flags & SNDRV_SEQ_PORT_SUBS_TIMESTAMP)
2607  			snd_iprintf(buffer, "[%c:%d]", ((s->info.flags & SNDRV_SEQ_PORT_SUBS_TIME_REAL) ? 'r' : 't'), s->info.queue);
2608  		if (group->exclusive)
2609  			snd_iprintf(buffer, "[ex]");
2610  	}
2611  	up_read(&group->list_mutex);
2612  	snd_iprintf(buffer, "\n");
2613  }
2614  
2615  #define FLAG_PERM_RD(perm) ((perm) & SNDRV_SEQ_PORT_CAP_READ ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_READ ? 'R' : 'r') : '-')
2616  #define FLAG_PERM_WR(perm) ((perm) & SNDRV_SEQ_PORT_CAP_WRITE ? ((perm) & SNDRV_SEQ_PORT_CAP_SUBS_WRITE ? 'W' : 'w') : '-')
2617  #define FLAG_PERM_EX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_NO_EXPORT ? '-' : 'e')
2618  
2619  #define FLAG_PERM_DUPLEX(perm) ((perm) & SNDRV_SEQ_PORT_CAP_DUPLEX ? 'X' : '-')
2620  
port_direction_name(unsigned char dir)2621  static const char *port_direction_name(unsigned char dir)
2622  {
2623  	static const char *names[4] = {
2624  		"-", "In", "Out", "In/Out"
2625  	};
2626  
2627  	if (dir > SNDRV_SEQ_PORT_DIR_BIDIRECTION)
2628  		return "Invalid";
2629  	return names[dir];
2630  }
2631  
snd_seq_info_dump_ports(struct snd_info_buffer * buffer,struct snd_seq_client * client)2632  static void snd_seq_info_dump_ports(struct snd_info_buffer *buffer,
2633  				    struct snd_seq_client *client)
2634  {
2635  	struct snd_seq_client_port *p;
2636  
2637  	mutex_lock(&client->ports_mutex);
2638  	list_for_each_entry(p, &client->ports_list_head, list) {
2639  		if (p->capability & SNDRV_SEQ_PORT_CAP_INACTIVE)
2640  			continue;
2641  		snd_iprintf(buffer, "  Port %3d : \"%s\" (%c%c%c%c) [%s]\n",
2642  			    p->addr.port, p->name,
2643  			    FLAG_PERM_RD(p->capability),
2644  			    FLAG_PERM_WR(p->capability),
2645  			    FLAG_PERM_EX(p->capability),
2646  			    FLAG_PERM_DUPLEX(p->capability),
2647  			    port_direction_name(p->direction));
2648  		snd_seq_info_dump_subscribers(buffer, &p->c_src, 1, "    Connecting To: ");
2649  		snd_seq_info_dump_subscribers(buffer, &p->c_dest, 0, "    Connected From: ");
2650  	}
2651  	mutex_unlock(&client->ports_mutex);
2652  }
2653  
midi_version_string(unsigned int version)2654  static const char *midi_version_string(unsigned int version)
2655  {
2656  	switch (version) {
2657  	case SNDRV_SEQ_CLIENT_LEGACY_MIDI:
2658  		return "Legacy";
2659  	case SNDRV_SEQ_CLIENT_UMP_MIDI_1_0:
2660  		return "UMP MIDI1";
2661  	case SNDRV_SEQ_CLIENT_UMP_MIDI_2_0:
2662  		return "UMP MIDI2";
2663  	default:
2664  		return "Unknown";
2665  	}
2666  }
2667  
2668  /* exported to seq_info.c */
snd_seq_info_clients_read(struct snd_info_entry * entry,struct snd_info_buffer * buffer)2669  void snd_seq_info_clients_read(struct snd_info_entry *entry,
2670  			       struct snd_info_buffer *buffer)
2671  {
2672  	int c;
2673  	struct snd_seq_client *client;
2674  
2675  	snd_iprintf(buffer, "Client info\n");
2676  	snd_iprintf(buffer, "  cur  clients : %d\n", client_usage.cur);
2677  	snd_iprintf(buffer, "  peak clients : %d\n", client_usage.peak);
2678  	snd_iprintf(buffer, "  max  clients : %d\n", SNDRV_SEQ_MAX_CLIENTS);
2679  	snd_iprintf(buffer, "\n");
2680  
2681  	/* list the client table */
2682  	for (c = 0; c < SNDRV_SEQ_MAX_CLIENTS; c++) {
2683  		client = snd_seq_client_use_ptr(c);
2684  		if (client == NULL)
2685  			continue;
2686  		if (client->type == NO_CLIENT) {
2687  			snd_seq_client_unlock(client);
2688  			continue;
2689  		}
2690  
2691  		snd_iprintf(buffer, "Client %3d : \"%s\" [%s %s]\n",
2692  			    c, client->name,
2693  			    client->type == USER_CLIENT ? "User" : "Kernel",
2694  			    midi_version_string(client->midi_version));
2695  #if IS_ENABLED(CONFIG_SND_SEQ_UMP)
2696  		dump_ump_info(buffer, client);
2697  #endif
2698  		snd_seq_info_dump_ports(buffer, client);
2699  		if (snd_seq_write_pool_allocated(client)) {
2700  			snd_iprintf(buffer, "  Output pool :\n");
2701  			snd_seq_info_pool(buffer, client->pool, "    ");
2702  		}
2703  		if (client->type == USER_CLIENT && client->data.user.fifo &&
2704  		    client->data.user.fifo->pool) {
2705  			snd_iprintf(buffer, "  Input pool :\n");
2706  			snd_seq_info_pool(buffer, client->data.user.fifo->pool, "    ");
2707  		}
2708  		snd_seq_client_unlock(client);
2709  	}
2710  }
2711  #endif /* CONFIG_SND_PROC_FS */
2712  
2713  /*---------------------------------------------------------------------------*/
2714  
2715  
2716  /*
2717   *  REGISTRATION PART
2718   */
2719  
2720  static const struct file_operations snd_seq_f_ops =
2721  {
2722  	.owner =	THIS_MODULE,
2723  	.read =		snd_seq_read,
2724  	.write =	snd_seq_write,
2725  	.open =		snd_seq_open,
2726  	.release =	snd_seq_release,
2727  	.llseek =	no_llseek,
2728  	.poll =		snd_seq_poll,
2729  	.unlocked_ioctl =	snd_seq_ioctl,
2730  	.compat_ioctl =	snd_seq_ioctl_compat,
2731  };
2732  
2733  static struct device *seq_dev;
2734  
2735  /*
2736   * register sequencer device
2737   */
snd_sequencer_device_init(void)2738  int __init snd_sequencer_device_init(void)
2739  {
2740  	int err;
2741  
2742  	err = snd_device_alloc(&seq_dev, NULL);
2743  	if (err < 0)
2744  		return err;
2745  	dev_set_name(seq_dev, "seq");
2746  
2747  	mutex_lock(&register_mutex);
2748  	err = snd_register_device(SNDRV_DEVICE_TYPE_SEQUENCER, NULL, 0,
2749  				  &snd_seq_f_ops, NULL, seq_dev);
2750  	mutex_unlock(&register_mutex);
2751  	if (err < 0) {
2752  		put_device(seq_dev);
2753  		return err;
2754  	}
2755  
2756  	return 0;
2757  }
2758  
2759  
2760  
2761  /*
2762   * unregister sequencer device
2763   */
snd_sequencer_device_done(void)2764  void snd_sequencer_device_done(void)
2765  {
2766  	snd_unregister_device(seq_dev);
2767  	put_device(seq_dev);
2768  }
2769