/*
 *   Generic Instrument routines for ALSA sequencer
 *   Copyright (c) 1999 by Jaroslav Kysela <perex@suse.cz>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
21 #include <sound/driver.h>
22 #include <linux/init.h>
23 #include <linux/slab.h>
24 #include <sound/core.h>
25 #include "seq_clientmgr.h"
26 #include <sound/seq_instr.h>
27 #include <sound/initval.h>
29 MODULE_AUTHOR("Jaroslav Kysela <perex@suse.cz>");
30 MODULE_DESCRIPTION("Advanced Linux Sound Architecture sequencer instrument library.");
31 MODULE_LICENSE("GPL");
32 MODULE_CLASSES("{sound}");
33 MODULE_SUPPORTED_DEVICE("sound");
36 static void snd_instr_lock_ops(snd_seq_kinstr_list_t *list)
38 if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
39 spin_lock_irqsave(&list->ops_lock, list->ops_flags);
41 down(&list->ops_mutex);
45 static void snd_instr_unlock_ops(snd_seq_kinstr_list_t *list)
47 if (!(list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT)) {
48 spin_unlock_irqrestore(&list->ops_lock, list->ops_flags);
54 snd_seq_kcluster_t *snd_seq_cluster_new(int atomic)
56 snd_seq_kcluster_t *cluster;
58 cluster = (snd_seq_kcluster_t *) snd_kcalloc(sizeof(snd_seq_kcluster_t), atomic ? GFP_ATOMIC : GFP_KERNEL);
62 void snd_seq_cluster_free(snd_seq_kcluster_t *cluster, int atomic)
69 snd_seq_kinstr_t *snd_seq_instr_new(int add_len, int atomic)
71 snd_seq_kinstr_t *instr;
73 instr = (snd_seq_kinstr_t *) snd_kcalloc(sizeof(snd_seq_kinstr_t) + add_len, atomic ? GFP_ATOMIC : GFP_KERNEL);
76 instr->add_len = add_len;
80 int snd_seq_instr_free(snd_seq_kinstr_t *instr, int atomic)
86 if (instr->ops && instr->ops->remove)
87 result = instr->ops->remove(instr->ops->private_data, instr, 1);
93 snd_seq_kinstr_list_t *snd_seq_instr_list_new(void)
95 snd_seq_kinstr_list_t *list;
97 list = (snd_seq_kinstr_list_t *) snd_kcalloc(sizeof(snd_seq_kinstr_list_t), GFP_KERNEL);
100 spin_lock_init(&list->lock);
101 spin_lock_init(&list->ops_lock);
102 init_MUTEX(&list->ops_mutex);
107 void snd_seq_instr_list_free(snd_seq_kinstr_list_t **list_ptr)
109 snd_seq_kinstr_list_t *list;
110 snd_seq_kinstr_t *instr;
111 snd_seq_kcluster_t *cluster;
115 if (list_ptr == NULL)
122 for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
123 while ((instr = list->hash[idx]) != NULL) {
124 list->hash[idx] = instr->next;
126 spin_lock_irqsave(&list->lock, flags);
128 spin_unlock_irqrestore(&list->lock, flags);
129 set_current_state(TASK_INTERRUPTIBLE);
131 spin_lock_irqsave(&list->lock, flags);
133 spin_unlock_irqrestore(&list->lock, flags);
134 if (snd_seq_instr_free(instr, 0)<0)
135 snd_printk(KERN_WARNING "instrument free problem\n");
137 while ((cluster = list->chash[idx]) != NULL) {
138 list->chash[idx] = cluster->next;
140 snd_seq_cluster_free(cluster, 0);
146 static int instr_free_compare(snd_seq_kinstr_t *instr,
147 snd_seq_instr_header_t *ifree,
150 switch (ifree->cmd) {
151 case SNDRV_SEQ_INSTR_FREE_CMD_ALL:
152 /* all, except private for other clients */
153 if ((instr->instr.std & 0xff000000) == 0)
155 if (((instr->instr.std >> 24) & 0xff) == client)
158 case SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE:
159 /* all my private instruments */
160 if ((instr->instr.std & 0xff000000) == 0)
162 if (((instr->instr.std >> 24) & 0xff) == client)
165 case SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER:
166 /* all my private instruments */
167 if ((instr->instr.std & 0xff000000) == 0) {
168 if (instr->instr.cluster == ifree->id.cluster)
172 if (((instr->instr.std >> 24) & 0xff) == client) {
173 if (instr->instr.cluster == ifree->id.cluster)
181 int snd_seq_instr_list_free_cond(snd_seq_kinstr_list_t *list,
182 snd_seq_instr_header_t *ifree,
186 snd_seq_kinstr_t *instr, *prev, *next, *flist;
190 snd_instr_lock_ops(list);
191 for (idx = 0; idx < SNDRV_SEQ_INSTR_HASH_SIZE; idx++) {
192 spin_lock_irqsave(&list->lock, flags);
193 instr = list->hash[idx];
196 while (instr && instr_free_compare(instr, ifree, (unsigned int)client)) {
202 if (instr->ops && instr->ops->notify)
203 instr->ops->notify(instr->ops->private_data, instr, SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
206 list->hash[idx] = next;
215 spin_unlock_irqrestore(&list->lock, flags);
220 set_current_state(TASK_INTERRUPTIBLE);
223 if (snd_seq_instr_free(instr, atomic)<0)
224 snd_printk(KERN_WARNING "instrument free problem\n");
228 snd_instr_unlock_ops(list);
232 static int compute_hash_instr_key(snd_seq_instr_t *instr)
236 result = instr->bank | (instr->prg << 16);
237 result += result >> 24;
238 result += result >> 16;
239 result += result >> 8;
240 return result & (SNDRV_SEQ_INSTR_HASH_SIZE-1);
244 static int compute_hash_cluster_key(snd_seq_instr_cluster_t cluster)
249 result += result >> 24;
250 result += result >> 16;
251 result += result >> 8;
252 return result & (SNDRV_SEQ_INSTR_HASH_SIZE-1);
256 static int compare_instr(snd_seq_instr_t *i1, snd_seq_instr_t *i2, int exact)
259 if (i1->cluster != i2->cluster ||
260 i1->bank != i2->bank ||
263 if ((i1->std & 0xff000000) != (i2->std & 0xff000000))
265 if (!(i1->std & i2->std))
269 unsigned int client_check;
271 if (i2->cluster && i1->cluster != i2->cluster)
273 client_check = i2->std & 0xff000000;
275 if ((i1->std & 0xff000000) != client_check)
278 if ((i1->std & i2->std) != i2->std)
281 return i1->bank != i2->bank || i1->prg != i2->prg;
285 snd_seq_kinstr_t *snd_seq_instr_find(snd_seq_kinstr_list_t *list,
286 snd_seq_instr_t *instr,
292 snd_seq_kinstr_t *result;
294 if (list == NULL || instr == NULL)
296 spin_lock_irqsave(&list->lock, flags);
298 result = list->hash[compute_hash_instr_key(instr)];
300 if (!compare_instr(&result->instr, instr, exact)) {
301 if (follow_alias && (result->type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)) {
302 instr = (snd_seq_instr_t *)KINSTR_DATA(result);
308 spin_unlock_irqrestore(&list->lock, flags);
311 result = result->next;
314 spin_unlock_irqrestore(&list->lock, flags);
318 void snd_seq_instr_free_use(snd_seq_kinstr_list_t *list,
319 snd_seq_kinstr_t *instr)
323 if (list == NULL || instr == NULL)
325 spin_lock_irqsave(&list->lock, flags);
326 if (instr->use <= 0) {
327 snd_printk(KERN_ERR "free_use: fatal!!! use = %i, name = '%s'\n", instr->use, instr->name);
331 spin_unlock_irqrestore(&list->lock, flags);
334 static snd_seq_kinstr_ops_t *instr_ops(snd_seq_kinstr_ops_t *ops, char *instr_type)
337 if (!strcmp(ops->instr_type, instr_type))
344 static int instr_result(snd_seq_event_t *ev,
345 int type, int result,
350 memset(&sev, 0, sizeof(sev));
351 sev.type = SNDRV_SEQ_EVENT_RESULT;
352 sev.flags = SNDRV_SEQ_TIME_STAMP_REAL | SNDRV_SEQ_EVENT_LENGTH_FIXED |
353 SNDRV_SEQ_PRIORITY_NORMAL;
354 sev.source = ev->dest;
355 sev.dest = ev->source;
356 sev.data.result.event = type;
357 sev.data.result.result = result;
359 printk("instr result - type = %i, result = %i, queue = %i, source.client:port = %i:%i, dest.client:port = %i:%i\n",
362 sev.source.client, sev.source.port,
363 sev.dest.client, sev.dest.port);
365 return snd_seq_kernel_client_dispatch(sev.source.client, &sev, atomic, 0);
368 static int instr_begin(snd_seq_kinstr_ops_t *ops,
369 snd_seq_kinstr_list_t *list,
375 spin_lock_irqsave(&list->lock, flags);
376 if (list->owner >= 0 && list->owner != ev->source.client) {
377 spin_unlock_irqrestore(&list->lock, flags);
378 return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, -EBUSY, atomic);
380 list->owner = ev->source.client;
381 spin_unlock_irqrestore(&list->lock, flags);
382 return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_BEGIN, 0, atomic);
385 static int instr_end(snd_seq_kinstr_ops_t *ops,
386 snd_seq_kinstr_list_t *list,
392 /* TODO: timeout handling */
393 spin_lock_irqsave(&list->lock, flags);
394 if (list->owner == ev->source.client) {
396 spin_unlock_irqrestore(&list->lock, flags);
397 return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, 0, atomic);
399 spin_unlock_irqrestore(&list->lock, flags);
400 return instr_result(ev, SNDRV_SEQ_EVENT_INSTR_END, -EINVAL, atomic);
403 static int instr_info(snd_seq_kinstr_ops_t *ops,
404 snd_seq_kinstr_list_t *list,
411 static int instr_format_info(snd_seq_kinstr_ops_t *ops,
412 snd_seq_kinstr_list_t *list,
419 static int instr_reset(snd_seq_kinstr_ops_t *ops,
420 snd_seq_kinstr_list_t *list,
427 static int instr_status(snd_seq_kinstr_ops_t *ops,
428 snd_seq_kinstr_list_t *list,
435 static int instr_put(snd_seq_kinstr_ops_t *ops,
436 snd_seq_kinstr_list_t *list,
441 snd_seq_instr_header_t put;
442 snd_seq_kinstr_t *instr;
443 int result = -EINVAL, len, key;
445 if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
448 if (ev->data.ext.len < sizeof(snd_seq_instr_header_t))
450 if (copy_from_user(&put, ev->data.ext.ptr, sizeof(snd_seq_instr_header_t))) {
454 snd_instr_lock_ops(list);
455 if (put.id.instr.std & 0xff000000) { /* private instrument */
456 put.id.instr.std &= 0x00ffffff;
457 put.id.instr.std |= (unsigned int)ev->source.client << 24;
459 if ((instr = snd_seq_instr_find(list, &put.id.instr, 1, 0))) {
460 snd_seq_instr_free_use(list, instr);
461 snd_instr_unlock_ops(list);
465 ops = instr_ops(ops, put.data.data.format);
467 snd_instr_unlock_ops(list);
471 if (put.data.type == SNDRV_SEQ_INSTR_ATYPE_ALIAS)
472 len = sizeof(snd_seq_instr_t);
473 instr = snd_seq_instr_new(len, atomic);
475 snd_instr_unlock_ops(list);
480 instr->instr = put.id.instr;
481 strlcpy(instr->name, put.data.name, sizeof(instr->name));
482 instr->type = put.data.type;
483 if (instr->type == SNDRV_SEQ_INSTR_ATYPE_DATA) {
484 result = ops->put(ops->private_data,
486 ev->data.ext.ptr + sizeof(snd_seq_instr_header_t),
487 ev->data.ext.len - sizeof(snd_seq_instr_header_t),
491 snd_seq_instr_free(instr, atomic);
492 snd_instr_unlock_ops(list);
496 key = compute_hash_instr_key(&instr->instr);
497 spin_lock_irqsave(&list->lock, flags);
498 instr->next = list->hash[key];
499 list->hash[key] = instr;
501 spin_unlock_irqrestore(&list->lock, flags);
502 snd_instr_unlock_ops(list);
505 instr_result(ev, SNDRV_SEQ_EVENT_INSTR_PUT, result, atomic);
509 static int instr_get(snd_seq_kinstr_ops_t *ops,
510 snd_seq_kinstr_list_t *list,
517 static int instr_free(snd_seq_kinstr_ops_t *ops,
518 snd_seq_kinstr_list_t *list,
522 snd_seq_instr_header_t ifree;
523 snd_seq_kinstr_t *instr, *prev;
524 int result = -EINVAL;
528 if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARUSR)
531 if (ev->data.ext.len < sizeof(snd_seq_instr_header_t))
533 if (copy_from_user(&ifree, ev->data.ext.ptr, sizeof(snd_seq_instr_header_t))) {
537 if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_ALL ||
538 ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_PRIVATE ||
539 ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_CLUSTER) {
540 result = snd_seq_instr_list_free_cond(list, &ifree, ev->dest.client, atomic);
543 if (ifree.cmd == SNDRV_SEQ_INSTR_FREE_CMD_SINGLE) {
544 if (ifree.id.instr.std & 0xff000000) {
545 ifree.id.instr.std &= 0x00ffffff;
546 ifree.id.instr.std |= (unsigned int)ev->source.client << 24;
548 hash = compute_hash_instr_key(&ifree.id.instr);
549 snd_instr_lock_ops(list);
550 spin_lock_irqsave(&list->lock, flags);
551 instr = list->hash[hash];
554 if (!compare_instr(&instr->instr, &ifree.id.instr, 1))
560 spin_unlock_irqrestore(&list->lock, flags);
561 snd_instr_unlock_ops(list);
566 prev->next = instr->next;
568 list->hash[hash] = instr->next;
570 if (instr->ops && instr->ops->notify)
571 instr->ops->notify(instr->ops->private_data, instr, SNDRV_SEQ_INSTR_NOTIFY_REMOVE);
573 spin_unlock_irqrestore(&list->lock, flags);
574 set_current_state(TASK_INTERRUPTIBLE);
576 spin_lock_irqsave(&list->lock, flags);
578 spin_unlock_irqrestore(&list->lock, flags);
579 result = snd_seq_instr_free(instr, atomic);
580 snd_instr_unlock_ops(list);
585 instr_result(ev, SNDRV_SEQ_EVENT_INSTR_FREE, result, atomic);
589 static int instr_list(snd_seq_kinstr_ops_t *ops,
590 snd_seq_kinstr_list_t *list,
597 static int instr_cluster(snd_seq_kinstr_ops_t *ops,
598 snd_seq_kinstr_list_t *list,
605 int snd_seq_instr_event(snd_seq_kinstr_ops_t *ops,
606 snd_seq_kinstr_list_t *list,
614 snd_assert(ops != NULL && list != NULL && ev != NULL, return -EINVAL);
615 if (snd_seq_ev_is_direct(ev)) {
618 case SNDRV_SEQ_EVENT_INSTR_BEGIN:
619 return instr_begin(ops, list, ev, atomic, hop);
620 case SNDRV_SEQ_EVENT_INSTR_END:
621 return instr_end(ops, list, ev, atomic, hop);
624 if ((list->flags & SNDRV_SEQ_INSTR_FLG_DIRECT) && !direct)
627 case SNDRV_SEQ_EVENT_INSTR_INFO:
628 return instr_info(ops, list, ev, atomic, hop);
629 case SNDRV_SEQ_EVENT_INSTR_FINFO:
630 return instr_format_info(ops, list, ev, atomic, hop);
631 case SNDRV_SEQ_EVENT_INSTR_RESET:
632 return instr_reset(ops, list, ev, atomic, hop);
633 case SNDRV_SEQ_EVENT_INSTR_STATUS:
634 return instr_status(ops, list, ev, atomic, hop);
635 case SNDRV_SEQ_EVENT_INSTR_PUT:
636 return instr_put(ops, list, ev, atomic, hop);
637 case SNDRV_SEQ_EVENT_INSTR_GET:
638 return instr_get(ops, list, ev, atomic, hop);
639 case SNDRV_SEQ_EVENT_INSTR_FREE:
640 return instr_free(ops, list, ev, atomic, hop);
641 case SNDRV_SEQ_EVENT_INSTR_LIST:
642 return instr_list(ops, list, ev, atomic, hop);
643 case SNDRV_SEQ_EVENT_INSTR_CLUSTER:
644 return instr_cluster(ops, list, ev, atomic, hop);
653 static int __init alsa_seq_instr_init(void)
658 static void __exit alsa_seq_instr_exit(void)
662 module_init(alsa_seq_instr_init)
663 module_exit(alsa_seq_instr_exit)
665 EXPORT_SYMBOL(snd_seq_instr_list_new);
666 EXPORT_SYMBOL(snd_seq_instr_list_free);
667 EXPORT_SYMBOL(snd_seq_instr_list_free_cond);
668 EXPORT_SYMBOL(snd_seq_instr_find);
669 EXPORT_SYMBOL(snd_seq_instr_free_use);
670 EXPORT_SYMBOL(snd_seq_instr_event);