2
* Purpose: A simple software MIDI synthesizer program with GTK GUI.
3
* Copyright (C) 4Front Technologies, 2002-2004. Released in public domain.
7
* This is an expanded version of the {!nlink softsynth.c} program
8
* (please take a look at the original program first). Also this program
9
* is more or less just a programming example that demonstrates how to
10
* implement "real-time" GUI controls in the simplest possible way. While
11
* you can use this program as a program template, you may equally well start
12
* from the original one.
14
* Like the original there is not much to learn about synthesis in this program
15
* the only design goal has been to demonstrate some additional OSS MIDI
16
* features and to work as a test bed for them.
18
* The primary thing that has been done is replacing the original select() loop
19
* with a GTK (actually GDK) compatible mechanism (gdk_add_input and
20
* gtk_main). This is done in the main program.
22
* There are few other added features that should be mentioned:
24
* 1) The "Enhance switch" that makes the sound slightly "different" by
25
* adding some harmonics of the original note frequency. The sound is just
26
* different (not necessarily better). This together with the "Mute" button
27
* demonstrates how to do real-time adjustment of the settings.
28
* 2) The "mute" button was added just to be able to check if the latencies
30
* 3) The new SNDCTL_SETNAME call is used to change the description of the
31
* MIDI device. In this way the user of the client program can select the
32
* right device much easier.
33
* 4) The SNDCTL_GETSONG call is used to obtain the song name given
34
* by the client (ossmplay does this using SNDCTL_SETSONG).
35
* 5) Some code has been added to handle the MIDI timer start (0xfa) and
36
* stop (0xfc) messages. With loopback devices these messages can be
37
* used to find out when the client closed/opened the device. However if
38
* the server starts in the middle of a song there will be no start message.
40
* To use this program you will need to install the "4Front MIDI loopback"
41
* driver using the "Add new card/device" function of soundconf.
42
* Then start this program in background (the audio and MIDI device names
43
* can be given as command line arguments. For example
45
* softsynth /dev/dsp /dev/midi01
47
* You can find out the loopback MIDI device number by looking for the
48
* "MIDI loopback server side" devices using the {!xlink ossinfo} -m
49
* command. Btw, nothing prevents using any "real" physical MIDI port as the
52
* When the synthesizer server is running you can play any MIDI file using
53
* some OSS based MIDI sequencer/player such as {!xlink ossmplay}.
61
#include <sys/select.h>
62
#include <sys/soundcard.h>
63
#include <midiparser.h>
64
midiparser_common_t *parser = NULL;
68
int sample_rate = 48000;
71
int enhanced_mode = 0;
75
* The open_audio_device routine opens the audio device and initializes it
76
* for the required mode. This code was borrowed directly from the
77
* {!nlink singen.c} sample program. However since the buffer size
78
* is important with this kind of application we have added a call that
79
* sets the fragment and buffer sizes.
83
open_audio_device (char *name, int mode)
87
if ((fd = open (name, mode, 0)) == -1)
94
* Setup the audio buffering policy so that reasonably small latencies
97
* 4 fragments of 256 samples (512 bytes) might be good. 256 samples
98
* will give timing granularity of 256/sample_rate seconds (5.33 msec)
99
* which is fairly adequate. The effect of the granularity (fragment size) in
100
* this type of applications is timing jitter (or choking). Every event that
101
* occurs within the 5.33 msec period (fragment time) will get executed
102
* in the beginning of the next period. If the fragment size is decreased
103
* then the granularity will decrease too. However this will cause slight
104
* increase in the CPU consumption of the application.
106
* The total buffer size (number_of_fragments*fragment_time) will define the
107
* maximum latency between the event (note on/off) and the actual change in the
108
* audio output. The average latency will be something like
109
* (number_of_fragments-0.5)*fragment_time). The theoretical average latency
110
* caused by this application is (4-0.5)*5.33 msec = ~19 msec).
112
* In musical terms 5.33 msec granularity equals to 1/750 note at 60 bpm
113
* and 19 msecs equals to 1/214. This should be pretty adequate.
115
* The latency can be decreased by limiting the number of fragments and/or the
116
* fragment size. However the after the buffer size drops close to the
117
* capabilities of the system (delays caused by the other applications) the
118
* audio output will start breaking. This can be cured only by tuning the
119
* hardware and the software environments (tuning some kernel parameters and
120
* by killing all the other applications). However this is in no way an OSS
123
* With these parameters it was possible to compile Linux kernel in another
124
* terminal window without any hiccup (fairly entry level 2.4 GHz P4 system
125
* running Linux 2.6.x).
128
if (ioctl (fd, SNDCTL_DSP_SETFRAGMENT, &tmp) == -1)
130
perror ("SNDCTL_DSP_SETFRAGMENT");
134
* Setup the device. Note that it's important to set the
135
* sample format, number of channels and sample rate exactly in this order.
136
* Some devices depend on the order.
140
* Set the sample format
142
tmp = AFMT_S16_NE; /* Native 16 bits */
143
if (ioctl (fd, SNDCTL_DSP_SETFMT, &tmp) == -1)
145
perror ("SNDCTL_DSP_SETFMT");
149
if (tmp != AFMT_S16_NE)
152
"The device doesn't support the 16 bit sample format.\n");
157
* Set the number of channels (mono)
160
if (ioctl (fd, SNDCTL_DSP_CHANNELS, &tmp) == -1)
162
perror ("SNDCTL_DSP_CHANNELS");
168
fprintf (stderr, "The device doesn't support mono mode.\n");
173
* Set the sample rate
176
if (ioctl (fd, SNDCTL_DSP_SPEED, &sample_rate) == -1)
178
perror ("SNDCTL_DSP_SPEED");
183
* No need for rate checking because we will automatically adjust the
184
* signal based on the actual sample rate. However most application must
185
* check the value of sample_rate and compare it to the requested rate.
187
* Small differences between the rates (10% or less) are normal and the
188
* applications should usually tolerate them. However larger differences may
189
* cause annoying pitch problems (Mickey Mouse).
196
open_midi_device (char *name, int mode)
201
* This is pretty much all we need.
204
if ((fd = open (name, mode, 0)) == -1)
214
************** Some GTK+ related routines **********************
218
GtkWidget *main_window, *song_name, *time_code;
221
close_all (GtkWidget * window, gpointer data)
228
* toggle_enhance() gets called when the "Enhance" button is hit. It just
229
* changes the state of the enhanced_mode flag which is used by the
230
* note_on() routine. This setting affects only the notes to be started
231
* after the change. It doesn't affect any of the currently playing voices.
235
toggle_enhance (GtkWidget * window, gpointer data)
237
enhanced_mode = !enhanced_mode; /* ON <-> OFF */
241
* toggle_mute() handles the "Mute" button. The change will take effect
242
* at the moment when the next audio block to be computed starts playing
243
* on the device. So this button can be used to check how long the total
244
* latencies are (including any delays caused by device level FIFOs).
248
toggle_mute (GtkWidget * window, gpointer data)
257
update_song_name (void)
263
* Get the song name from the client and update the label. Notice that
264
* SNDCTL_GETSONG will return error (EINVAL) if the device doesn't
265
* support song names. This is not an error. It simply means that no
266
* song information is available. The song name may also be an empty
267
* string if the client has not registered any song name. Also this is
270
* The difference between EINVAL and an empty string (if it matters) is that
271
* EINVAL means that the device will not return this info later (the
272
* application may stop polling for it).
275
if (ioctl (midi_fd, SNDCTL_GETSONG, name) != -1)
278
strcpy (name, "Unknown song");
279
sprintf (tmp, "Song: %s", name);
280
gtk_label_set (GTK_LABEL (song_name), tmp);
282
/* Forward the song name to the audio device too */
283
ioctl (audio_fd, SNDCTL_SETSONG, name);
290
* The create_user_interface() routine is pretty much a standard
291
* GUI initialization for any GTK based application. Not important
292
* for the logic of this program. We just create some buttons and one
293
* label and assign the callbacks to handle them.
297
create_user_interface (void)
299
GtkWidget *button, *vbox;
300
main_window = gtk_window_new (GTK_WINDOW_TOPLEVEL);
302
gtk_signal_connect (GTK_OBJECT (main_window),
303
"destroy", GTK_SIGNAL_FUNC (close_all), NULL);
305
vbox = gtk_vbox_new (FALSE, 0);
306
gtk_container_add (GTK_CONTAINER (main_window), vbox);
308
song_name = gtk_label_new ("Song: Unknown song");
309
gtk_box_pack_start (GTK_BOX (vbox), song_name, FALSE, FALSE, 0);
310
gtk_widget_show (song_name);
312
time_code = gtk_label_new ("Song: Unknown song");
313
gtk_box_pack_start (GTK_BOX (vbox), time_code, FALSE, FALSE, 0);
314
gtk_widget_show (time_code);
318
button = gtk_check_button_new_with_label ("Mute");
319
gtk_signal_connect (GTK_OBJECT (button),
320
"clicked", GTK_SIGNAL_FUNC (toggle_mute), NULL);
321
gtk_box_pack_start (GTK_BOX (vbox), button, FALSE, FALSE, 0);
322
gtk_widget_show (button);
324
button = gtk_check_button_new_with_label ("Enhance");
325
gtk_signal_connect (GTK_OBJECT (button),
326
"clicked", GTK_SIGNAL_FUNC (toggle_enhance), NULL);
327
gtk_box_pack_start (GTK_BOX (vbox), button, FALSE, FALSE, 0);
328
gtk_widget_show (button);
330
button = gtk_button_new_with_label ("Exit");
331
gtk_signal_connect (GTK_OBJECT (button),
332
"clicked", GTK_SIGNAL_FUNC (close_all), NULL);
333
gtk_box_pack_start (GTK_BOX (vbox), button, FALSE, FALSE, 0);
334
gtk_widget_show (button);
336
gtk_widget_show (vbox);
337
gtk_widget_show (main_window);
341
************** The actual synthesis engine *********************
344
#define MAX_VOICES 256
348
int active; /* ON/OFF */
349
int chn, note, velocity; /* MIDI note parameters */
351
float phase, step; /* Sine frequency generator */
353
float volume; /* Note volume */
355
float envelope, envelopestep; /* Envelope generator */
356
int envelopedir; /* 0=fixed level, 1=attack, -1=decay */
359
static voice_t voices[MAX_VOICES] = { 0 };
362
/*
 * note_to_freq() converts a MIDI note number to its frequency
 * (multiplied by 1000 so the fractional part survives integer math).
 *
 * Notice! This routine was copied from the OSS sequencer code.
 */
static int
note_to_freq (int note_num)
{
  int note, octave, note_freq;

  /* Frequencies (Hz * 1000) of the 12 semitones of the base octave. */
  static int notes[] = {
    261632, 277189, 293671, 311132, 329632, 349232,
    369998, 391998, 415306, 440000, 466162, 493880
  };

#define BASE_OCTAVE	5

  octave = note_num / 12;
  note = note_num % 12;

  note_freq = notes[note];

  /*
   * Each octave doubles/halves the frequency, so shift the base-octave
   * value by the octave distance.
   */
  if (octave < BASE_OCTAVE)
    note_freq >>= (BASE_OCTAVE - octave);
  else if (octave > BASE_OCTAVE)
    note_freq <<= (octave - BASE_OCTAVE);

  return note_freq;		/* Reconstructed: callers use the value (freq = note_to_freq (note)) */
}
392
* fork_voice() creates another instance of a voice (just like the
393
* fork system call does). It's possible to change the pitch and volume
394
* by setting the freq_ratio and vol_scale parameters to values below 1.0.
397
fork_voice (voice_t * orig, float freq_ratio, float vol_scale)
401
for (i = 0; i < MAX_VOICES; i++)
402
if (!voices[i].active)
404
voice_t *v = &voices[i];
406
memcpy (v, orig, sizeof (voice_t));
407
v->step /= freq_ratio;
408
v->volume *= vol_scale;
414
* The note_on() routine initializes a voice with the right
415
* frequency, volume and envelope parameters.
419
note_on (int ch, int note, int velocity)
423
for (i = 0; i < MAX_VOICES; i++)
424
if (!voices[i].active)
426
voice_t *v = &voices[i];
431
* Record the MIDI note on message parameters (just in case)
436
v->velocity = velocity;
439
* Convert the note number to the actual frequency (multiplied by 1000).
440
* Then compute the step to be added to the phase angle to get the right
444
freq = note_to_freq (note);
445
step = 1000.0 * (float) sample_rate / (float) freq; /* Samples/cycle */
446
v->step = 2.0 * M_PI / step;
447
if (v->step > M_PI) /* Nyqvist was here */
452
* Compute the note volume based on the velocity. Use linear scale which
453
* maps velocity=0 to the 25% volume level. Proper synthesizers will use more
454
* advanced methods (such as logarithmic scales) but this is good for our
457
v->volume = 0.25 + ((float) velocity / 127.0) * 0.75;
460
* Initialize the envelope engine to start from zero level and to add
461
* some fixed amount to the envelope level after each sample.
465
v->envelopestep = 0.01;
468
* Fire the voice. However nothing will happen before the next audio
469
* period (fragment) gets computed. This means that all the voices started
470
* during the ending period will be rounded to start at the same moment.
477
* Stupid test that adds some harmonic frequencies. This makes the output
478
* to sound bolder. This algorithm is called additive synthesis. However
479
* this program is not the best possible one for learning that technique.
481
fork_voice (v, 1.001, 0.9); /* Add some beating */
482
fork_voice (v, 2.0, 0.1);
483
fork_voice (v, 3.0, 0.2);
484
fork_voice (v, 4.0, 0.02);
485
fork_voice (v, 6.0, 0.01);
486
fork_voice (v, 8.0, 0.01);
493
* The note_off() routine finds all the voices that have matching channel and
494
* note numbers. Then it starts the envelope decay phase (10 times slower
495
* than the attack phase.
499
note_off (int ch, int note, int velocity)
503
for (i = 0; i < MAX_VOICES; i++)
504
if (voices[i].active && voices[i].chn == ch)
505
if (voices[i].note = note)
507
voice_t *v = &voices[i];
509
v->envelopestep = -0.001;
514
* all_notes_off() is a version of note_off() that checks only the channel
515
* number. Used for the All Notes Off MIDI controller (123).
519
all_notes_off (int ch)
523
for (i = 0; i < MAX_VOICES; i++)
524
if (voices[i].active && voices[i].chn == ch)
526
voice_t *v = &voices[i];
528
v->envelopestep = -0.01;
533
* all_voices_off() mutes all voices immediately.
537
all_voices_off (int ch)
541
for (i = 0; i < MAX_VOICES; i++)
542
if (voices[i].active && voices[i].chn == ch)
544
voice_t *v = &voices[i];
551
* Compute voice computes few samples (nloops) and sums them to the
552
* buffer (that contains the sum of all previously computed voices).
554
* In real world applications it may be necessary to convert this routine to
555
* use floating point buffers (-1.0 to 1.0 range) and do the conversion
556
* to fixed point only in the final output stage. Another change you may
557
* want to do is using multiple output buffers (for stereo or multiple
558
* channels) instead of the current mono scheme.
560
* For clarity reasons we have not done that.
564
compute_voice (voice_t * v, short *buf, int nloops)
568
for (i = 0; i < nloops; i++)
573
* First compute the sine wave (-1.0 to 1.0) and scale it to the right
574
* level. Finally sum the sample with the earlier voices in the buffer.
576
val = sin (v->phase) * 1024.0 * v->envelope * v->volume * main_vol;
577
buf[i] += (short) val;
580
* Increase the phase angle for the next sample.
585
* Handle envelope attack or decay
587
switch (v->envelopedir)
590
v->envelope += v->envelopestep;
591
if (v->envelope >= 1.0) /* Full level ? */
594
v->envelopestep = 0.0;
600
v->envelope += v->envelopestep;
601
if (v->envelope <= 0.0) /* Decay done */
604
v->envelopestep = 0.0;
606
v->active = 0; /* Shut up */
614
*********** Handling of OSS MIDI input and audio output ***********
618
* The midi_callback() function is called by the midi parser library when
619
* a complete MIDI message is seen in the input. The MIDI message number
620
* (lowest 4 bits usually set to zero), the channel (0-15), as well as the
621
* remaining bytes will be passed in the parameters.
623
* The MIDI parser library will handle oddities (like running status
624
* or use of note on with velocity of 0 as note off) so the application
625
* doesn't need to care about such nasty things.
627
* Note that the MIDI percussion channel 10 (9 as passed in the ch parameter)
628
* will be ignored. All other MIDI messages other than note on, note off
629
* and the "all notes off" controller are simply ignored.
631
* Macros like MIDI_NOTEON and MIDI_NOTEOFF are defined in soundcard.h.
635
midi_callback (void *context, int category, unsigned char msg,
636
unsigned char ch, unsigned char *parms, int len)
644
if (ch != 9) /* Avoid percussions */
645
note_on (ch, parms[0], parms[1]);
649
if (ch != 9) /* Avoid percussions */
650
note_off (ch, parms[0], parms[1]);
653
case MIDI_CTL_CHANGE:
660
case 0xfa: /* Start */
662
* Note that the start message arrives at the moment when the
663
* client side of the loopback device is opened. At that moment
664
* the client has not updated the song name so we should
665
* not try to read it immediately. Instead we have to do it
666
* (for example) at the moment the first note is started.
671
case 0xfc: /* Stop */
673
* The stop message arrives after the client side of the
674
* loopback device has been closed. We will just re-draw
675
* the song name (to clear the display field on the screen).
684
* The handle_midi_input() routine reads all the MIDI input bytes
685
* that have been received by OSS since the last read. Note that
686
* this read will not block.
688
* Finally the received buffer is sent to the midi parser library which in turn
689
* calls midi_callback (see above) to handle the actual events.
693
handle_midi_input (gpointer data, gint source, GdkInputCondition cond)
695
unsigned char buffer[256];
698
if ((l = read (midi_fd, buffer, sizeof (buffer))) == -1)
700
perror ("MIDI read");
705
midiparser_input_buf (parser, buffer, l);
709
* handle_audio_output() computes a new block of audio and writes it to the
710
* audio device. As you see there is no checking for blocking or available
711
* buffer space because it's simply not necessary with OSS 4.0 any more.
712
* If there is any blocking then the time below our "tolerances".
716
handle_audio_output (gpointer data, gint source, GdkInputCondition cond)
719
* Ideally the buffer size equals to the fragment size (in samples).
720
* Using different sizes is not a big mistake but the granularity is
721
* defined by the buffer size or the fragment size (depending on which
727
memset (buf, 0, sizeof (buf));
729
/* Loop all the active voices */
730
for (i = 0; i < MAX_VOICES; i++)
731
if (voices[i].active)
732
compute_voice (&voices[i], buf, sizeof (buf) / sizeof (*buf));
734
if (write (audio_fd, buf, sizeof (buf)) == -1)
736
perror ("Audio write");
742
* The mtc_callback() routine updates the SMTPE/MTC time display on the
743
* screen. The quarter frames (qframes) field is not shown.
747
mtc_callback (void *context, oss_mtc_data_t * mtc)
751
if (mtc->qframes != 0)
754
sprintf (tmp, "%02d:%02d:%02d.%02d\n",
755
mtc->hours, mtc->minutes, mtc->seconds, mtc->frames);
756
gtk_label_set (GTK_LABEL (time_code), tmp);
760
* Finally the main program
764
main (int argc, char *argv[])
766
fd_set readfds, writefds;
768
* Use /dev/dsp as the default device because the system administrator
769
* may select the device using the {!xlink ossctl} program or some other
778
* It's recommended to provide some method for selecting some other
779
* device than the default. We use command line argument but in some cases
780
* an environment variable or some configuration file setting may be better.
784
fprintf (stderr, "Usage: %s audio_device midi_device\n", argv[0]);
788
audiodev_name = argv[1];
789
mididev_name = argv[2];
792
* It's mandatory to use O_WRONLY in programs that do only playback. Other
793
* modes may cause increased resource (memory) usage in the driver. It may
794
* also prevent other applications from using the same device for
795
* recording at the same time.
797
audio_fd = open_audio_device (audiodev_name, O_WRONLY);
800
* Open the MIDI device for read access (only).
803
midi_fd = open_midi_device (mididev_name, O_RDONLY);
806
* Request input of MTC time (25 FPS). This is just for fun.
809
ioctl (midi_fd, SNDCTL_MIDI_MTCINPUT, &tmp);
812
* Report the server name to the client side. This name will be reported
813
* by applications that check the device names. It will also be shown
816
* SNDCTL_SETNAME is a new ioctl call in OSS 4.0. It doesn't make any
817
* sense to do error checking with it.
820
ioctl (midi_fd, SNDCTL_SETNAME, "OSS user land synth demo");
823
* Init the MIDI input parser (from OSSlib)
826
if ((parser = midiparser_create (midi_callback, NULL)) == NULL)
828
fprintf (stderr, "Creating a MIDI parser failed\n");
833
* Register the MTC timecode handler
835
midiparser_mtc_callback (parser, mtc_callback);
838
* Standard GTK+ initialization.
841
gtk_init (&argc, &argv);
843
create_user_interface ();
845
gdk_input_add (audio_fd, GDK_INPUT_WRITE, handle_audio_output, NULL);
846
gdk_input_add (midi_fd, GDK_INPUT_READ, handle_midi_input, NULL);
853
* This is pretty much all of it. This program can be easily improved by
854
* using some more advanced synthesis algorithm (wave table, sample playback,
855
* physical modelling or whatever else) and by interpreting all the MIDI
856
* messages. You can also add a nice GUI. You have complete freedom to
857
* modify this program and distribute it as your own work (under GPL, BSD
858
* proprietary or whatever license you can imagine) but only AS LONG AS YOU
859
* DON'T DO ANY STUPID CHANGES THAT BREAK THE RELIABILITY AND ROBUSTNESS.
861
* The point is that regardless of what you do there is no need to touch the
862
* audio/MIDI device related parts. They are already "state of the art".
863
* So you can spend all your time to work on the "payload" code. What you
864
* can do is changing the compute_voice() and midi_callback() routines and
865
* everything called by them.