Diffstat (limited to 'src/modules')
 l---------  src/modules/Makefile                         |    1
 -rw-r--r--  src/modules/alsa-util.c                      |  799
 -rw-r--r--  src/modules/alsa-util.h                      |   76
 -rw-r--r--  src/modules/bt-proximity-helper.c            |  210
 -rw-r--r--  src/modules/dbus-util.c                      |  329
 -rw-r--r--  src/modules/dbus-util.h                      |   40
 -rw-r--r--  src/modules/gconf/Makefile                   |   13
 -rw-r--r--  src/modules/gconf/gconf-helper.c             |  135
 -rw-r--r--  src/modules/gconf/module-gconf.c             |  397
 -rw-r--r--  src/modules/ladspa.h                         |  603
 -rw-r--r--  src/modules/module-alsa-sink.c               |  995
 -rw-r--r--  src/modules/module-alsa-source.c             |  968
 -rw-r--r--  src/modules/module-bt-proximity.c            |  492
 -rw-r--r--  src/modules/module-cli.c                     |  123
 -rw-r--r--  src/modules/module-combine.c                 | 1193
 -rw-r--r--  src/modules/module-default-device-restore.c  |  101
 -rw-r--r--  src/modules/module-defs.h.m4                 |   32
 -rw-r--r--  src/modules/module-detect.c                  |  272
 -rw-r--r--  src/modules/module-esound-compat-spawnfd.c   |   80
 -rw-r--r--  src/modules/module-esound-compat-spawnpid.c  |   77
 -rw-r--r--  src/modules/module-esound-sink.c             |  661
 -rw-r--r--  src/modules/module-hal-detect.c              |  851
 -rw-r--r--  src/modules/module-jack-sink.c               |  456
 -rw-r--r--  src/modules/module-jack-source.c             |  427
 -rw-r--r--  src/modules/module-ladspa-sink.c             |  684
 -rw-r--r--  src/modules/module-lirc.c                    |  259
 -rw-r--r--  src/modules/module-match.c                   |  244
 -rw-r--r--  src/modules/module-mmkbd-evdev.c             |  262
 -rw-r--r--  src/modules/module-native-protocol-fd.c      |   89
 -rw-r--r--  src/modules/module-null-sink.c               |  257
 -rw-r--r--  src/modules/module-oss.c                     | 1499
 -rw-r--r--  src/modules/module-pipe-sink.c               |  333
 -rw-r--r--  src/modules/module-pipe-source.c             |  309
 -rw-r--r--  src/modules/module-protocol-stub.c           |  376
 -rw-r--r--  src/modules/module-remap-sink.c              |  335
 -rw-r--r--  src/modules/module-rescue-streams.c          |  164
 -rw-r--r--  src/modules/module-sine.c                    |  206
 -rw-r--r--  src/modules/module-solaris.c                 |  766
 -rw-r--r--  src/modules/module-suspend-on-idle.c         |  446
 -rw-r--r--  src/modules/module-tunnel.c                  | 1509
 -rw-r--r--  src/modules/module-volume-restore.c          |  580
 -rw-r--r--  src/modules/module-waveout.c                 |  649
 -rw-r--r--  src/modules/module-x11-bell.c                |  171
 -rw-r--r--  src/modules/module-x11-publish.c             |  198
 -rw-r--r--  src/modules/module-x11-xsmp.c                |  196
 -rw-r--r--  src/modules/module-zeroconf-discover.c       |  443
 -rw-r--r--  src/modules/module-zeroconf-publish.c        |  650
 -rw-r--r--  src/modules/oss-util.c                       |  419
 -rw-r--r--  src/modules/oss-util.h                       |   43
 -rw-r--r--  src/modules/rtp/Makefile                     |   13
 -rw-r--r--  src/modules/rtp/module-rtp-recv.c            |  600
 -rw-r--r--  src/modules/rtp/module-rtp-send.c            |  397
 -rw-r--r--  src/modules/rtp/rfc2327.txt                  | 2355
 -rw-r--r--  src/modules/rtp/rfc2974.txt                  | 1011
 -rw-r--r--  src/modules/rtp/rfc3550.txt                  | 5827
 -rw-r--r--  src/modules/rtp/rfc3551.txt                  | 2467
 -rw-r--r--  src/modules/rtp/rtp.c                        |  364
 -rw-r--r--  src/modules/rtp/rtp.h                        |   59
 -rw-r--r--  src/modules/rtp/sap.c                        |  223
 -rw-r--r--  src/modules/rtp/sap.h                        |   48
 -rw-r--r--  src/modules/rtp/sdp.c                        |  261
 -rw-r--r--  src/modules/rtp/sdp.h                        |   52
62 files changed, 34095 insertions, 0 deletions
diff --git a/src/modules/Makefile b/src/modules/Makefile
new file mode 120000
index 00000000..c110232d
--- /dev/null
+++ b/src/modules/Makefile
@@ -0,0 +1 @@
+../pulse/Makefile
\ No newline at end of file
diff --git a/src/modules/alsa-util.c b/src/modules/alsa-util.c
new file mode 100644
index 00000000..6afec3bc
--- /dev/null
+++ b/src/modules/alsa-util.c
@@ -0,0 +1,799 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+ Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <sys/types.h>
+#include <asoundlib.h>
+
+#include <pulse/sample.h>
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/log.h>
+#include <pulsecore/macro.h>
+#include <pulsecore/core-util.h>
+
+#include "alsa-util.h"
+
+struct pa_alsa_fdlist {
+ int num_fds;
+ struct pollfd *fds;
+ /* This is a temporary buffer used to avoid lots of mallocs */
+ struct pollfd *work_fds;
+
+ snd_mixer_t *mixer;
+
+ pa_mainloop_api *m;
+ pa_defer_event *defer;
+ pa_io_event **ios;
+
+ int polled;
+
+ void (*cb)(void *userdata);
+ void *userdata;
+};
+
+static void io_cb(pa_mainloop_api*a, pa_io_event* e, PA_GCC_UNUSED int fd, pa_io_event_flags_t events, void *userdata) {
+
+ struct pa_alsa_fdlist *fdl = userdata;
+ int err, i;
+ unsigned short revents;
+
+ pa_assert(a);
+ pa_assert(fdl);
+ pa_assert(fdl->mixer);
+ pa_assert(fdl->fds);
+ pa_assert(fdl->work_fds);
+
+ if (fdl->polled)
+ return;
+
+ fdl->polled = 1;
+
+ memcpy(fdl->work_fds, fdl->fds, sizeof(struct pollfd) * fdl->num_fds);
+
+ for (i = 0;i < fdl->num_fds; i++) {
+ if (e == fdl->ios[i]) {
+ if (events & PA_IO_EVENT_INPUT)
+ fdl->work_fds[i].revents |= POLLIN;
+ if (events & PA_IO_EVENT_OUTPUT)
+ fdl->work_fds[i].revents |= POLLOUT;
+ if (events & PA_IO_EVENT_ERROR)
+ fdl->work_fds[i].revents |= POLLERR;
+ if (events & PA_IO_EVENT_HANGUP)
+ fdl->work_fds[i].revents |= POLLHUP;
+ break;
+ }
+ }
+
+ pa_assert(i != fdl->num_fds);
+
+ if ((err = snd_mixer_poll_descriptors_revents(fdl->mixer, fdl->work_fds, fdl->num_fds, &revents)) < 0) {
+        pa_log_error("Unable to get poll revents: %s", snd_strerror(err));
+ return;
+ }
+
+ a->defer_enable(fdl->defer, 1);
+
+ if (revents)
+ snd_mixer_handle_events(fdl->mixer);
+}
+
+static void defer_cb(pa_mainloop_api*a, PA_GCC_UNUSED pa_defer_event* e, void *userdata) {
+ struct pa_alsa_fdlist *fdl = userdata;
+ int num_fds, i, err;
+ struct pollfd *temp;
+
+ pa_assert(a);
+ pa_assert(fdl);
+ pa_assert(fdl->mixer);
+
+ a->defer_enable(fdl->defer, 0);
+
+ num_fds = snd_mixer_poll_descriptors_count(fdl->mixer);
+ pa_assert(num_fds > 0);
+
+ if (num_fds != fdl->num_fds) {
+ if (fdl->fds)
+ pa_xfree(fdl->fds);
+ if (fdl->work_fds)
+ pa_xfree(fdl->work_fds);
+ fdl->fds = pa_xnew0(struct pollfd, num_fds);
+ fdl->work_fds = pa_xnew(struct pollfd, num_fds);
+ }
+
+ memset(fdl->work_fds, 0, sizeof(struct pollfd) * num_fds);
+
+ if ((err = snd_mixer_poll_descriptors(fdl->mixer, fdl->work_fds, num_fds)) < 0) {
+ pa_log_error("Unable to get poll descriptors: %s", snd_strerror(err));
+ return;
+ }
+
+ fdl->polled = 0;
+
+ if (memcmp(fdl->fds, fdl->work_fds, sizeof(struct pollfd) * num_fds) == 0)
+ return;
+
+ if (fdl->ios) {
+ for (i = 0; i < fdl->num_fds; i++)
+ a->io_free(fdl->ios[i]);
+
+ if (num_fds != fdl->num_fds) {
+ pa_xfree(fdl->ios);
+ fdl->ios = NULL;
+ }
+ }
+
+ if (!fdl->ios)
+ fdl->ios = pa_xnew(pa_io_event*, num_fds);
+
+ /* Swap pointers */
+ temp = fdl->work_fds;
+ fdl->work_fds = fdl->fds;
+ fdl->fds = temp;
+
+ fdl->num_fds = num_fds;
+
+ for (i = 0;i < num_fds;i++)
+ fdl->ios[i] = a->io_new(a, fdl->fds[i].fd,
+ ((fdl->fds[i].events & POLLIN) ? PA_IO_EVENT_INPUT : 0) |
+ ((fdl->fds[i].events & POLLOUT) ? PA_IO_EVENT_OUTPUT : 0),
+ io_cb, fdl);
+}
+
+struct pa_alsa_fdlist *pa_alsa_fdlist_new(void) {
+ struct pa_alsa_fdlist *fdl;
+
+ fdl = pa_xnew0(struct pa_alsa_fdlist, 1);
+
+ fdl->num_fds = 0;
+ fdl->fds = NULL;
+ fdl->work_fds = NULL;
+ fdl->mixer = NULL;
+ fdl->m = NULL;
+ fdl->defer = NULL;
+ fdl->ios = NULL;
+ fdl->polled = 0;
+
+ return fdl;
+}
+
+void pa_alsa_fdlist_free(struct pa_alsa_fdlist *fdl) {
+ pa_assert(fdl);
+
+ if (fdl->defer) {
+ pa_assert(fdl->m);
+ fdl->m->defer_free(fdl->defer);
+ }
+
+ if (fdl->ios) {
+ int i;
+ pa_assert(fdl->m);
+ for (i = 0;i < fdl->num_fds;i++)
+ fdl->m->io_free(fdl->ios[i]);
+ pa_xfree(fdl->ios);
+ }
+
+ if (fdl->fds)
+ pa_xfree(fdl->fds);
+ if (fdl->work_fds)
+ pa_xfree(fdl->work_fds);
+
+ pa_xfree(fdl);
+}
+
+int pa_alsa_fdlist_set_mixer(struct pa_alsa_fdlist *fdl, snd_mixer_t *mixer_handle, pa_mainloop_api* m) {
+ pa_assert(fdl);
+ pa_assert(mixer_handle);
+ pa_assert(m);
+ pa_assert(!fdl->m);
+
+ fdl->mixer = mixer_handle;
+ fdl->m = m;
+ fdl->defer = m->defer_new(m, defer_cb, fdl);
+
+ return 0;
+}
+
+static int set_format(snd_pcm_t *pcm_handle, snd_pcm_hw_params_t *hwparams, pa_sample_format_t *f) {
+
+ static const snd_pcm_format_t format_trans[] = {
+ [PA_SAMPLE_U8] = SND_PCM_FORMAT_U8,
+ [PA_SAMPLE_ALAW] = SND_PCM_FORMAT_A_LAW,
+ [PA_SAMPLE_ULAW] = SND_PCM_FORMAT_MU_LAW,
+ [PA_SAMPLE_S16LE] = SND_PCM_FORMAT_S16_LE,
+ [PA_SAMPLE_S16BE] = SND_PCM_FORMAT_S16_BE,
+ [PA_SAMPLE_FLOAT32LE] = SND_PCM_FORMAT_FLOAT_LE,
+ [PA_SAMPLE_FLOAT32BE] = SND_PCM_FORMAT_FLOAT_BE,
+ [PA_SAMPLE_S32LE] = SND_PCM_FORMAT_S32_LE,
+ [PA_SAMPLE_S32BE] = SND_PCM_FORMAT_S32_BE,
+ };
+
+ static const pa_sample_format_t try_order[] = {
+ PA_SAMPLE_FLOAT32NE,
+ PA_SAMPLE_FLOAT32RE,
+ PA_SAMPLE_S32NE,
+ PA_SAMPLE_S32RE,
+ PA_SAMPLE_S16NE,
+ PA_SAMPLE_S16RE,
+ PA_SAMPLE_ALAW,
+ PA_SAMPLE_ULAW,
+ PA_SAMPLE_U8,
+ PA_SAMPLE_INVALID
+ };
+
+ int i, ret;
+
+ pa_assert(pcm_handle);
+ pa_assert(f);
+
+ if ((ret = snd_pcm_hw_params_set_format(pcm_handle, hwparams, format_trans[*f])) >= 0)
+ return ret;
+
+ if (*f == PA_SAMPLE_FLOAT32BE)
+ *f = PA_SAMPLE_FLOAT32LE;
+ else if (*f == PA_SAMPLE_FLOAT32LE)
+ *f = PA_SAMPLE_FLOAT32BE;
+ else if (*f == PA_SAMPLE_S16BE)
+ *f = PA_SAMPLE_S16LE;
+ else if (*f == PA_SAMPLE_S16LE)
+ *f = PA_SAMPLE_S16BE;
+ else if (*f == PA_SAMPLE_S32BE)
+ *f = PA_SAMPLE_S32LE;
+ else if (*f == PA_SAMPLE_S32LE)
+ *f = PA_SAMPLE_S32BE;
+ else
+ goto try_auto;
+
+ if ((ret = snd_pcm_hw_params_set_format(pcm_handle, hwparams, format_trans[*f])) >= 0)
+ return ret;
+
+try_auto:
+
+ for (i = 0; try_order[i] != PA_SAMPLE_INVALID; i++) {
+ *f = try_order[i];
+
+ if ((ret = snd_pcm_hw_params_set_format(pcm_handle, hwparams, format_trans[*f])) >= 0)
+ return ret;
+ }
+
+ return -1;
+}
+
+/* Set the hardware parameters of the given ALSA device. Returns the
+ * selected fragment settings in *period and *period_size */
+int pa_alsa_set_hw_params(
+ snd_pcm_t *pcm_handle,
+ pa_sample_spec *ss,
+ uint32_t *periods,
+ snd_pcm_uframes_t *period_size,
+ pa_bool_t *use_mmap,
+ pa_bool_t require_exact_channel_number) {
+
+ int ret = -1;
+ snd_pcm_uframes_t buffer_size;
+ unsigned int r = ss->rate;
+ unsigned int c = ss->channels;
+ pa_sample_format_t f = ss->format;
+ snd_pcm_hw_params_t *hwparams;
+ pa_bool_t _use_mmap = use_mmap && *use_mmap;
+
+ pa_assert(pcm_handle);
+ pa_assert(ss);
+ pa_assert(periods);
+ pa_assert(period_size);
+
+ snd_pcm_hw_params_alloca(&hwparams);
+
+ buffer_size = *periods * *period_size;
+
+ if ((ret = snd_pcm_hw_params_any(pcm_handle, hwparams)) < 0)
+ goto finish;
+
+ if ((ret = snd_pcm_hw_params_set_rate_resample(pcm_handle, hwparams, 0)) < 0)
+ goto finish;
+
+ if (_use_mmap) {
+ if ((ret = snd_pcm_hw_params_set_access(pcm_handle, hwparams, SND_PCM_ACCESS_MMAP_INTERLEAVED)) < 0) {
+
+            /* mmap() didn't work, fall back to plain read/write access */
+
+ if ((ret = snd_pcm_hw_params_set_access(pcm_handle, hwparams, SND_PCM_ACCESS_RW_INTERLEAVED)) < 0)
+ goto finish;
+
+ _use_mmap = FALSE;
+ }
+
+ } else if ((ret = snd_pcm_hw_params_set_access(pcm_handle, hwparams, SND_PCM_ACCESS_RW_INTERLEAVED)) < 0)
+ goto finish;
+
+ if ((ret = set_format(pcm_handle, hwparams, &f)) < 0)
+ goto finish;
+
+ if ((ret = snd_pcm_hw_params_set_rate_near(pcm_handle, hwparams, &r, NULL)) < 0)
+ goto finish;
+
+ if (require_exact_channel_number) {
+ if ((ret = snd_pcm_hw_params_set_channels(pcm_handle, hwparams, c)) < 0)
+ goto finish;
+ } else {
+ if ((ret = snd_pcm_hw_params_set_channels_near(pcm_handle, hwparams, &c)) < 0)
+ goto finish;
+ }
+
+ if ((*period_size > 0 && (ret = snd_pcm_hw_params_set_period_size_near(pcm_handle, hwparams, period_size, NULL)) < 0) ||
+ (*periods > 0 && (ret = snd_pcm_hw_params_set_buffer_size_near(pcm_handle, hwparams, &buffer_size)) < 0))
+ goto finish;
+
+ if ((ret = snd_pcm_hw_params(pcm_handle, hwparams)) < 0)
+ goto finish;
+
+ if (ss->rate != r)
+ pa_log_warn("Device %s doesn't support %u Hz, changed to %u Hz.", snd_pcm_name(pcm_handle), ss->rate, r);
+
+ if (ss->channels != c)
+ pa_log_warn("Device %s doesn't support %u channels, changed to %u.", snd_pcm_name(pcm_handle), ss->channels, c);
+
+ if (ss->format != f)
+ pa_log_warn("Device %s doesn't support sample format %s, changed to %s.", snd_pcm_name(pcm_handle), pa_sample_format_to_string(ss->format), pa_sample_format_to_string(f));
+
+ if ((ret = snd_pcm_prepare(pcm_handle)) < 0)
+ goto finish;
+
+ if ((ret = snd_pcm_hw_params_get_buffer_size(hwparams, &buffer_size)) < 0 ||
+ (ret = snd_pcm_hw_params_get_period_size(hwparams, period_size, NULL)) < 0)
+ goto finish;
+
+ /* If the sample rate deviates too much, we need to resample */
+ if (r < ss->rate*.95 || r > ss->rate*1.05)
+ ss->rate = r;
+ ss->channels = c;
+ ss->format = f;
+
+ pa_assert(buffer_size > 0);
+ pa_assert(*period_size > 0);
+ *periods = buffer_size / *period_size;
+ pa_assert(*periods > 0);
+
+ if (use_mmap)
+ *use_mmap = _use_mmap;
+
+ ret = 0;
+
+finish:
+
+ return ret;
+}
+
+int pa_alsa_set_sw_params(snd_pcm_t *pcm) {
+ snd_pcm_sw_params_t *swparams;
+ int err;
+
+ pa_assert(pcm);
+
+ snd_pcm_sw_params_alloca(&swparams);
+
+    if ((err = snd_pcm_sw_params_current(pcm, swparams)) < 0) {
+ pa_log_warn("Unable to determine current swparams: %s\n", snd_strerror(err));
+ return err;
+ }
+
+ if ((err = snd_pcm_sw_params_set_stop_threshold(pcm, swparams, (snd_pcm_uframes_t) -1)) < 0) {
+ pa_log_warn("Unable to set stop threshold: %s\n", snd_strerror(err));
+ return err;
+ }
+
+ if ((err = snd_pcm_sw_params_set_start_threshold(pcm, swparams, (snd_pcm_uframes_t) -1)) < 0) {
+ pa_log_warn("Unable to set start threshold: %s\n", snd_strerror(err));
+ return err;
+ }
+
+ if ((err = snd_pcm_sw_params(pcm, swparams)) < 0) {
+ pa_log_warn("Unable to set sw params: %s\n", snd_strerror(err));
+ return err;
+ }
+
+ return 0;
+}
+
+struct device_info {
+ pa_channel_map map;
+ const char *name;
+};
+
+static const struct device_info device_table[] = {
+ {{ 2, { PA_CHANNEL_POSITION_LEFT, PA_CHANNEL_POSITION_RIGHT } }, "front" },
+
+ {{ 4, { PA_CHANNEL_POSITION_FRONT_LEFT, PA_CHANNEL_POSITION_FRONT_RIGHT,
+ PA_CHANNEL_POSITION_REAR_LEFT, PA_CHANNEL_POSITION_REAR_RIGHT }}, "surround40" },
+
+ {{ 5, { PA_CHANNEL_POSITION_FRONT_LEFT, PA_CHANNEL_POSITION_FRONT_RIGHT,
+ PA_CHANNEL_POSITION_REAR_LEFT, PA_CHANNEL_POSITION_REAR_RIGHT,
+ PA_CHANNEL_POSITION_LFE }}, "surround41" },
+
+ {{ 5, { PA_CHANNEL_POSITION_FRONT_LEFT, PA_CHANNEL_POSITION_FRONT_RIGHT,
+ PA_CHANNEL_POSITION_REAR_LEFT, PA_CHANNEL_POSITION_REAR_RIGHT,
+ PA_CHANNEL_POSITION_CENTER }}, "surround50" },
+
+ {{ 6, { PA_CHANNEL_POSITION_FRONT_LEFT, PA_CHANNEL_POSITION_FRONT_RIGHT,
+ PA_CHANNEL_POSITION_REAR_LEFT, PA_CHANNEL_POSITION_REAR_RIGHT,
+ PA_CHANNEL_POSITION_CENTER, PA_CHANNEL_POSITION_LFE }}, "surround51" },
+
+ {{ 8, { PA_CHANNEL_POSITION_FRONT_LEFT, PA_CHANNEL_POSITION_FRONT_RIGHT,
+ PA_CHANNEL_POSITION_REAR_LEFT, PA_CHANNEL_POSITION_REAR_RIGHT,
+ PA_CHANNEL_POSITION_CENTER, PA_CHANNEL_POSITION_LFE,
+ PA_CHANNEL_POSITION_SIDE_LEFT, PA_CHANNEL_POSITION_SIDE_RIGHT }} , "surround71" },
+
+ {{ 0, { 0 }}, NULL }
+};
+
+static pa_bool_t channel_map_superset(const pa_channel_map *a, const pa_channel_map *b) {
+ pa_bool_t in_a[PA_CHANNEL_POSITION_MAX];
+ unsigned i;
+
+ pa_assert(a);
+ pa_assert(b);
+
+ memset(in_a, 0, sizeof(in_a));
+
+ for (i = 0; i < a->channels; i++)
+ in_a[a->map[i]] = TRUE;
+
+ for (i = 0; i < b->channels; i++)
+ if (!in_a[b->map[i]])
+ return FALSE;
+
+ return TRUE;
+}
+
+snd_pcm_t *pa_alsa_open_by_device_id(
+ const char *dev_id,
+ char **dev,
+ pa_sample_spec *ss,
+ pa_channel_map* map,
+ int mode,
+ uint32_t *nfrags,
+ snd_pcm_uframes_t *period_size,
+ pa_bool_t *use_mmap) {
+
+ int i;
+ int direction = 1;
+ int err;
+ char *d;
+ snd_pcm_t *pcm_handle;
+
+ pa_assert(dev_id);
+ pa_assert(dev);
+ pa_assert(ss);
+ pa_assert(map);
+ pa_assert(nfrags);
+ pa_assert(period_size);
+
+ /* First we try to find a device string with a superset of the
+ * requested channel map and open it without the plug: prefix. We
+ * iterate through our device table from top to bottom and take
+ * the first that matches. If we didn't find a working device that
+ * way, we iterate backwards, and check all devices that do not
+ * provide a superset of the requested channel map.*/
+
+ for (i = 0;; i += direction) {
+ pa_sample_spec try_ss;
+
+ if (i < 0) {
+ pa_assert(direction == -1);
+
+ /* OK, so we iterated backwards, and now are at the
+ * beginning of our list. */
+
+ break;
+
+ } else if (!device_table[i].name) {
+ pa_assert(direction == 1);
+
+            /* OK, so we are at the end of our list, having
+             * iterated forwards. */
+
+ i--;
+ direction = -1;
+ }
+
+ if ((direction > 0) == !channel_map_superset(&device_table[i].map, map))
+ continue;
+
+ d = pa_sprintf_malloc("%s:%s", device_table[i].name, dev_id);
+ pa_log_debug("Trying %s...", d);
+
+ if ((err = snd_pcm_open(&pcm_handle, d, mode, SND_PCM_NONBLOCK)) < 0) {
+ pa_log_info("Couldn't open PCM device %s: %s", d, snd_strerror(err));
+ pa_xfree(d);
+ continue;
+ }
+
+ try_ss.channels = device_table[i].map.channels;
+ try_ss.rate = ss->rate;
+ try_ss.format = ss->format;
+
+ if ((err = pa_alsa_set_hw_params(pcm_handle, &try_ss, nfrags, period_size, use_mmap, TRUE)) < 0) {
+ pa_log_info("PCM device %s refused our hw parameters: %s", d, snd_strerror(err));
+ pa_xfree(d);
+ snd_pcm_close(pcm_handle);
+ continue;
+ }
+
+ *ss = try_ss;
+ *map = device_table[i].map;
+ pa_assert(map->channels == ss->channels);
+ *dev = d;
+ return pcm_handle;
+ }
+
+ /* OK, we didn't find any good device, so let's try the raw hw: stuff */
+
+ d = pa_sprintf_malloc("hw:%s", dev_id);
+ pa_log_debug("Trying %s as last resort...", d);
+ pcm_handle = pa_alsa_open_by_device_string(d, dev, ss, map, mode, nfrags, period_size, use_mmap);
+ pa_xfree(d);
+
+ return pcm_handle;
+}
+
+snd_pcm_t *pa_alsa_open_by_device_string(
+ const char *device,
+ char **dev,
+ pa_sample_spec *ss,
+ pa_channel_map* map,
+ int mode,
+ uint32_t *nfrags,
+ snd_pcm_uframes_t *period_size,
+ pa_bool_t *use_mmap) {
+
+ int err;
+ char *d;
+ snd_pcm_t *pcm_handle;
+
+ pa_assert(device);
+ pa_assert(dev);
+ pa_assert(ss);
+ pa_assert(map);
+ pa_assert(nfrags);
+ pa_assert(period_size);
+
+ d = pa_xstrdup(device);
+
+ for (;;) {
+
+ if ((err = snd_pcm_open(&pcm_handle, d, mode, SND_PCM_NONBLOCK)) < 0) {
+ pa_log("Error opening PCM device %s: %s", d, snd_strerror(err));
+ pa_xfree(d);
+ return NULL;
+ }
+
+ if ((err = pa_alsa_set_hw_params(pcm_handle, ss, nfrags, period_size, use_mmap, FALSE)) < 0) {
+
+ if (err == -EPERM) {
+                /* Hmm, some hw is very exotic, so we retry with the plug layer if opening without it didn't work */
+
+ if (pa_startswith(d, "hw:")) {
+ char *t = pa_sprintf_malloc("plughw:%s", d+3);
+ pa_log_debug("Opening the device as '%s' didn't work, retrying with '%s'.", d, t);
+ pa_xfree(d);
+ d = t;
+
+ snd_pcm_close(pcm_handle);
+ continue;
+ }
+
+ pa_log("Failed to set hardware parameters on %s: %s", d, snd_strerror(err));
+ pa_xfree(d);
+ snd_pcm_close(pcm_handle);
+ return NULL;
+ }
+ }
+
+ *dev = d;
+
+ if (ss->channels != map->channels) {
+ pa_assert_se(pa_channel_map_init_auto(map, ss->channels, PA_CHANNEL_MAP_AUX));
+ pa_channel_map_init_auto(map, ss->channels, PA_CHANNEL_MAP_ALSA);
+ }
+
+ return pcm_handle;
+ }
+}
+
+int pa_alsa_prepare_mixer(snd_mixer_t *mixer, const char *dev) {
+ int err;
+
+ pa_assert(mixer);
+ pa_assert(dev);
+
+ if ((err = snd_mixer_attach(mixer, dev)) < 0) {
+ pa_log_info("Unable to attach to mixer %s: %s", dev, snd_strerror(err));
+ return -1;
+ }
+
+ if ((err = snd_mixer_selem_register(mixer, NULL, NULL)) < 0) {
+ pa_log_warn("Unable to register mixer: %s", snd_strerror(err));
+ return -1;
+ }
+
+ if ((err = snd_mixer_load(mixer)) < 0) {
+ pa_log_warn("Unable to load mixer: %s", snd_strerror(err));
+ return -1;
+ }
+
+ pa_log_info("Successfully attached to mixer '%s'", dev);
+
+ return 0;
+}
+
+snd_mixer_elem_t *pa_alsa_find_elem(snd_mixer_t *mixer, const char *name, const char *fallback) {
+ snd_mixer_elem_t *elem;
+ snd_mixer_selem_id_t *sid = NULL;
+
+ snd_mixer_selem_id_alloca(&sid);
+
+ pa_assert(mixer);
+ pa_assert(name);
+
+ snd_mixer_selem_id_set_name(sid, name);
+
+ if (!(elem = snd_mixer_find_selem(mixer, sid))) {
+ pa_log_info("Cannot find mixer control \"%s\".", snd_mixer_selem_id_get_name(sid));
+
+ if (fallback) {
+ snd_mixer_selem_id_set_name(sid, fallback);
+
+ if (!(elem = snd_mixer_find_selem(mixer, sid)))
+ pa_log_warn("Cannot find fallback mixer control \"%s\".", snd_mixer_selem_id_get_name(sid));
+ }
+ }
+
+ if (elem)
+ pa_log_info("Using mixer control \"%s\".", snd_mixer_selem_id_get_name(sid));
+
+ return elem;
+}
+
+static const snd_mixer_selem_channel_id_t alsa_channel_ids[PA_CHANNEL_POSITION_MAX] = {
+ [PA_CHANNEL_POSITION_MONO] = SND_MIXER_SCHN_MONO, /* The ALSA name is just an alias! */
+
+ [PA_CHANNEL_POSITION_FRONT_CENTER] = SND_MIXER_SCHN_FRONT_CENTER,
+ [PA_CHANNEL_POSITION_FRONT_LEFT] = SND_MIXER_SCHN_FRONT_LEFT,
+ [PA_CHANNEL_POSITION_FRONT_RIGHT] = SND_MIXER_SCHN_FRONT_RIGHT,
+
+ [PA_CHANNEL_POSITION_REAR_CENTER] = SND_MIXER_SCHN_REAR_CENTER,
+ [PA_CHANNEL_POSITION_REAR_LEFT] = SND_MIXER_SCHN_REAR_LEFT,
+ [PA_CHANNEL_POSITION_REAR_RIGHT] = SND_MIXER_SCHN_REAR_RIGHT,
+
+ [PA_CHANNEL_POSITION_LFE] = SND_MIXER_SCHN_WOOFER,
+
+ [PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER] = SND_MIXER_SCHN_UNKNOWN,
+
+ [PA_CHANNEL_POSITION_SIDE_LEFT] = SND_MIXER_SCHN_SIDE_LEFT,
+ [PA_CHANNEL_POSITION_SIDE_RIGHT] = SND_MIXER_SCHN_SIDE_RIGHT,
+
+ [PA_CHANNEL_POSITION_AUX0] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX1] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX2] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX3] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX4] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX5] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX6] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX7] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX8] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX9] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX10] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX11] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX12] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX13] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX14] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX15] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX16] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX17] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX18] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX19] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX20] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX21] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX22] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX23] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX24] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX25] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX26] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX27] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX28] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX29] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX30] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_AUX31] = SND_MIXER_SCHN_UNKNOWN,
+
+ [PA_CHANNEL_POSITION_TOP_CENTER] = SND_MIXER_SCHN_UNKNOWN,
+
+ [PA_CHANNEL_POSITION_TOP_FRONT_CENTER] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_TOP_FRONT_LEFT] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_TOP_FRONT_RIGHT] = SND_MIXER_SCHN_UNKNOWN,
+
+ [PA_CHANNEL_POSITION_TOP_REAR_CENTER] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_TOP_REAR_LEFT] = SND_MIXER_SCHN_UNKNOWN,
+ [PA_CHANNEL_POSITION_TOP_REAR_RIGHT] = SND_MIXER_SCHN_UNKNOWN
+};
+
+
+int pa_alsa_calc_mixer_map(snd_mixer_elem_t *elem, const pa_channel_map *channel_map, snd_mixer_selem_channel_id_t mixer_map[], pa_bool_t playback) {
+ unsigned i;
+ pa_bool_t alsa_channel_used[SND_MIXER_SCHN_LAST];
+ pa_bool_t mono_used = FALSE;
+
+ pa_assert(elem);
+ pa_assert(channel_map);
+ pa_assert(mixer_map);
+
+ memset(&alsa_channel_used, 0, sizeof(alsa_channel_used));
+
+ if (channel_map->channels > 1 &&
+ ((playback && snd_mixer_selem_has_playback_volume_joined(elem)) ||
+ (!playback && snd_mixer_selem_has_capture_volume_joined(elem)))) {
+        pa_log_info("ALSA device lacks independent volume controls for each channel, falling back to software volume control.");
+ return -1;
+ }
+
+ for (i = 0; i < channel_map->channels; i++) {
+ snd_mixer_selem_channel_id_t id;
+ pa_bool_t is_mono;
+
+ is_mono = channel_map->map[i] == PA_CHANNEL_POSITION_MONO;
+ id = alsa_channel_ids[channel_map->map[i]];
+
+ if (!is_mono && id == SND_MIXER_SCHN_UNKNOWN) {
+ pa_log_info("Configured channel map contains channel '%s' that is unknown to the ALSA mixer. Falling back to software volume control.", pa_channel_position_to_string(channel_map->map[i]));
+ return -1;
+ }
+
+ if ((is_mono && mono_used) || (!is_mono && alsa_channel_used[id])) {
+            pa_log_info("Channel map has duplicate channel '%s', falling back to software volume control.", pa_channel_position_to_string(channel_map->map[i]));
+ return -1;
+ }
+
+ if ((playback && (!snd_mixer_selem_has_playback_channel(elem, id) || (is_mono && !snd_mixer_selem_is_playback_mono(elem)))) ||
+ (!playback && (!snd_mixer_selem_has_capture_channel(elem, id) || (is_mono && !snd_mixer_selem_is_capture_mono(elem))))) {
+
+            pa_log_info("ALSA device lacks separate volume control for channel '%s', falling back to software volume control.", pa_channel_position_to_string(channel_map->map[i]));
+ return -1;
+ }
+
+ if (is_mono) {
+ mixer_map[i] = SND_MIXER_SCHN_MONO;
+ mono_used = TRUE;
+ } else {
+ mixer_map[i] = id;
+ alsa_channel_used[id] = TRUE;
+ }
+ }
+
+ pa_log_info("All %u channels can be mapped to mixer channels. Using hardware volume control.", channel_map->channels);
+
+ return 0;
+}
diff --git a/src/modules/alsa-util.h b/src/modules/alsa-util.h
new file mode 100644
index 00000000..53d9a2fb
--- /dev/null
+++ b/src/modules/alsa-util.h
@@ -0,0 +1,76 @@
+#ifndef fooalsautilhfoo
+#define fooalsautilhfoo
+
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+ Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#include <asoundlib.h>
+
+#include <pulse/sample.h>
+#include <pulse/mainloop-api.h>
+
+#include <pulse/channelmap.h>
+
+typedef struct pa_alsa_fdlist pa_alsa_fdlist;
+
+struct pa_alsa_fdlist *pa_alsa_fdlist_new(void);
+void pa_alsa_fdlist_free(struct pa_alsa_fdlist *fdl);
+int pa_alsa_fdlist_set_mixer(struct pa_alsa_fdlist *fdl, snd_mixer_t *mixer_handle, pa_mainloop_api* m);
+
+int pa_alsa_set_hw_params(
+ snd_pcm_t *pcm_handle,
+ pa_sample_spec *ss,
+ uint32_t *periods,
+ snd_pcm_uframes_t *period_size,
+ pa_bool_t *use_mmap,
+ pa_bool_t require_exact_channel_number);
+
+int pa_alsa_set_sw_params(snd_pcm_t *pcm);
+
+int pa_alsa_prepare_mixer(snd_mixer_t *mixer, const char *dev);
+snd_mixer_elem_t *pa_alsa_find_elem(snd_mixer_t *mixer, const char *name, const char *fallback);
+
+snd_pcm_t *pa_alsa_open_by_device_id(
+ const char *dev_id,
+ char **dev,
+ pa_sample_spec *ss,
+ pa_channel_map* map,
+ int mode,
+ uint32_t *nfrags,
+ snd_pcm_uframes_t *period_size,
+ pa_bool_t *use_mmap);
+
+snd_pcm_t *pa_alsa_open_by_device_string(
+ const char *device,
+ char **dev,
+ pa_sample_spec *ss,
+ pa_channel_map* map,
+ int mode,
+ uint32_t *nfrags,
+ snd_pcm_uframes_t *period_size,
+ pa_bool_t *use_mmap);
+
+int pa_alsa_calc_mixer_map(snd_mixer_elem_t *elem, const pa_channel_map *channel_map, snd_mixer_selem_channel_id_t mixer_map[], pa_bool_t playback);
+
+#endif
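
The helpers declared above are driven by the ALSA sink and source modules listed in the diffstat but not shown in this excerpt. As a rough usage sketch only (the device string, sample spec and fragment values below are illustrative, not taken from this commit), the expected calling sequence is: open the PCM, which negotiates the hw params internally, then apply the sw params:

/* Hypothetical sketch of driving the alsa-util helpers; the device
 * string, sample spec and fragment values are illustrative only. */
#include <pulsecore/macro.h>
#include <pulse/sample.h>
#include <pulse/channelmap.h>
#include "alsa-util.h"

static snd_pcm_t *open_playback_pcm(char **dev_out) {
    pa_sample_spec ss;
    pa_channel_map map;
    uint32_t nfrags = 8;                  /* requested fragment count */
    snd_pcm_uframes_t period_size = 1024; /* requested fragment size in frames */
    pa_bool_t use_mmap = TRUE;            /* cleared by the helper if mmap fails */
    snd_pcm_t *pcm;

    ss.format = PA_SAMPLE_S16NE;
    ss.rate = 44100;
    ss.channels = 2;
    pa_channel_map_init_stereo(&map);

    /* Opens the device (retrying via plughw: on -EPERM) and calls
     * pa_alsa_set_hw_params() internally; ss, map, nfrags, period_size
     * and use_mmap are updated to what was actually negotiated. */
    if (!(pcm = pa_alsa_open_by_device_string("hw:0", dev_out, &ss, &map,
                                              SND_PCM_STREAM_PLAYBACK,
                                              &nfrags, &period_size, &use_mmap)))
        return NULL;

    /* Disable ALSA's own start/stop handling; the caller drives the device. */
    if (pa_alsa_set_sw_params(pcm) < 0) {
        snd_pcm_close(pcm);
        return NULL;
    }

    return pcm;
}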
diff --git a/src/modules/bt-proximity-helper.c b/src/modules/bt-proximity-helper.c
new file mode 100644
index 00000000..d80cc0c1
--- /dev/null
+++ b/src/modules/bt-proximity-helper.c
@@ -0,0 +1,210 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2007 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+/*
+ * Small SUID helper that allows us to ping a BT device. Borrows
+ * heavily from bluez-utils' l2ping, which is licensed as GPL2+, too
+ * and comes with a copyright like this:
+ *
+ * Copyright (C) 2000-2001 Qualcomm Incorporated
+ * Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com>
+ * Copyright (C) 2002-2007 Marcel Holtmann <marcel@holtmann.org>
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#undef NDEBUG
+
+#include <assert.h>
+#include <stdio.h>
+#include <errno.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <sys/select.h>
+
+#include <bluetooth/bluetooth.h>
+#include <bluetooth/hci.h>
+#include <bluetooth/hci_lib.h>
+#include <bluetooth/l2cap.h>
+
+#define PING_STRING "PulseAudio"
+#define IDENT 200
+#define TIMEOUT 4
+#define INTERVAL 2
+
+static void update_status(int found) {
+ static int status = -1;
+
+ if (!found && status != 0)
+ printf("-");
+ if (found && status <= 0)
+ printf("+");
+
+ fflush(stdout);
+ status = !!found;
+}
+
+int main(int argc, char *argv[]) {
+ struct sockaddr_l2 addr;
+ union {
+ l2cap_cmd_hdr hdr;
+ uint8_t buf[L2CAP_CMD_HDR_SIZE + sizeof(PING_STRING)];
+ } packet;
+ int fd = -1;
+ uint8_t id = IDENT;
+ int connected = 0;
+
+ assert(argc == 2);
+
+ for (;;) {
+ fd_set fds;
+ struct timeval end;
+ ssize_t r;
+
+ if (!connected) {
+
+ if (fd >= 0)
+ close(fd);
+
+ if ((fd = socket(PF_BLUETOOTH, SOCK_RAW, BTPROTO_L2CAP)) < 0) {
+ fprintf(stderr, "socket(PF_BLUETOOTH, SOCK_RAW, BTPROTO_L2CAP) failed: %s", strerror(errno));
+ goto finish;
+ }
+
+ memset(&addr, 0, sizeof(addr));
+ addr.l2_family = AF_BLUETOOTH;
+ bacpy(&addr.l2_bdaddr, BDADDR_ANY);
+
+ if (bind(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
+ fprintf(stderr, "bind() failed: %s", strerror(errno));
+ goto finish;
+ }
+
+ memset(&addr, 0, sizeof(addr));
+ addr.l2_family = AF_BLUETOOTH;
+ str2ba(argv[1], &addr.l2_bdaddr);
+
+ if (connect(fd, (struct sockaddr *) &addr, sizeof(addr)) < 0) {
+
+ if (errno == EHOSTDOWN || errno == ECONNRESET || errno == ETIMEDOUT) {
+ update_status(0);
+ sleep(INTERVAL);
+ continue;
+ }
+
+ fprintf(stderr, "connect() failed: %s", strerror(errno));
+ goto finish;
+ }
+
+ connected = 1;
+ }
+
+ assert(connected);
+
+ memset(&packet, 0, sizeof(packet));
+ strcpy((char*) packet.buf + L2CAP_CMD_HDR_SIZE, PING_STRING);
+ packet.hdr.ident = id;
+ packet.hdr.len = htobs(sizeof(PING_STRING));
+ packet.hdr.code = L2CAP_ECHO_REQ;
+
+ if ((r = send(fd, &packet, sizeof(packet), 0)) < 0) {
+
+ if (errno == EHOSTDOWN || errno == ECONNRESET || errno == ETIMEDOUT) {
+ update_status(0);
+ connected = 0;
+ sleep(INTERVAL);
+ continue;
+ }
+
+ fprintf(stderr, "send() failed: %s", strerror(errno));
+ goto finish;
+ }
+
+ assert(r == sizeof(packet));
+
+ gettimeofday(&end, NULL);
+ end.tv_sec += TIMEOUT;
+
+ for (;;) {
+ struct timeval now, delta;
+
+ gettimeofday(&now, NULL);
+
+ if (timercmp(&end, &now, <=)) {
+ update_status(0);
+ connected = 0;
+ sleep(INTERVAL);
+ break;
+ }
+
+ timersub(&end, &now, &delta);
+
+ FD_ZERO(&fds);
+ FD_SET(fd, &fds);
+
+ if (select(fd+1, &fds, NULL, NULL, &delta) < 0) {
+ fprintf(stderr, "select() failed: %s", strerror(errno));
+ goto finish;
+ }
+
+ if ((r = recv(fd, &packet, sizeof(packet), 0)) <= 0) {
+
+ if (errno == EHOSTDOWN || errno == ECONNRESET || errno == ETIMEDOUT) {
+ update_status(0);
+ connected = 0;
+ sleep(INTERVAL);
+ break;
+ }
+
+            fprintf(stderr, "recv() failed: %s", r == 0 ? "EOF" : strerror(errno));
+ goto finish;
+ }
+
+ assert(r >= L2CAP_CMD_HDR_SIZE);
+
+ if (packet.hdr.ident != id)
+ continue;
+
+ if (packet.hdr.code == L2CAP_ECHO_RSP || packet.hdr.code == L2CAP_COMMAND_REJ) {
+
+ if (++id >= 0xFF)
+ id = IDENT;
+
+ update_status(1);
+ sleep(INTERVAL);
+ break;
+ }
+ }
+ }
+
+finish:
+
+ if (fd >= 0)
+ close(fd);
+
+ return 1;
+}
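
The matching module-bt-proximity.c (listed in the diffstat, not shown in this excerpt) only needs to read this one-character status stream from the helper's stdout. A minimal, purely illustrative reader, assuming the pipe to the helper is already set up:

/* Illustrative consumer of the bt-proximity-helper protocol: the helper
 * writes '+' when the device becomes reachable and '-' when it stops
 * answering, one character per state change. */
#include <stdio.h>
#include <unistd.h>

static void watch_proximity(int helper_fd) {
    char c;

    while (read(helper_fd, &c, 1) == 1) {
        if (c == '+')
            printf("device in range\n");      /* e.g. unmute the sink */
        else if (c == '-')
            printf("device out of range\n");  /* e.g. mute the sink */
    }

    /* read() returned 0 or -1: the helper exited or the pipe broke. */
}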
diff --git a/src/modules/dbus-util.c b/src/modules/dbus-util.c
new file mode 100644
index 00000000..fc1e91ea
--- /dev/null
+++ b/src/modules/dbus-util.c
@@ -0,0 +1,329 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+ Copyright 2006 Shams E. King
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <pulse/xmalloc.h>
+#include <pulse/timeval.h>
+#include <pulsecore/log.h>
+#include <pulsecore/props.h>
+
+#include "dbus-util.h"
+
+struct pa_dbus_connection {
+ PA_REFCNT_DECLARE;
+
+ pa_core *core;
+ DBusConnection *connection;
+ const char *property_name;
+ pa_defer_event* dispatch_event;
+};
+
+static void dispatch_cb(pa_mainloop_api *ea, pa_defer_event *ev, void *userdata) {
+ DBusConnection *conn = userdata;
+
+ if (dbus_connection_dispatch(conn) == DBUS_DISPATCH_COMPLETE) {
+ /* no more data to process, disable the deferred */
+ ea->defer_enable(ev, 0);
+ }
+}
+
+/* DBusDispatchStatusFunction callback for the pa mainloop */
+static void dispatch_status(DBusConnection *conn, DBusDispatchStatus status, void *userdata) {
+ pa_dbus_connection *c = userdata;
+
+ pa_assert(c);
+
+ switch(status) {
+
+ case DBUS_DISPATCH_COMPLETE:
+ c->core->mainloop->defer_enable(c->dispatch_event, 0);
+ break;
+
+ case DBUS_DISPATCH_DATA_REMAINS:
+ case DBUS_DISPATCH_NEED_MEMORY:
+ default:
+ c->core->mainloop->defer_enable(c->dispatch_event, 1);
+ break;
+ }
+}
+
+static pa_io_event_flags_t get_watch_flags(DBusWatch *watch) {
+ unsigned int flags;
+ pa_io_event_flags_t events = 0;
+
+ pa_assert(watch);
+
+ flags = dbus_watch_get_flags(watch);
+
+ /* no watch flags for disabled watches */
+ if (!dbus_watch_get_enabled(watch))
+ return PA_IO_EVENT_NULL;
+
+ if (flags & DBUS_WATCH_READABLE)
+ events |= PA_IO_EVENT_INPUT;
+ if (flags & DBUS_WATCH_WRITABLE)
+ events |= PA_IO_EVENT_OUTPUT;
+
+ return events | PA_IO_EVENT_HANGUP | PA_IO_EVENT_ERROR;
+}
+
+/* pa_io_event_cb_t IO event handler */
+static void handle_io_event(PA_GCC_UNUSED pa_mainloop_api *ea, pa_io_event *e, int fd, pa_io_event_flags_t events, void *userdata) {
+ unsigned int flags = 0;
+ DBusWatch *watch = userdata;
+
+#if HAVE_DBUS_WATCH_GET_UNIX_FD
+ pa_assert(fd == dbus_watch_get_unix_fd(watch));
+#else
+ pa_assert(fd == dbus_watch_get_fd(watch));
+#endif
+
+ if (!dbus_watch_get_enabled(watch)) {
+ pa_log_warn("Asked to handle disabled watch: %p %i", (void*) watch, fd);
+ return;
+ }
+
+ if (events & PA_IO_EVENT_INPUT)
+ flags |= DBUS_WATCH_READABLE;
+ if (events & PA_IO_EVENT_OUTPUT)
+ flags |= DBUS_WATCH_WRITABLE;
+ if (events & PA_IO_EVENT_HANGUP)
+ flags |= DBUS_WATCH_HANGUP;
+ if (events & PA_IO_EVENT_ERROR)
+ flags |= DBUS_WATCH_ERROR;
+
+ dbus_watch_handle(watch, flags);
+}
+
+/* pa_time_event_cb_t timer event handler */
+static void handle_time_event(pa_mainloop_api *ea, pa_time_event* e, const struct timeval *tv, void *userdata) {
+ DBusTimeout *timeout = userdata;
+
+ if (dbus_timeout_get_enabled(timeout)) {
+ struct timeval next = *tv;
+ dbus_timeout_handle(timeout);
+
+ /* restart it for the next scheduled time */
+ pa_timeval_add(&next, dbus_timeout_get_interval(timeout) * 1000);
+ ea->time_restart(e, &next);
+ }
+}
+
+/* DBusAddWatchFunction callback for pa mainloop */
+static dbus_bool_t add_watch(DBusWatch *watch, void *data) {
+ pa_core *c = PA_CORE(data);
+ pa_io_event *ev;
+
+ pa_assert(watch);
+ pa_assert(c);
+
+ ev = c->mainloop->io_new(
+ c->mainloop,
+#if HAVE_DBUS_WATCH_GET_UNIX_FD
+ dbus_watch_get_unix_fd(watch),
+#else
+ dbus_watch_get_fd(watch),
+#endif
+ get_watch_flags(watch), handle_io_event, watch);
+
+ dbus_watch_set_data(watch, ev, NULL);
+
+ return TRUE;
+}
+
+/* DBusRemoveWatchFunction callback for pa mainloop */
+static void remove_watch(DBusWatch *watch, void *data) {
+ pa_core *c = PA_CORE(data);
+ pa_io_event *ev;
+
+ pa_assert(watch);
+ pa_assert(c);
+
+ if ((ev = dbus_watch_get_data(watch)))
+ c->mainloop->io_free(ev);
+}
+
+/* DBusWatchToggledFunction callback for pa mainloop */
+static void toggle_watch(DBusWatch *watch, void *data) {
+ pa_core *c = PA_CORE(data);
+ pa_io_event *ev;
+
+ pa_assert(watch);
+ pa_core_assert_ref(c);
+
+ pa_assert_se(ev = dbus_watch_get_data(watch));
+
+ /* get_watch_flags() checks if the watch is enabled */
+ c->mainloop->io_enable(ev, get_watch_flags(watch));
+}
+
+/* DBusAddTimeoutFunction callback for pa mainloop */
+static dbus_bool_t add_timeout(DBusTimeout *timeout, void *data) {
+ pa_core *c = PA_CORE(data);
+ pa_time_event *ev;
+ struct timeval tv;
+
+ pa_assert(timeout);
+ pa_assert(c);
+
+ if (!dbus_timeout_get_enabled(timeout))
+ return FALSE;
+
+ pa_gettimeofday(&tv);
+ pa_timeval_add(&tv, dbus_timeout_get_interval(timeout) * 1000);
+
+ ev = c->mainloop->time_new(c->mainloop, &tv, handle_time_event, timeout);
+
+ dbus_timeout_set_data(timeout, ev, NULL);
+
+ return TRUE;
+}
+
+/* DBusRemoveTimeoutFunction callback for pa mainloop */
+static void remove_timeout(DBusTimeout *timeout, void *data) {
+ pa_core *c = PA_CORE(data);
+ pa_time_event *ev;
+
+ pa_assert(timeout);
+ pa_assert(c);
+
+ if ((ev = dbus_timeout_get_data(timeout)))
+ c->mainloop->time_free(ev);
+}
+
+/* DBusTimeoutToggledFunction callback for pa mainloop */
+static void toggle_timeout(DBusTimeout *timeout, void *data) {
+ pa_core *c = PA_CORE(data);
+ pa_time_event *ev;
+
+ pa_assert(timeout);
+ pa_assert(c);
+
+ pa_assert_se(ev = dbus_timeout_get_data(timeout));
+
+ if (dbus_timeout_get_enabled(timeout)) {
+ struct timeval tv;
+
+ pa_gettimeofday(&tv);
+ pa_timeval_add(&tv, dbus_timeout_get_interval(timeout) * 1000);
+
+ c->mainloop->time_restart(ev, &tv);
+ } else
+ c->mainloop->time_restart(ev, NULL);
+}
+
+static void wakeup_main(void *userdata) {
+ pa_dbus_connection *c = userdata;
+
+ pa_assert(c);
+
+    /* this will wake up the mainloop and dispatch events, although
+     * it may not be the cleanest way of accomplishing it */
+ c->core->mainloop->defer_enable(c->dispatch_event, 1);
+}
+
+static pa_dbus_connection* pa_dbus_connection_new(pa_core* c, DBusConnection *conn, const char* name) {
+ pa_dbus_connection *pconn;
+
+ pconn = pa_xnew(pa_dbus_connection, 1);
+ PA_REFCNT_INIT(pconn);
+ pconn->core = c;
+ pconn->property_name = name;
+ pconn->connection = conn;
+ pconn->dispatch_event = c->mainloop->defer_new(c->mainloop, dispatch_cb, conn);
+
+ pa_property_set(c, name, pconn);
+
+ return pconn;
+}
+
+DBusConnection* pa_dbus_connection_get(pa_dbus_connection *c){
+ pa_assert(c);
+ pa_assert(PA_REFCNT_VALUE(c) > 0);
+ pa_assert(c->connection);
+
+ return c->connection;
+}
+
+void pa_dbus_connection_unref(pa_dbus_connection *c) {
+ pa_assert(c);
+ pa_assert(PA_REFCNT_VALUE(c) > 0);
+
+ if (PA_REFCNT_DEC(c) > 0)
+ return;
+
+ if (dbus_connection_get_is_connected(c->connection)) {
+ dbus_connection_close(c->connection);
+ /* must process remaining messages, bit of a kludge to handle
+ * both unload and shutdown */
+ while (dbus_connection_read_write_dispatch(c->connection, -1));
+ }
+
+ /* already disconnected, just free */
+ pa_property_remove(c->core, c->property_name);
+ c->core->mainloop->defer_free(c->dispatch_event);
+ dbus_connection_unref(c->connection);
+ pa_xfree(c);
+}
+
+pa_dbus_connection* pa_dbus_connection_ref(pa_dbus_connection *c) {
+ pa_assert(c);
+ pa_assert(PA_REFCNT_VALUE(c) > 0);
+
+ PA_REFCNT_INC(c);
+
+ return c;
+}
+
+pa_dbus_connection* pa_dbus_bus_get(pa_core *c, DBusBusType type, DBusError *error) {
+
+ static const char *const prop_name[] = {
+ [DBUS_BUS_SESSION] = "dbus-connection-session",
+ [DBUS_BUS_SYSTEM] = "dbus-connection-system",
+ [DBUS_BUS_STARTER] = "dbus-connection-starter"
+ };
+ DBusConnection *conn;
+ pa_dbus_connection *pconn;
+
+ pa_assert(type == DBUS_BUS_SYSTEM || type == DBUS_BUS_SESSION || type == DBUS_BUS_STARTER);
+
+ if ((pconn = pa_property_get(c, prop_name[type])))
+ return pa_dbus_connection_ref(pconn);
+
+ if (!(conn = dbus_bus_get_private(type, error)))
+ return NULL;
+
+ pconn = pa_dbus_connection_new(c, conn, prop_name[type]);
+
+ dbus_connection_set_exit_on_disconnect(conn, FALSE);
+ dbus_connection_set_dispatch_status_function(conn, dispatch_status, pconn, NULL);
+ dbus_connection_set_watch_functions(conn, add_watch, remove_watch, toggle_watch, c, NULL);
+ dbus_connection_set_timeout_functions(conn, add_timeout, remove_timeout, toggle_timeout, c, NULL);
+ dbus_connection_set_wakeup_main_function(conn, wakeup_main, pconn, NULL);
+
+ return pconn;
+}
diff --git a/src/modules/dbus-util.h b/src/modules/dbus-util.h
new file mode 100644
index 00000000..8dca54fe
--- /dev/null
+++ b/src/modules/dbus-util.h
@@ -0,0 +1,40 @@
+#ifndef foodbusutilhfoo
+#define foodbusutilhfoo
+
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Shams E. King
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#include <dbus/dbus.h>
+
+typedef struct pa_dbus_connection pa_dbus_connection;
+
+/* return the DBusConnection of the specified type for the given core,
+ * like dbus_bus_get(), but integrates the connection with the pa_core */
+pa_dbus_connection* pa_dbus_bus_get(pa_core *c, DBusBusType type, DBusError *error);
+
+DBusConnection* pa_dbus_connection_get(pa_dbus_connection *conn);
+
+pa_dbus_connection* pa_dbus_connection_ref(pa_dbus_connection *conn);
+void pa_dbus_connection_unref(pa_dbus_connection *conn);
+
+#endif
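
As with the ALSA helpers, here is a short hedged sketch of how a module would use this API; the bus choice and error handling are illustrative, and pa_core comes from pulsecore/core.h:

/* Hypothetical usage sketch for the shared, mainloop-integrated D-Bus
 * connection helper declared above. */
#include <pulsecore/core.h>
#include <pulsecore/log.h>
#include "dbus-util.h"

static pa_dbus_connection *acquire_system_bus(pa_core *core) {
    DBusError error;
    pa_dbus_connection *conn;

    dbus_error_init(&error);

    /* Like dbus_bus_get(), but the returned connection is ref-counted,
     * stored in the core property list and dispatched from the PulseAudio
     * mainloop, so all modules of one daemon share a single connection. */
    if (!(conn = pa_dbus_bus_get(core, DBUS_BUS_SYSTEM, &error))) {
        pa_log("Failed to get system bus connection: %s", error.message);
        dbus_error_free(&error);
        return NULL;
    }

    /* pa_dbus_connection_get(conn) exposes the raw DBusConnection for
     * adding filters or match rules; drop the reference with
     * pa_dbus_connection_unref(conn) when the module is unloaded. */
    return conn;
}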
diff --git a/src/modules/gconf/Makefile b/src/modules/gconf/Makefile
new file mode 100644
index 00000000..316beb72
--- /dev/null
+++ b/src/modules/gconf/Makefile
@@ -0,0 +1,13 @@
+# This is a dirty trick just to ease compilation with emacs
+#
+# This file is not intended to be distributed or anything
+#
+# So: don't touch it, even better ignore it!
+
+all:
+ $(MAKE) -C ../..
+
+clean:
+ $(MAKE) -C ../.. clean
+
+.PHONY: all clean
diff --git a/src/modules/gconf/gconf-helper.c b/src/modules/gconf/gconf-helper.c
new file mode 100644
index 00000000..abd13287
--- /dev/null
+++ b/src/modules/gconf/gconf-helper.c
@@ -0,0 +1,135 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#include <gconf/gconf-client.h>
+#include <glib.h>
+
+#include <pulsecore/core-util.h>
+
+#define PA_GCONF_ROOT "/system/pulseaudio"
+#define PA_GCONF_PATH_MODULES PA_GCONF_ROOT"/modules"
+
+static void handle_module(GConfClient *client, const char *name) {
+ gchar p[1024];
+ gboolean enabled, locked;
+ int i;
+
+ pa_snprintf(p, sizeof(p), PA_GCONF_PATH_MODULES"/%s/locked", name);
+ locked = gconf_client_get_bool(client, p, FALSE);
+
+ if (locked)
+ return;
+
+ pa_snprintf(p, sizeof(p), PA_GCONF_PATH_MODULES"/%s/enabled", name);
+ enabled = gconf_client_get_bool(client, p, FALSE);
+
+ printf("%c%s%c", enabled ? '+' : '-', name, 0);
+
+ if (enabled) {
+
+ for (i = 0; i < 10; i++) {
+ gchar *n, *a;
+
+ pa_snprintf(p, sizeof(p), PA_GCONF_PATH_MODULES"/%s/name%i", name, i);
+ if (!(n = gconf_client_get_string(client, p, NULL)) || !*n)
+ break;
+
+ pa_snprintf(p, sizeof(p), PA_GCONF_PATH_MODULES"/%s/args%i", name, i);
+ a = gconf_client_get_string(client, p, NULL);
+
+ printf("%s%c%s%c", n, 0, a ? a : "", 0);
+
+ g_free(n);
+ g_free(a);
+ }
+
+ printf("%c", 0);
+ }
+
+ fflush(stdout);
+}
+
+static void modules_callback(
+ GConfClient* client,
+ guint cnxn_id,
+ GConfEntry *entry,
+ gpointer user_data) {
+
+ const char *n;
+ char buf[128];
+
+ g_assert(strncmp(entry->key, PA_GCONF_PATH_MODULES"/", sizeof(PA_GCONF_PATH_MODULES)) == 0);
+
+ n = entry->key + sizeof(PA_GCONF_PATH_MODULES);
+
+ g_strlcpy(buf, n, sizeof(buf));
+ buf[strcspn(buf, "/")] = 0;
+
+ handle_module(client, buf);
+}
+
+int main(int argc, char *argv[]) {
+ GMainLoop *g;
+ GConfClient *client;
+ GSList *modules, *m;
+
+ g_type_init();
+
+ if (!(client = gconf_client_get_default()))
+ goto fail;
+
+ gconf_client_add_dir(client, PA_GCONF_ROOT, GCONF_CLIENT_PRELOAD_RECURSIVE, NULL);
+ gconf_client_notify_add(client, PA_GCONF_PATH_MODULES, modules_callback, NULL, NULL, NULL);
+
+ modules = gconf_client_all_dirs(client, PA_GCONF_PATH_MODULES, NULL);
+
+ for (m = modules; m; m = m->next) {
+ char *e = strrchr(m->data, '/');
+ handle_module(client, e ? e+1 : m->data);
+ }
+
+ g_slist_free(modules);
+
+ /* Signal the parent that we are now initialized */
+ printf("!");
+ fflush(stdout);
+
+ g = g_main_loop_new(NULL, FALSE);
+ g_main_loop_run(g);
+ g_main_loop_unref(g);
+
+ g_object_unref(G_OBJECT(client));
+
+ return 0;
+
+fail:
+ return 1;
+}
diff --git a/src/modules/gconf/module-gconf.c b/src/modules/gconf/module-gconf.c
new file mode 100644
index 00000000..836157d0
--- /dev/null
+++ b/src/modules/gconf/module-gconf.c
@@ -0,0 +1,397 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <fcntl.h>
+
+#include <pulse/xmalloc.h>
+#include <pulsecore/module.h>
+#include <pulsecore/core.h>
+#include <pulsecore/llist.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/log.h>
+#include <pulse/mainloop-api.h>
+#include <pulsecore/core-error.h>
+#include <pulsecore/start-child.h>
+
+#include "module-gconf-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("GConf Adapter");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(TRUE);
+
+#define MAX_MODULES 10
+#define BUF_MAX 2048
+
+/* #undef PA_GCONF_HELPER */
+/* #define PA_GCONF_HELPER "/home/lennart/projects/pulseaudio/src/gconf-helper" */
+
+struct module_item {
+ char *name;
+ char *args;
+ uint32_t index;
+};
+
+struct module_info {
+ char *name;
+
+ struct module_item items[MAX_MODULES];
+ unsigned n_items;
+};
+
+struct userdata {
+ pa_core *core;
+ pa_module *module;
+
+ pa_hashmap *module_infos;
+
+ pid_t pid;
+
+ int fd;
+ int fd_type;
+ pa_io_event *io_event;
+
+ char buf[BUF_MAX];
+ size_t buf_fill;
+};
+
+static int fill_buf(struct userdata *u) {
+ ssize_t r;
+ pa_assert(u);
+
+ if (u->buf_fill >= BUF_MAX) {
+ pa_log("read buffer overflow");
+ return -1;
+ }
+
+ if ((r = pa_read(u->fd, u->buf + u->buf_fill, BUF_MAX - u->buf_fill, &u->fd_type)) <= 0)
+ return -1;
+
+ u->buf_fill += r;
+ return 0;
+}
+
+static int read_byte(struct userdata *u) {
+ int ret;
+ pa_assert(u);
+
+ if (u->buf_fill < 1)
+ if (fill_buf(u) < 0)
+ return -1;
+
+ ret = u->buf[0];
+ pa_assert(u->buf_fill > 0);
+ u->buf_fill--;
+ memmove(u->buf, u->buf+1, u->buf_fill);
+ return ret;
+}
+
+static char *read_string(struct userdata *u) {
+ pa_assert(u);
+
+ for (;;) {
+ char *e;
+
+ if ((e = memchr(u->buf, 0, u->buf_fill))) {
+ char *ret = pa_xstrdup(u->buf);
+ u->buf_fill -= e - u->buf +1;
+ memmove(u->buf, e+1, u->buf_fill);
+ return ret;
+ }
+
+ if (fill_buf(u) < 0)
+ return NULL;
+ }
+}
+
+static void unload_one_module(struct userdata *u, struct module_info*m, unsigned i) {
+ pa_assert(u);
+ pa_assert(m);
+ pa_assert(i < m->n_items);
+
+ if (m->items[i].index == PA_INVALID_INDEX)
+ return;
+
+ pa_log_debug("Unloading module #%i", m->items[i].index);
+ pa_module_unload_by_index(u->core, m->items[i].index);
+ m->items[i].index = PA_INVALID_INDEX;
+ pa_xfree(m->items[i].name);
+ pa_xfree(m->items[i].args);
+ m->items[i].name = m->items[i].args = NULL;
+}
+
+static void unload_all_modules(struct userdata *u, struct module_info*m) {
+ unsigned i;
+
+ pa_assert(u);
+ pa_assert(m);
+
+ for (i = 0; i < m->n_items; i++)
+ unload_one_module(u, m, i);
+
+ m->n_items = 0;
+}
+
+static void load_module(
+ struct userdata *u,
+ struct module_info *m,
+ int i,
+ const char *name,
+ const char *args,
+ int is_new) {
+
+ pa_module *mod;
+
+ pa_assert(u);
+ pa_assert(m);
+ pa_assert(name);
+ pa_assert(args);
+
+ if (!is_new) {
+ if (m->items[i].index != PA_INVALID_INDEX &&
+ strcmp(m->items[i].name, name) == 0 &&
+ strcmp(m->items[i].args, args) == 0)
+ return;
+
+ unload_one_module(u, m, i);
+ }
+
+ pa_log_debug("Loading module '%s' with args '%s' due to GConf configuration.", name, args);
+
+ m->items[i].name = pa_xstrdup(name);
+ m->items[i].args = pa_xstrdup(args);
+ m->items[i].index = PA_INVALID_INDEX;
+
+ if (!(mod = pa_module_load(u->core, name, args))) {
+ pa_log("pa_module_load() failed");
+ return;
+ }
+
+ m->items[i].index = mod->index;
+}
+
+static void module_info_free(void *p, void *userdata) {
+ struct module_info *m = p;
+ struct userdata *u = userdata;
+
+ pa_assert(m);
+ pa_assert(u);
+
+ unload_all_modules(u, m);
+ pa_xfree(m->name);
+ pa_xfree(m);
+}
+
+static int handle_event(struct userdata *u) {
+ int opcode;
+ int ret = 0;
+
+ do {
+ if ((opcode = read_byte(u)) < 0){
+ if (errno == EINTR || errno == EAGAIN)
+ break;
+ goto fail;
+ }
+
+ switch (opcode) {
+ case '!':
+ /* The helper tool is now initialized */
+ ret = 1;
+ break;
+
+ case '+': {
+ char *name;
+ struct module_info *m;
+ unsigned i, j;
+
+ if (!(name = read_string(u)))
+ goto fail;
+
+ if (!(m = pa_hashmap_get(u->module_infos, name))) {
+ m = pa_xnew(struct module_info, 1);
+ m->name = name;
+ m->n_items = 0;
+ pa_hashmap_put(u->module_infos, m->name, m);
+ } else
+ pa_xfree(name);
+
+ i = 0;
+ while (i < MAX_MODULES) {
+ char *module, *args;
+
+ if (!(module = read_string(u))) {
+ if (i > m->n_items) m->n_items = i;
+ goto fail;
+ }
+
+ if (!*module) {
+ pa_xfree(module);
+ break;
+ }
+
+ if (!(args = read_string(u))) {
+ pa_xfree(module);
+
+ if (i > m->n_items) m->n_items = i;
+ goto fail;
+ }
+
+ load_module(u, m, i, module, args, i >= m->n_items);
+
+ i++;
+
+ pa_xfree(module);
+ pa_xfree(args);
+ }
+
+ /* Unload all removed modules */
+ for (j = i; j < m->n_items; j++)
+ unload_one_module(u, m, j);
+
+ m->n_items = i;
+
+ break;
+ }
+
+ case '-': {
+ char *name;
+ struct module_info *m;
+
+ if (!(name = read_string(u)))
+ goto fail;
+
+ if ((m = pa_hashmap_get(u->module_infos, name))) {
+ pa_hashmap_remove(u->module_infos, name);
+ module_info_free(m, u);
+ }
+
+ pa_xfree(name);
+
+ break;
+ }
+ }
+ } while (u->buf_fill > 0 && ret == 0);
+
+ return ret;
+
+fail:
+ pa_log("Unable to read or parse data from client.");
+ return -1;
+}
+
+static void io_event_cb(
+ pa_mainloop_api*a,
+ pa_io_event* e,
+ int fd,
+ pa_io_event_flags_t events,
+ void *userdata) {
+
+ struct userdata *u = userdata;
+
+ if (handle_event(u) < 0) {
+
+ if (u->io_event) {
+ u->core->mainloop->io_free(u->io_event);
+ u->io_event = NULL;
+ }
+
+ pa_module_unload_request(u->module);
+ }
+}
+
+int pa__init(pa_module*m) {
+ struct userdata *u;
+ int r;
+
+ u = pa_xnew(struct userdata, 1);
+ u->core = m->core;
+ u->module = m;
+ m->userdata = u;
+ u->module_infos = pa_hashmap_new(pa_idxset_string_hash_func, pa_idxset_string_compare_func);
+ u->pid = (pid_t) -1;
+ u->fd = -1;
+ u->fd_type = 0;
+ u->io_event = NULL;
+ u->buf_fill = 0;
+
+ if ((u->fd = pa_start_child_for_read(PA_GCONF_HELPER, NULL, &u->pid)) < 0)
+ goto fail;
+
+ u->io_event = m->core->mainloop->io_new(
+ m->core->mainloop,
+ u->fd,
+ PA_IO_EVENT_INPUT,
+ io_event_cb,
+ u);
+
+ do {
+ if ((r = handle_event(u)) < 0)
+ goto fail;
+
+ /* Read until the client signalled us that it is ready with
+ * initialization */
+ } while (r != 1);
+
+ return 0;
+
+fail:
+ pa__done(m);
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->pid != (pid_t) -1) {
+ kill(u->pid, SIGTERM);
+ waitpid(u->pid, NULL, 0);
+ }
+
+ if (u->io_event)
+ m->core->mainloop->io_free(u->io_event);
+
+ if (u->fd >= 0)
+ pa_close(u->fd);
+
+
+ if (u->module_infos)
+ pa_hashmap_free(u->module_infos, module_info_free, u);
+
+ pa_xfree(u);
+}
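
Taken together, gconf-helper.c and module-gconf.c above define a small NUL-delimited protocol over the helper's stdout: "+<group>\0" followed by up to MAX_MODULES "<name>\0<args>\0" pairs and a terminating empty string for an enabled group, "-<group>\0" for a disabled or removed group (locked groups are skipped entirely), and a single '!' once the initial enumeration is done. The stand-alone snippet below only illustrates that byte stream; the group name and module arguments are made up:

/* Emits one example exchange in the gconf-helper wire format that
 * module-gconf's handle_event() parses.  Names and args are invented. */
#include <stdio.h>

int main(void) {
    /* group "combine" enabled, carrying one module entry */
    printf("%c%s%c", '+', "combine", 0);
    printf("%s%c%s%c", "module-combine", 0, "sink_name=out", 0);
    printf("%c", 0);            /* empty string ends the name/args pairs */

    printf("!");                /* initial enumeration finished */
    fflush(stdout);

    /* later: the group was disabled or deleted -> unload its modules */
    printf("%c%s%c", '-', "combine", 0);
    fflush(stdout);

    return 0;
}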
diff --git a/src/modules/ladspa.h b/src/modules/ladspa.h
new file mode 100644
index 00000000..b1a9c4e5
--- /dev/null
+++ b/src/modules/ladspa.h
@@ -0,0 +1,603 @@
+/* ladspa.h
+
+ Linux Audio Developer's Simple Plugin API Version 1.1[LGPL].
+ Copyright (C) 2000-2002 Richard W.E. Furse, Paul Barton-Davis,
+ Stefan Westerfeld.
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public License
+ as published by the Free Software Foundation; either version 2.1 of
+ the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA. */
+
+#ifndef LADSPA_INCLUDED
+#define LADSPA_INCLUDED
+
+#define LADSPA_VERSION "1.1"
+#define LADSPA_VERSION_MAJOR 1
+#define LADSPA_VERSION_MINOR 1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*****************************************************************************/
+
+/* Overview:
+
+ There is a large number of synthesis packages in use or development
+ on the Linux platform at this time. This API (`The Linux Audio
+ Developer's Simple Plugin API') attempts to give programmers the
+ ability to write simple `plugin' audio processors in C/C++ and link
+ them dynamically (`plug') into a range of these packages (`hosts').
+ It should be possible for any host and any plugin to communicate
+ completely through this interface.
+
+ This API is deliberately short and simple. To achieve compatibility
+ with a range of promising Linux sound synthesis packages it
+ attempts to find the `greatest common divisor' in their logical
+ behaviour. Having said this, certain limiting decisions are
+ implicit, notably the use of a fixed type (LADSPA_Data) for all
+ data transfer and absence of a parameterised `initialisation'
+ phase. See below for the LADSPA_Data typedef.
+
+ Plugins are expected to distinguish between control and audio
+ data. Plugins have `ports' that are inputs or outputs for audio or
+ control data and each plugin is `run' for a `block' corresponding
+ to a short time interval measured in samples. Audio data is
+ communicated using arrays of LADSPA_Data, allowing a block of audio
+ to be processed by the plugin in a single pass. Control data is
+ communicated using single LADSPA_Data values. Control data has a
+ single value at the start of a call to the `run()' or `run_adding()'
+ function, and may be considered to remain this value for its
+ duration. The plugin may assume that all its input and output ports
+ have been connected to the relevant data location (see the
+ `connect_port()' function below) before it is asked to run.
+
+ Plugins will reside in shared object files suitable for dynamic
+ linking by dlopen() and family. The file will provide a number of
+ `plugin types' that can be used to instantiate actual plugins
+ (sometimes known as `plugin instances') that can be connected
+ together to perform tasks.
+
+ This API contains very limited error-handling. */
+
+/*****************************************************************************/
+
+/* Fundamental data type passed in and out of plugin. This data type
+ is used to communicate audio samples and control values. It is
+ assumed that the plugin will work sensibly given any numeric input
+ value although it may have a preferred range (see hints below).
+
+ For audio it is generally assumed that 1.0f is the `0dB' reference
+ amplitude and is a `normal' signal level. */
+
+typedef float LADSPA_Data;
+
+/*****************************************************************************/
+
+/* Special Plugin Properties:
+
+ Optional features of the plugin type are encapsulated in the
+ LADSPA_Properties type. This is assembled by ORing individual
+ properties together. */
+
+typedef int LADSPA_Properties;
+
+/* Property LADSPA_PROPERTY_REALTIME indicates that the plugin has a
+ real-time dependency (e.g. listens to a MIDI device) and so its
+ output must not be cached or subject to significant latency. */
+#define LADSPA_PROPERTY_REALTIME 0x1
+
+/* Property LADSPA_PROPERTY_INPLACE_BROKEN indicates that the plugin
+ may cease to work correctly if the host elects to use the same data
+ location for both input and output (see connect_port()). This
+ should be avoided as enabling this flag makes it impossible for
+ hosts to use the plugin to process audio `in-place.' */
+#define LADSPA_PROPERTY_INPLACE_BROKEN 0x2
+
+/* Property LADSPA_PROPERTY_HARD_RT_CAPABLE indicates that the plugin
+ is capable of running not only in a conventional host but also in a
+ `hard real-time' environment. To qualify for this the plugin must
+ satisfy all of the following:
+
+ (1) The plugin must not use malloc(), free() or other heap memory
+ management within its run() or run_adding() functions. All new
+ memory used in run() must be managed via the stack. These
+ restrictions only apply to the run() function.
+
+ (2) The plugin will not attempt to make use of any library
+ functions with the exceptions of functions in the ANSI standard C
+ and C maths libraries, which the host is expected to provide.
+
+ (3) The plugin will not access files, devices, pipes, sockets, IPC
+ or any other mechanism that might result in process or thread
+ blocking.
+
+ (4) The plugin will take an amount of time to execute a run() or
+ run_adding() call approximately of form (A+B*SampleCount) where A
+ and B depend on the machine and host in use. This amount of time
+ may not depend on input signals or plugin state. The host is left
+ the responsibility to perform timings to estimate upper bounds for
+ A and B. */
+#define LADSPA_PROPERTY_HARD_RT_CAPABLE 0x4
+
+#define LADSPA_IS_REALTIME(x) ((x) & LADSPA_PROPERTY_REALTIME)
+#define LADSPA_IS_INPLACE_BROKEN(x) ((x) & LADSPA_PROPERTY_INPLACE_BROKEN)
+#define LADSPA_IS_HARD_RT_CAPABLE(x) ((x) & LADSPA_PROPERTY_HARD_RT_CAPABLE)
+
+/*****************************************************************************/
+
+/* Plugin Ports:
+
+ Plugins have `ports' that are inputs or outputs for audio or
+   control data. Ports can communicate arrays of LADSPA_Data (for audio
+ inputs/outputs) or single LADSPA_Data values (for control
+ input/outputs). This information is encapsulated in the
+ LADSPA_PortDescriptor type which is assembled by ORing individual
+ properties together.
+
+ Note that a port must be an input or an output port but not both
+ and that a port must be a control or audio port but not both. */
+
+typedef int LADSPA_PortDescriptor;
+
+/* Property LADSPA_PORT_INPUT indicates that the port is an input. */
+#define LADSPA_PORT_INPUT 0x1
+
+/* Property LADSPA_PORT_OUTPUT indicates that the port is an output. */
+#define LADSPA_PORT_OUTPUT 0x2
+
+/* Property LADSPA_PORT_CONTROL indicates that the port is a control
+ port. */
+#define LADSPA_PORT_CONTROL 0x4
+
+/* Property LADSPA_PORT_AUDIO indicates that the port is an audio
+ port. */
+#define LADSPA_PORT_AUDIO 0x8
+
+#define LADSPA_IS_PORT_INPUT(x) ((x) & LADSPA_PORT_INPUT)
+#define LADSPA_IS_PORT_OUTPUT(x) ((x) & LADSPA_PORT_OUTPUT)
+#define LADSPA_IS_PORT_CONTROL(x) ((x) & LADSPA_PORT_CONTROL)
+#define LADSPA_IS_PORT_AUDIO(x) ((x) & LADSPA_PORT_AUDIO)
+
+/*****************************************************************************/
+
+/* Plugin Port Range Hints:
+
+ The host may wish to provide a representation of data entering or
+ leaving a plugin (e.g. to generate a GUI automatically). To make
+ this more meaningful, the plugin should provide `hints' to the host
+ describing the usual values taken by the data.
+
+ Note that these are only hints. The host may ignore them and the
+ plugin must not assume that data supplied to it is meaningful. If
+ the plugin receives invalid input data it is expected to continue
+ to run without failure and, where possible, produce a sensible
+ output (e.g. a high-pass filter given a negative cutoff frequency
+ might switch to an all-pass mode).
+
+ Hints are meaningful for all input and output ports but hints for
+ input control ports are expected to be particularly useful.
+
+ More hint information is encapsulated in the
+ LADSPA_PortRangeHintDescriptor type which is assembled by ORing
+ individual hint types together. Hints may require further
+ LowerBound and UpperBound information.
+
+ All the hint information for a particular port is aggregated in the
+ LADSPA_PortRangeHint structure. */
+
+typedef int LADSPA_PortRangeHintDescriptor;
+
+/* Hint LADSPA_HINT_BOUNDED_BELOW indicates that the LowerBound field
+ of the LADSPA_PortRangeHint should be considered meaningful. The
+ value in this field should be considered the (inclusive) lower
+ bound of the valid range. If LADSPA_HINT_SAMPLE_RATE is also
+ specified then the value of LowerBound should be multiplied by the
+ sample rate. */
+#define LADSPA_HINT_BOUNDED_BELOW 0x1
+
+/* Hint LADSPA_HINT_BOUNDED_ABOVE indicates that the UpperBound field
+ of the LADSPA_PortRangeHint should be considered meaningful. The
+ value in this field should be considered the (inclusive) upper
+ bound of the valid range. If LADSPA_HINT_SAMPLE_RATE is also
+ specified then the value of UpperBound should be multiplied by the
+ sample rate. */
+#define LADSPA_HINT_BOUNDED_ABOVE 0x2
+
+/* Hint LADSPA_HINT_TOGGLED indicates that the data item should be
+ considered a Boolean toggle. Data less than or equal to zero should
+ be considered `off' or `false,' and data above zero should be
+ considered `on' or `true.' LADSPA_HINT_TOGGLED may not be used in
+ conjunction with any other hint except LADSPA_HINT_DEFAULT_0 or
+ LADSPA_HINT_DEFAULT_1. */
+#define LADSPA_HINT_TOGGLED 0x4
+
+/* Hint LADSPA_HINT_SAMPLE_RATE indicates that any bounds specified
+ should be interpreted as multiples of the sample rate. For
+ instance, a frequency range from 0Hz to the Nyquist frequency (half
+ the sample rate) could be requested by this hint in conjunction
+ with LowerBound = 0 and UpperBound = 0.5. Hosts that support bounds
+ at all must support this hint to retain meaning. */
+#define LADSPA_HINT_SAMPLE_RATE 0x8
+
+/* Hint LADSPA_HINT_LOGARITHMIC indicates that it is likely that the
+ user will find it more intuitive to view values using a logarithmic
+ scale. This is particularly useful for frequencies and gains. */
+#define LADSPA_HINT_LOGARITHMIC 0x10
+
+/* Hint LADSPA_HINT_INTEGER indicates that a user interface would
+ probably wish to provide a stepped control taking only integer
+ values. Any bounds set should be slightly wider than the actual
+ integer range required to avoid floating point rounding errors. For
+ instance, the integer set {0,1,2,3} might be described as [-0.1,
+ 3.1]. */
+#define LADSPA_HINT_INTEGER 0x20
+
+/* The various LADSPA_HINT_HAS_DEFAULT_* hints indicate a `normal'
+ value for the port that is sensible as a default. For instance,
+ this value is suitable for use as an initial value in a user
+ interface or as a value the host might assign to a control port
+ when the user has not provided one. Defaults are encoded using a
+ mask so only one default may be specified for a port. Some of the
+ hints make use of lower and upper bounds, in which case the
+ relevant bound or bounds must be available and
+ LADSPA_HINT_SAMPLE_RATE must be applied as usual. The resulting
+ default must be rounded if LADSPA_HINT_INTEGER is present. Default
+ values were introduced in LADSPA v1.1. */
+#define LADSPA_HINT_DEFAULT_MASK 0x3C0
+
+/* This default value indicates that no default is provided. */
+#define LADSPA_HINT_DEFAULT_NONE 0x0
+
+/* This default hint indicates that the suggested lower bound for the
+ port should be used. */
+#define LADSPA_HINT_DEFAULT_MINIMUM 0x40
+
+/* This default hint indicates that a low value between the suggested
+ lower and upper bounds should be chosen. For ports with
+ LADSPA_HINT_LOGARITHMIC, this should be exp(log(lower) * 0.75 +
+ log(upper) * 0.25). Otherwise, this should be (lower * 0.75 + upper
+ * 0.25). */
+#define LADSPA_HINT_DEFAULT_LOW 0x80
+
+/* This default hint indicates that a middle value between the
+ suggested lower and upper bounds should be chosen. For ports with
+ LADSPA_HINT_LOGARITHMIC, this should be exp(log(lower) * 0.5 +
+ log(upper) * 0.5). Otherwise, this should be (lower * 0.5 + upper *
+ 0.5). */
+#define LADSPA_HINT_DEFAULT_MIDDLE 0xC0
+
+/* This default hint indicates that a high value between the suggested
+ lower and upper bounds should be chosen. For ports with
+ LADSPA_HINT_LOGARITHMIC, this should be exp(log(lower) * 0.25 +
+ log(upper) * 0.75). Otherwise, this should be (lower * 0.25 + upper
+ * 0.75). */
+#define LADSPA_HINT_DEFAULT_HIGH 0x100
+
+/* This default hint indicates that the suggested upper bound for the
+ port should be used. */
+#define LADSPA_HINT_DEFAULT_MAXIMUM 0x140
+
+/* This default hint indicates that the number 0 should be used. Note
+ that this default may be used in conjunction with
+ LADSPA_HINT_TOGGLED. */
+#define LADSPA_HINT_DEFAULT_0 0x200
+
+/* This default hint indicates that the number 1 should be used. Note
+ that this default may be used in conjunction with
+ LADSPA_HINT_TOGGLED. */
+#define LADSPA_HINT_DEFAULT_1 0x240
+
+/* This default hint indicates that the number 100 should be used. */
+#define LADSPA_HINT_DEFAULT_100 0x280
+
+/* This default hint indicates that the Hz frequency of `concert A'
+ should be used. This will be 440 unless the host uses an unusual
+ tuning convention, in which case it may be within a few Hz. */
+#define LADSPA_HINT_DEFAULT_440 0x2C0
+
+#define LADSPA_IS_HINT_BOUNDED_BELOW(x) ((x) & LADSPA_HINT_BOUNDED_BELOW)
+#define LADSPA_IS_HINT_BOUNDED_ABOVE(x) ((x) & LADSPA_HINT_BOUNDED_ABOVE)
+#define LADSPA_IS_HINT_TOGGLED(x) ((x) & LADSPA_HINT_TOGGLED)
+#define LADSPA_IS_HINT_SAMPLE_RATE(x) ((x) & LADSPA_HINT_SAMPLE_RATE)
+#define LADSPA_IS_HINT_LOGARITHMIC(x) ((x) & LADSPA_HINT_LOGARITHMIC)
+#define LADSPA_IS_HINT_INTEGER(x) ((x) & LADSPA_HINT_INTEGER)
+
+#define LADSPA_IS_HINT_HAS_DEFAULT(x) ((x) & LADSPA_HINT_DEFAULT_MASK)
+#define LADSPA_IS_HINT_DEFAULT_MINIMUM(x) (((x) & LADSPA_HINT_DEFAULT_MASK) \
+ == LADSPA_HINT_DEFAULT_MINIMUM)
+#define LADSPA_IS_HINT_DEFAULT_LOW(x) (((x) & LADSPA_HINT_DEFAULT_MASK) \
+ == LADSPA_HINT_DEFAULT_LOW)
+#define LADSPA_IS_HINT_DEFAULT_MIDDLE(x) (((x) & LADSPA_HINT_DEFAULT_MASK) \
+ == LADSPA_HINT_DEFAULT_MIDDLE)
+#define LADSPA_IS_HINT_DEFAULT_HIGH(x) (((x) & LADSPA_HINT_DEFAULT_MASK) \
+ == LADSPA_HINT_DEFAULT_HIGH)
+#define LADSPA_IS_HINT_DEFAULT_MAXIMUM(x) (((x) & LADSPA_HINT_DEFAULT_MASK) \
+ == LADSPA_HINT_DEFAULT_MAXIMUM)
+#define LADSPA_IS_HINT_DEFAULT_0(x) (((x) & LADSPA_HINT_DEFAULT_MASK) \
+ == LADSPA_HINT_DEFAULT_0)
+#define LADSPA_IS_HINT_DEFAULT_1(x) (((x) & LADSPA_HINT_DEFAULT_MASK) \
+ == LADSPA_HINT_DEFAULT_1)
+#define LADSPA_IS_HINT_DEFAULT_100(x) (((x) & LADSPA_HINT_DEFAULT_MASK) \
+ == LADSPA_HINT_DEFAULT_100)
+#define LADSPA_IS_HINT_DEFAULT_440(x) (((x) & LADSPA_HINT_DEFAULT_MASK) \
+ == LADSPA_HINT_DEFAULT_440)
+
+typedef struct _LADSPA_PortRangeHint {
+
+ /* Hints about the port. */
+ LADSPA_PortRangeHintDescriptor HintDescriptor;
+
+ /* Meaningful when hint LADSPA_HINT_BOUNDED_BELOW is active. When
+ LADSPA_HINT_SAMPLE_RATE is also active then this value should be
+ multiplied by the relevant sample rate. */
+ LADSPA_Data LowerBound;
+
+ /* Meaningful when hint LADSPA_HINT_BOUNDED_ABOVE is active. When
+ LADSPA_HINT_SAMPLE_RATE is also active then this value should be
+ multiplied by the relevant sample rate. */
+ LADSPA_Data UpperBound;
+
+} LADSPA_PortRangeHint;
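The hint flags and the two bounds above are all a host needs in order to pick a start-up value for a control port. The helper below is not part of the LADSPA header; it is a sketch of how a host might turn a LADSPA_PortRangeHint into a concrete default, directly following the formulas documented for the LADSPA_HINT_DEFAULT_* values (it needs <math.h> and assumes strictly positive bounds for logarithmic ports).

    /* Host-side sketch: compute the default suggested by a port's range
     * hint, following the rules documented above. Not part of the API. */
    #include <math.h>

    static LADSPA_Data host_port_default(const LADSPA_PortRangeHint *h,
                                         unsigned long sample_rate) {
        LADSPA_PortRangeHintDescriptor d = h->HintDescriptor;
        LADSPA_Data lo = h->LowerBound, hi = h->UpperBound, v = 0.0f;

        if (LADSPA_IS_HINT_SAMPLE_RATE(d)) {   /* bounds scale, constants don't */
            lo *= (LADSPA_Data) sample_rate;
            hi *= (LADSPA_Data) sample_rate;
        }

        switch (d & LADSPA_HINT_DEFAULT_MASK) {
            case LADSPA_HINT_DEFAULT_MINIMUM: v = lo;     break;
            case LADSPA_HINT_DEFAULT_MAXIMUM: v = hi;     break;
            case LADSPA_HINT_DEFAULT_0:       v = 0.0f;   break;
            case LADSPA_HINT_DEFAULT_1:       v = 1.0f;   break;
            case LADSPA_HINT_DEFAULT_100:     v = 100.0f; break;
            case LADSPA_HINT_DEFAULT_440:     v = 440.0f; break;
            case LADSPA_HINT_DEFAULT_LOW:
                v = LADSPA_IS_HINT_LOGARITHMIC(d)
                    ? expf(logf(lo) * 0.75f + logf(hi) * 0.25f)
                    : lo * 0.75f + hi * 0.25f;
                break;
            case LADSPA_HINT_DEFAULT_MIDDLE:
                v = LADSPA_IS_HINT_LOGARITHMIC(d)
                    ? expf(logf(lo) * 0.5f + logf(hi) * 0.5f)
                    : lo * 0.5f + hi * 0.5f;
                break;
            case LADSPA_HINT_DEFAULT_HIGH:
                v = LADSPA_IS_HINT_LOGARITHMIC(d)
                    ? expf(logf(lo) * 0.25f + logf(hi) * 0.75f)
                    : lo * 0.25f + hi * 0.75f;
                break;
            default: /* LADSPA_HINT_DEFAULT_NONE: the caller must choose */
                break;
        }

        if (LADSPA_IS_HINT_INTEGER(d))
            v = roundf(v);

        return v;
    }

A host would typically evaluate this once per control port right after instantiate() and before exposing the port in a user interface.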
+
+/*****************************************************************************/
+
+/* Plugin Handles:
+
+ This plugin handle indicates a particular instance of the plugin
+ concerned. It is valid to compare this to NULL (0 for C++) but
+ otherwise the host should not attempt to interpret it. The plugin
+ may use it to reference internal instance data. */
+
+typedef void * LADSPA_Handle;
+
+/*****************************************************************************/
+
+/* Descriptor for a Type of Plugin:
+
+ This structure is used to describe a plugin type. It provides a
+ number of functions to examine the type, instantiate it, link it to
+ buffers and workspaces and to run it. */
+
+typedef struct _LADSPA_Descriptor {
+
+ /* This numeric identifier indicates the plugin type
+ uniquely. Plugin programmers may reserve ranges of IDs from a
+ central body to avoid clashes. Hosts may assume that IDs are
+ below 0x1000000. */
+ unsigned long UniqueID;
+
+ /* This identifier can be used as a unique, case-sensitive
+ identifier for the plugin type within the plugin file. Plugin
+ types should be identified by file and label rather than by index
+ or plugin name, which may be changed in new plugin
+ versions. Labels must not contain white-space characters. */
+ const char * Label;
+
+ /* This indicates a number of properties of the plugin. */
+ LADSPA_Properties Properties;
+
+ /* This member points to the null-terminated name of the plugin
+ (e.g. "Sine Oscillator"). */
+ const char * Name;
+
+ /* This member points to the null-terminated string indicating the
+ maker of the plugin. This can be an empty string but not NULL. */
+ const char * Maker;
+
+ /* This member points to the null-terminated string indicating any
+ copyright applying to the plugin. If no Copyright applies the
+ string "None" should be used. */
+ const char * Copyright;
+
+ /* This indicates the number of ports (input AND output) present on
+ the plugin. */
+ unsigned long PortCount;
+
+ /* This member indicates an array of port descriptors. Valid indices
+ vary from 0 to PortCount-1. */
+ const LADSPA_PortDescriptor * PortDescriptors;
+
+ /* This member indicates an array of null-terminated strings
+ describing ports (e.g. "Frequency (Hz)"). Valid indices vary from
+ 0 to PortCount-1. */
+ const char * const * PortNames;
+
+ /* This member indicates an array of range hints for each port (see
+ above). Valid indices vary from 0 to PortCount-1. */
+ const LADSPA_PortRangeHint * PortRangeHints;
+
+ /* This may be used by the plugin developer to pass any custom
+ implementation data into an instantiate call. It must not be used
+ or interpreted by the host. It is expected that most plugin
+ writers will not use this facility as LADSPA_Handle should be
+ used to hold instance data. */
+ void * ImplementationData;
+
+ /* This member is a function pointer that instantiates a plugin. A
+ handle is returned indicating the new plugin instance. The
+ instantiation function accepts a sample rate as a parameter. The
+ plugin descriptor from which this instantiate function was found
+ must also be passed. This function must return NULL if
+ instantiation fails.
+
+ Note that instance initialisation should generally occur in
+ activate() rather than here. */
+ LADSPA_Handle (*instantiate)(const struct _LADSPA_Descriptor * Descriptor,
+ unsigned long SampleRate);
+
+ /* This member is a function pointer that connects a port on an
+ instantiated plugin to a memory location at which a block of data
+ for the port will be read/written. The data location is expected
+ to be an array of LADSPA_Data for audio ports or a single
+ LADSPA_Data value for control ports. Memory issues will be
+ managed by the host. The plugin must read/write the data at these
+ locations every time run() or run_adding() is called and the data
+ present at the time of this connection call should not be
+ considered meaningful.
+
+ connect_port() may be called more than once for a plugin instance
+ to allow the host to change the buffers that the plugin is
+ reading or writing. These calls may be made before or after
+ activate() or deactivate() calls.
+
+ connect_port() must be called at least once for each port before
+ run() or run_adding() is called. When working with blocks of
+ LADSPA_Data the plugin should pay careful attention to the block
+ size passed to the run function as the block allocated may only
+ just be large enough to contain the block of samples.
+
+ Plugin writers should be aware that the host may elect to use the
+ same buffer for more than one port and even use the same buffer
+ for both input and output (see LADSPA_PROPERTY_INPLACE_BROKEN).
+ However, overlapped buffers or use of a single buffer for both
+ audio and control data may result in unexpected behaviour. */
+ void (*connect_port)(LADSPA_Handle Instance,
+ unsigned long Port,
+ LADSPA_Data * DataLocation);
+
+ /* This member is a function pointer that initialises a plugin
+ instance and activates it for use. This is separated from
+ instantiate() to aid real-time support and so that hosts can
+ reinitialise a plugin instance by calling deactivate() and then
+ activate(). In this case the plugin instance must reset all state
+ information dependent on the history of the plugin instance
+ except for any data locations provided by connect_port() and any
+ gain set by set_run_adding_gain(). If there is nothing for
+ activate() to do then the plugin writer may provide a NULL rather
+ than an empty function.
+
+ When present, hosts must call this function once before run() (or
+ run_adding()) is called for the first time. This call should be
+ made as close to the run() call as possible and indicates to
+ real-time plugins that they are now live. Plugins should not rely
+ on a prompt call to run() after activate(). activate() may not be
+ called again unless deactivate() is called first. Note that
+ connect_port() may be called before or after a call to
+ activate(). */
+ void (*activate)(LADSPA_Handle Instance);
+
+ /* This method is a function pointer that runs an instance of a
+ plugin for a block. Two parameters are required: the first is a
+ handle to the particular instance to be run and the second
+ indicates the block size (in samples) for which the plugin
+ instance may run.
+
+ Note that if an activate() function exists then it must be called
+ before run() or run_adding(). If deactivate() is called for a
+ plugin instance then the plugin instance may not be reused until
+ activate() has been called again.
+
+ If the plugin has the property LADSPA_PROPERTY_HARD_RT_CAPABLE
+ then there are various things that the plugin should not do
+ within the run() or run_adding() functions (see above). */
+ void (*run)(LADSPA_Handle Instance,
+ unsigned long SampleCount);
+
+ /* This method is a function pointer that runs an instance of a
+ plugin for a block. This has identical behaviour to run() except
+ in the way data is output from the plugin. When run() is used,
+ values are written directly to the memory areas associated with
+ the output ports. However when run_adding() is called, values
+ must be added to the values already present in the memory
+ areas. Furthermore, output values written must be scaled by the
+ current gain set by set_run_adding_gain() (see below) before
+ addition.
+
+ run_adding() is optional. When it is not provided by a plugin,
+ this function pointer must be set to NULL. When it is provided,
+ the function set_run_adding_gain() must be provided also. */
+ void (*run_adding)(LADSPA_Handle Instance,
+ unsigned long SampleCount);
+
+ /* This method is a function pointer that sets the output gain for
+ use when run_adding() is called (see above). If this function is
+ never called the gain is assumed to default to 1. Gain
+ information should be retained when activate() or deactivate()
+ are called.
+
+ This function should be provided by the plugin if and only if the
+ run_adding() function is provided. When it is absent this
+ function pointer must be set to NULL. */
+ void (*set_run_adding_gain)(LADSPA_Handle Instance,
+ LADSPA_Data Gain);
+
+ /* This is the counterpart to activate() (see above). If there is
+ nothing for deactivate() to do then the plugin writer may provide
+ a NULL rather than an empty function.
+
+ Hosts must deactivate all activated units after they have been
+ run() (or run_adding()) for the last time. This call should be
+ made as close to the last run() call as possible and indicates to
+ real-time plugins that they are no longer live. Plugins should
+ not rely on prompt deactivation. Note that connect_port() may be
+ called before or after a call to deactivate().
+
+ Deactivation is not similar to pausing as the plugin instance
+ will be reinitialised when activate() is called to reuse it. */
+ void (*deactivate)(LADSPA_Handle Instance);
+
+ /* Once an instance of a plugin has been finished with it can be
+ deleted using the following function. The instance handle passed
+ ceases to be valid after this call.
+
+ If activate() was called for a plugin instance then a
+ corresponding call to deactivate() must be made before cleanup()
+ is called. */
+ void (*cleanup)(LADSPA_Handle Instance);
+
+} LADSPA_Descriptor;
+
+/**********************************************************************/
+
+/* Accessing a Plugin: */
+
+/* The exact mechanism by which plugins are loaded is host-dependent,
+   however all that most hosts will need to know is the name of the
+   shared object file containing the plugin types. To allow multiple hosts to
+ share plugin types, hosts may wish to check for environment
+ variable LADSPA_PATH. If present, this should contain a
+ colon-separated path indicating directories that should be searched
+ (in order) when loading plugin types.
+
+ A plugin programmer must include a function called
+ "ladspa_descriptor" with the following function prototype within
+ the shared object file. This function will have C-style linkage (if
+ you are using C++ this is taken care of by the `extern "C"' clause
+ at the top of the file).
+
+ A host will find the plugin shared object file by one means or
+ another, find the ladspa_descriptor() function, call it, and
+ proceed from there.
+
+ Plugin types are accessed by index (not ID) using values from 0
+ upwards. Out of range indexes must result in this function
+ returning NULL, so the plugin count can be determined by checking
+ for the least index that results in NULL being returned. */
+
+const LADSPA_Descriptor * ladspa_descriptor(unsigned long Index);
+
+/* Datatype corresponding to the ladspa_descriptor() function. */
+typedef const LADSPA_Descriptor *
+(*LADSPA_Descriptor_Function)(unsigned long Index);
+
+/**********************************************************************/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* LADSPA_INCLUDED */
+
+/* EOF */
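To make the access rules spelled out above concrete, here is a minimal host-side sketch (it is not part of ladspa.h or of this patch): it dlopen()s one plugin file, resolves ladspa_descriptor(), lists the available plugin types and drives the first one through the documented lifecycle of instantiate(), connect_port(), activate(), run(), deactivate() and cleanup(). The plugin path and buffer sizes are placeholders.

    /* Minimal LADSPA host sketch: enumerate the plugin types in one
     * shared object and run the first one on a silent buffer. Error
     * handling is abbreviated; the plugin path is a placeholder. */
    #include <dlfcn.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include "ladspa.h"

    #define RATE   44100
    #define FRAMES 1024

    int main(void) {
        void *dl;
        LADSPA_Descriptor_Function df;
        const LADSPA_Descriptor *d;
        unsigned long i, port;
        LADSPA_Handle h;
        LADSPA_Data *audio_buf, *controls;

        if (!(dl = dlopen("/usr/lib/ladspa/amp.so", RTLD_NOW)))
            return 1;
        if (!(df = (LADSPA_Descriptor_Function) dlsym(dl, "ladspa_descriptor")))
            return 1;

        /* Plugin types are indexed from 0 until NULL is returned. */
        for (i = 0; (d = df(i)); i++)
            printf("#%lu: %s (%s)\n", d->UniqueID, d->Name, d->Label);

        if (!(d = df(0)) || !(h = d->instantiate(d, RATE)))
            return 1;

        audio_buf = calloc(FRAMES, sizeof(LADSPA_Data));
        controls  = calloc(d->PortCount, sizeof(LADSPA_Data));

        /* Every port must be connected before run(). Here all audio
         * ports share one buffer (fine unless the plugin declares
         * LADSPA_PROPERTY_INPLACE_BROKEN) and each control port gets a
         * single LADSPA_Data slot; a real host would fill the control
         * slots from the port range hints (see the default-value sketch
         * earlier). */
        for (port = 0; port < d->PortCount; port++)
            d->connect_port(h, port,
                            LADSPA_IS_PORT_AUDIO(d->PortDescriptors[port])
                                ? audio_buf : &controls[port]);

        if (d->activate)
            d->activate(h);

        d->run(h, FRAMES);

        if (d->deactivate)
            d->deactivate(h);
        d->cleanup(h);

        free(audio_buf);
        free(controls);
        dlclose(dl);
        return 0;
    }

Compile with -ldl. A real host would additionally honour the LADSPA_PATH search path described above before hard-coding a plugin location.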
diff --git a/src/modules/module-alsa-sink.c b/src/modules/module-alsa-sink.c
new file mode 100644
index 00000000..14aef7c9
--- /dev/null
+++ b/src/modules/module-alsa-sink.c
@@ -0,0 +1,995 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+ Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+
+#include <asoundlib.h>
+
+#include <pulse/xmalloc.h>
+#include <pulse/util.h>
+
+#include <pulsecore/core.h>
+#include <pulsecore/module.h>
+#include <pulsecore/memchunk.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/sample-util.h>
+#include <pulsecore/log.h>
+#include <pulsecore/macro.h>
+#include <pulsecore/thread.h>
+#include <pulsecore/core-error.h>
+#include <pulsecore/thread-mq.h>
+#include <pulsecore/rtpoll.h>
+
+#include "alsa-util.h"
+#include "module-alsa-sink-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("ALSA Sink");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_USAGE(
+ "sink_name=<name for the sink> "
+ "device=<ALSA device> "
+ "device_id=<ALSA device id> "
+ "format=<sample format> "
+ "channels=<number of channels> "
+ "rate=<sample rate> "
+ "fragments=<number of fragments> "
+ "fragment_size=<fragment size> "
+ "channel_map=<channel map> "
+ "mmap=<enable memory mapping?>");
+
+#define DEFAULT_DEVICE "default"
+
+struct userdata {
+ pa_core *core;
+ pa_module *module;
+ pa_sink *sink;
+
+ pa_thread *thread;
+ pa_thread_mq thread_mq;
+ pa_rtpoll *rtpoll;
+
+ snd_pcm_t *pcm_handle;
+
+ pa_alsa_fdlist *mixer_fdl;
+ snd_mixer_t *mixer_handle;
+ snd_mixer_elem_t *mixer_elem;
+ long hw_volume_max, hw_volume_min;
+
+ size_t frame_size, fragment_size, hwbuf_size;
+ unsigned nfragments;
+ pa_memchunk memchunk;
+
+ char *device_name;
+
+ pa_bool_t use_mmap;
+
+ pa_bool_t first;
+
+ pa_rtpoll_item *alsa_rtpoll_item;
+
+ snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
+};
+
+static const char* const valid_modargs[] = {
+ "device",
+ "device_id",
+ "sink_name",
+ "format",
+ "channels",
+ "rate",
+ "fragments",
+ "fragment_size",
+ "channel_map",
+ "mmap",
+ NULL
+};
+
+static int mmap_write(struct userdata *u) {
+ int work_done = 0;
+
+ pa_assert(u);
+ pa_sink_assert_ref(u->sink);
+
+ for (;;) {
+ pa_memchunk chunk;
+ void *p;
+ snd_pcm_sframes_t n;
+ int err;
+ const snd_pcm_channel_area_t *areas;
+ snd_pcm_uframes_t offset, frames;
+
+ if ((n = snd_pcm_avail_update(u->pcm_handle)) < 0) {
+
+ if (n == -EPIPE) {
+ pa_log_debug("snd_pcm_avail_update: Buffer underrun!");
+ u->first = TRUE;
+ }
+
+ if ((err = snd_pcm_recover(u->pcm_handle, n, 1)) == 0)
+ continue;
+
+ if (err == -EAGAIN)
+ return work_done;
+
+ pa_log("snd_pcm_avail_update: %s", snd_strerror(err));
+ return -1;
+ }
+
+/* pa_log("Got request for %i samples", (int) n); */
+
+ if (n <= 0)
+ return work_done;
+
+ frames = n;
+
+ if ((err = snd_pcm_mmap_begin(u->pcm_handle, &areas, &offset, &frames)) < 0) {
+
+ if (err == -EPIPE) {
+ pa_log_debug("snd_pcm_mmap_begin: Buffer underrun!");
+ u->first = TRUE;
+ }
+
+ if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0)
+ continue;
+
+ if (err == -EAGAIN)
+ return work_done;
+
+ pa_log("Failed to write data to DSP: %s", snd_strerror(err));
+ return -1;
+ }
+
+ /* Check these are multiples of 8 bit */
+ pa_assert((areas[0].first & 7) == 0);
+        pa_assert((areas[0].step & 7) == 0);
+
+ /* We assume a single interleaved memory buffer */
+ pa_assert((areas[0].first >> 3) == 0);
+ pa_assert((areas[0].step >> 3) == u->frame_size);
+
+ p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
+
+ chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, 1);
+ chunk.length = pa_memblock_get_length(chunk.memblock);
+ chunk.index = 0;
+
+ pa_sink_render_into_full(u->sink, &chunk);
+
+ /* FIXME: Maybe we can do something to keep this memory block
+ * a little bit longer around? */
+ pa_memblock_unref_fixed(chunk.memblock);
+
+ if ((err = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0) {
+
+ if (err == -EPIPE) {
+ pa_log_debug("snd_pcm_mmap_commit: Buffer underrun!");
+ u->first = TRUE;
+ }
+
+ if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0)
+ continue;
+
+ if (err == -EAGAIN)
+ return work_done;
+
+ pa_log("Failed to write data to DSP: %s", snd_strerror(err));
+ return -1;
+ }
+
+ work_done = 1;
+
+ if (frames >= (snd_pcm_uframes_t) n)
+ return work_done;
+
+/* pa_log("wrote %i samples", (int) frames); */
+ }
+}
+
+static int unix_write(struct userdata *u) {
+ snd_pcm_status_t *status;
+ int work_done = 0;
+
+ snd_pcm_status_alloca(&status);
+
+ pa_assert(u);
+ pa_sink_assert_ref(u->sink);
+
+ for (;;) {
+ void *p;
+ snd_pcm_sframes_t t;
+ ssize_t l;
+ int err;
+
+ if ((err = snd_pcm_status(u->pcm_handle, status)) < 0) {
+ pa_log("Failed to query DSP status data: %s", snd_strerror(err));
+ return -1;
+ }
+
+ if (snd_pcm_status_get_avail_max(status)*u->frame_size >= u->hwbuf_size)
+ pa_log_debug("Buffer underrun!");
+
+ l = snd_pcm_status_get_avail(status) * u->frame_size;
+
+/* pa_log("%u bytes to write", l); */
+
+ if (l <= 0)
+ return work_done;
+
+ if (u->memchunk.length <= 0)
+ pa_sink_render(u->sink, l, &u->memchunk);
+
+ pa_assert(u->memchunk.length > 0);
+
+ p = pa_memblock_acquire(u->memchunk.memblock);
+ t = snd_pcm_writei(u->pcm_handle, (const uint8_t*) p + u->memchunk.index, u->memchunk.length / u->frame_size);
+ pa_memblock_release(u->memchunk.memblock);
+
+/* pa_log("wrote %i bytes of %u (%u)", t*u->frame_size, u->memchunk.length, l); */
+
+ pa_assert(t != 0);
+
+ if (t < 0) {
+
+ if ((t = snd_pcm_recover(u->pcm_handle, t, 1)) == 0)
+ continue;
+
+ if (t == -EAGAIN) {
+ pa_log_debug("EAGAIN");
+ return work_done;
+ } else {
+ pa_log("Failed to write data to DSP: %s", snd_strerror(t));
+ return -1;
+ }
+ }
+
+ u->memchunk.index += t * u->frame_size;
+ u->memchunk.length -= t * u->frame_size;
+
+ if (u->memchunk.length <= 0) {
+ pa_memblock_unref(u->memchunk.memblock);
+ pa_memchunk_reset(&u->memchunk);
+ }
+
+ work_done = 1;
+
+ if (t * u->frame_size >= (unsigned) l)
+ return work_done;
+ }
+}
+
+static pa_usec_t sink_get_latency(struct userdata *u) {
+ pa_usec_t r = 0;
+ snd_pcm_status_t *status;
+ snd_pcm_sframes_t frames = 0;
+ int err;
+
+ snd_pcm_status_alloca(&status);
+
+ pa_assert(u);
+ pa_assert(u->pcm_handle);
+
+ if ((err = snd_pcm_status(u->pcm_handle, status)) < 0)
+ pa_log("Failed to get delay: %s", snd_strerror(err));
+ else
+ frames = snd_pcm_status_get_delay(status);
+
+ if (frames > 0)
+ r = pa_bytes_to_usec(frames * u->frame_size, &u->sink->sample_spec);
+
+ if (u->memchunk.memblock)
+ r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
+
+ return r;
+}
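As a concrete example of what sink_get_latency() reports: at 44100 Hz with 16-bit stereo frames (frame_size = 4 bytes), a driver delay of 2205 frames is 8820 bytes, which pa_bytes_to_usec() converts to 2205 / 44100 s = 50 ms; any audio still queued in u->memchunk but not yet written to ALSA is added on top of that figure.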
+
+static int build_pollfd(struct userdata *u) {
+ int err;
+ struct pollfd *pollfd;
+ int n;
+
+ pa_assert(u);
+ pa_assert(u->pcm_handle);
+
+ if ((n = snd_pcm_poll_descriptors_count(u->pcm_handle)) < 0) {
+ pa_log("snd_pcm_poll_descriptors_count() failed: %s", snd_strerror(n));
+ return -1;
+ }
+
+ if (u->alsa_rtpoll_item)
+ pa_rtpoll_item_free(u->alsa_rtpoll_item);
+
+ u->alsa_rtpoll_item = pa_rtpoll_item_new(u->rtpoll, PA_RTPOLL_NEVER, n);
+ pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, NULL);
+
+ if ((err = snd_pcm_poll_descriptors(u->pcm_handle, pollfd, n)) < 0) {
+ pa_log("snd_pcm_poll_descriptors() failed: %s", snd_strerror(err));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int suspend(struct userdata *u) {
+ pa_assert(u);
+ pa_assert(u->pcm_handle);
+
+ /* Let's suspend */
+ snd_pcm_drain(u->pcm_handle);
+ snd_pcm_close(u->pcm_handle);
+ u->pcm_handle = NULL;
+
+ if (u->alsa_rtpoll_item) {
+ pa_rtpoll_item_free(u->alsa_rtpoll_item);
+ u->alsa_rtpoll_item = NULL;
+ }
+
+ pa_log_info("Device suspended...");
+
+ return 0;
+}
+
+static int unsuspend(struct userdata *u) {
+ pa_sample_spec ss;
+ int err;
+ pa_bool_t b;
+ unsigned nfrags;
+ snd_pcm_uframes_t period_size;
+
+ pa_assert(u);
+ pa_assert(!u->pcm_handle);
+
+ pa_log_info("Trying resume...");
+
+ snd_config_update_free_global();
+ if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_PLAYBACK, SND_PCM_NONBLOCK)) < 0) {
+ pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
+ goto fail;
+ }
+
+ ss = u->sink->sample_spec;
+ nfrags = u->nfragments;
+ period_size = u->fragment_size / u->frame_size;
+ b = u->use_mmap;
+
+ if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, &b, TRUE)) < 0) {
+ pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
+ goto fail;
+ }
+
+ if (b != u->use_mmap) {
+ pa_log_warn("Resume failed, couldn't get original access mode.");
+ goto fail;
+ }
+
+ if (!pa_sample_spec_equal(&ss, &u->sink->sample_spec)) {
+ pa_log_warn("Resume failed, couldn't restore original sample settings.");
+ goto fail;
+ }
+
+ if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
+ pa_log_warn("Resume failed, couldn't restore original fragment settings.");
+ goto fail;
+ }
+
+ if ((err = pa_alsa_set_sw_params(u->pcm_handle)) < 0) {
+ pa_log("Failed to set software parameters: %s", snd_strerror(err));
+ goto fail;
+ }
+
+ if (build_pollfd(u) < 0)
+ goto fail;
+
+ /* FIXME: We need to reload the volume somehow */
+
+ u->first = TRUE;
+
+ pa_log_info("Resumed successfully...");
+
+ return 0;
+
+fail:
+ if (u->pcm_handle) {
+ snd_pcm_close(u->pcm_handle);
+ u->pcm_handle = NULL;
+ }
+
+ return -1;
+}
+
+static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u = PA_SINK(o)->userdata;
+
+ switch (code) {
+
+ case PA_SINK_MESSAGE_GET_LATENCY: {
+ pa_usec_t r = 0;
+
+ if (u->pcm_handle)
+ r = sink_get_latency(u);
+
+ *((pa_usec_t*) data) = r;
+
+ return 0;
+ }
+
+ case PA_SINK_MESSAGE_SET_STATE:
+
+ switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
+
+ case PA_SINK_SUSPENDED:
+ pa_assert(PA_SINK_OPENED(u->sink->thread_info.state));
+
+ if (suspend(u) < 0)
+ return -1;
+
+ break;
+
+ case PA_SINK_IDLE:
+ case PA_SINK_RUNNING:
+
+ if (u->sink->thread_info.state == PA_SINK_INIT) {
+ if (build_pollfd(u) < 0)
+ return -1;
+ }
+
+ if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
+ if (unsuspend(u) < 0)
+ return -1;
+ }
+
+ break;
+
+ case PA_SINK_UNLINKED:
+ case PA_SINK_INIT:
+ ;
+ }
+
+ break;
+ }
+
+ return pa_sink_process_msg(o, code, data, offset, chunk);
+}
+
+static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
+ struct userdata *u = snd_mixer_elem_get_callback_private(elem);
+
+ pa_assert(u);
+ pa_assert(u->mixer_handle);
+
+ if (mask == SND_CTL_EVENT_MASK_REMOVE)
+ return 0;
+
+ if (mask & SND_CTL_EVENT_MASK_VALUE) {
+ pa_sink_get_volume(u->sink);
+ pa_sink_get_mute(u->sink);
+ }
+
+ return 0;
+}
+
+static int sink_get_volume_cb(pa_sink *s) {
+ struct userdata *u = s->userdata;
+ int err;
+ int i;
+
+ pa_assert(u);
+ pa_assert(u->mixer_elem);
+
+ for (i = 0; i < s->sample_spec.channels; i++) {
+ long set_vol, vol;
+
+ pa_assert(snd_mixer_selem_has_playback_channel(u->mixer_elem, u->mixer_map[i]));
+
+ if ((err = snd_mixer_selem_get_playback_volume(u->mixer_elem, u->mixer_map[i], &vol)) < 0)
+ goto fail;
+
+ set_vol = (long) roundf(((float) s->volume.values[i] * (u->hw_volume_max - u->hw_volume_min)) / PA_VOLUME_NORM) + u->hw_volume_min;
+
+ /* Try to avoid superfluous volume changes */
+ if (set_vol != vol)
+ s->volume.values[i] = (pa_volume_t) roundf(((float) (vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
+ }
+
+ return 0;
+
+fail:
+ pa_log_error("Unable to read volume: %s", snd_strerror(err));
+
+ s->get_volume = NULL;
+ s->set_volume = NULL;
+ return -1;
+}
+
+static int sink_set_volume_cb(pa_sink *s) {
+ struct userdata *u = s->userdata;
+ int err;
+ int i;
+
+ pa_assert(u);
+ pa_assert(u->mixer_elem);
+
+ for (i = 0; i < s->sample_spec.channels; i++) {
+ long alsa_vol;
+ pa_volume_t vol;
+
+ pa_assert(snd_mixer_selem_has_playback_channel(u->mixer_elem, u->mixer_map[i]));
+
+ vol = s->volume.values[i];
+
+ if (vol > PA_VOLUME_NORM)
+ vol = PA_VOLUME_NORM;
+
+ alsa_vol = (long) roundf(((float) vol * (u->hw_volume_max - u->hw_volume_min)) / PA_VOLUME_NORM) + u->hw_volume_min;
+
+ if ((err = snd_mixer_selem_set_playback_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ pa_log_error("Unable to set volume: %s", snd_strerror(err));
+
+ s->get_volume = NULL;
+ s->set_volume = NULL;
+ return -1;
+}
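Both volume callbacks above map linearly between PulseAudio's software volume scale and ALSA's integer mixer range. With PA_VOLUME_NORM being 0x10000 and an assumed mixer range of 0..31, for example, a requested volume of PA_VOLUME_NORM/2 becomes round(15.5) = 16 mixer steps, and reading that step back gives round(16 * 65536 / 31) = 33825, i.e. roughly 51.6% of PA_VOLUME_NORM. This quantisation is why sink_get_volume_cb() only rewrites s->volume.values[i] when the stored value would map to a different ALSA step, avoiding superfluous volume updates.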
+
+static int sink_get_mute_cb(pa_sink *s) {
+ struct userdata *u = s->userdata;
+ int err, sw;
+
+ pa_assert(u);
+ pa_assert(u->mixer_elem);
+
+ if ((err = snd_mixer_selem_get_playback_switch(u->mixer_elem, 0, &sw)) < 0) {
+ pa_log_error("Unable to get switch: %s", snd_strerror(err));
+
+ s->get_mute = NULL;
+ s->set_mute = NULL;
+ return -1;
+ }
+
+ s->muted = !sw;
+
+ return 0;
+}
+
+static int sink_set_mute_cb(pa_sink *s) {
+ struct userdata *u = s->userdata;
+ int err;
+
+ pa_assert(u);
+ pa_assert(u->mixer_elem);
+
+ if ((err = snd_mixer_selem_set_playback_switch_all(u->mixer_elem, !s->muted)) < 0) {
+ pa_log_error("Unable to set switch: %s", snd_strerror(err));
+
+ s->get_mute = NULL;
+ s->set_mute = NULL;
+ return -1;
+ }
+
+ return 0;
+}
+
+static void thread_func(void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(u);
+
+ pa_log_debug("Thread starting up");
+
+ if (u->core->realtime_scheduling)
+ pa_make_realtime(u->core->realtime_priority);
+
+ pa_thread_mq_install(&u->thread_mq);
+ pa_rtpoll_install(u->rtpoll);
+
+ for (;;) {
+ int ret;
+
+ /* Render some data and write it to the dsp */
+ if (PA_SINK_OPENED(u->sink->thread_info.state)) {
+ int work_done = 0;
+
+ if (u->use_mmap) {
+ if ((work_done = mmap_write(u)) < 0)
+ goto fail;
+ } else {
+ if ((work_done = unix_write(u)) < 0)
+ goto fail;
+ }
+
+ if (work_done && u->first) {
+ pa_log_info("Starting playback.");
+ snd_pcm_start(u->pcm_handle);
+ u->first = FALSE;
+ continue;
+ }
+ }
+
+ /* Hmm, nothing to do. Let's sleep */
+ if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)
+ goto fail;
+
+ if (ret == 0)
+ goto finish;
+
+ /* Tell ALSA about this and process its response */
+ if (PA_SINK_OPENED(u->sink->thread_info.state)) {
+ struct pollfd *pollfd;
+ unsigned short revents = 0;
+ int err;
+ unsigned n;
+
+ pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
+
+ if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
+ pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
+ goto fail;
+ }
+
+ if (revents & (POLLERR|POLLNVAL|POLLHUP)) {
+
+ if (revents & POLLERR)
+ pa_log_warn("Got POLLERR from ALSA");
+ if (revents & POLLNVAL)
+ pa_log_warn("Got POLLNVAL from ALSA");
+ if (revents & POLLHUP)
+ pa_log_warn("Got POLLHUP from ALSA");
+
+ /* Try to recover from this error */
+
+ switch (snd_pcm_state(u->pcm_handle)) {
+
+ case SND_PCM_STATE_XRUN:
+ if ((err = snd_pcm_recover(u->pcm_handle, -EPIPE, 1)) != 0) {
+ pa_log_warn("Could not recover from POLLERR|POLLNVAL|POLLHUP and XRUN: %s", snd_strerror(err));
+ goto fail;
+ }
+ break;
+
+ case SND_PCM_STATE_SUSPENDED:
+ if ((err = snd_pcm_recover(u->pcm_handle, -ESTRPIPE, 1)) != 0) {
+ pa_log_warn("Could not recover from POLLERR|POLLNVAL|POLLHUP and SUSPENDED: %s", snd_strerror(err));
+ goto fail;
+ }
+ break;
+
+ default:
+
+ snd_pcm_drop(u->pcm_handle);
+
+ if ((err = snd_pcm_prepare(u->pcm_handle)) < 0) {
+ pa_log_warn("Could not recover from POLLERR|POLLNVAL|POLLHUP with snd_pcm_prepare(): %s", snd_strerror(err));
+ goto fail;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+fail:
+    /* If this was not a regular exit from the loop we have to continue
+     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
+ pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
+ pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
+
+finish:
+ pa_log_debug("Thread shutting down");
+}
+
+int pa__init(pa_module*m) {
+
+ pa_modargs *ma = NULL;
+ struct userdata *u = NULL;
+ const char *dev_id;
+ pa_sample_spec ss;
+ pa_channel_map map;
+ uint32_t nfrags, frag_size;
+ snd_pcm_uframes_t period_size;
+ size_t frame_size;
+ snd_pcm_info_t *pcm_info = NULL;
+ int err;
+ char *t;
+ const char *name;
+ char *name_buf = NULL;
+ int namereg_fail;
+ pa_bool_t use_mmap = TRUE, b;
+
+ snd_pcm_info_alloca(&pcm_info);
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments");
+ goto fail;
+ }
+
+ ss = m->core->default_sample_spec;
+ if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
+ pa_log("Failed to parse sample specification and channel map");
+ goto fail;
+ }
+
+ frame_size = pa_frame_size(&ss);
+
+ nfrags = m->core->default_n_fragments;
+ frag_size = pa_usec_to_bytes(m->core->default_fragment_size_msec*1000, &ss);
+ if (frag_size <= 0)
+ frag_size = frame_size;
+
+ if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 || pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0) {
+ pa_log("Failed to parse buffer metrics");
+ goto fail;
+ }
+ period_size = frag_size/frame_size;
+
+ if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
+ pa_log("Failed to parse mmap argument.");
+ goto fail;
+ }
+
+ u = pa_xnew0(struct userdata, 1);
+ u->core = m->core;
+ u->module = m;
+ m->userdata = u;
+ u->use_mmap = use_mmap;
+ u->first = TRUE;
+ pa_thread_mq_init(&u->thread_mq, m->core->mainloop);
+ u->rtpoll = pa_rtpoll_new();
+ u->alsa_rtpoll_item = NULL;
+ pa_rtpoll_item_new_asyncmsgq(u->rtpoll, PA_RTPOLL_EARLY, u->thread_mq.inq);
+
+ snd_config_update_free_global();
+
+ b = use_mmap;
+
+ if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
+
+ if (!(u->pcm_handle = pa_alsa_open_by_device_id(
+ dev_id,
+ &u->device_name,
+ &ss, &map,
+ SND_PCM_STREAM_PLAYBACK,
+ &nfrags, &period_size,
+ &b)))
+
+ goto fail;
+
+ } else {
+
+ if (!(u->pcm_handle = pa_alsa_open_by_device_string(
+ pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
+ &u->device_name,
+ &ss, &map,
+ SND_PCM_STREAM_PLAYBACK,
+ &nfrags, &period_size,
+ &b)))
+ goto fail;
+
+ }
+
+ pa_assert(u->device_name);
+ pa_log_info("Successfully opened device %s.", u->device_name);
+
+ if (use_mmap && !b) {
+ pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
+ u->use_mmap = use_mmap = b;
+ }
+
+ if (u->use_mmap)
+ pa_log_info("Successfully enabled mmap() mode.");
+
+ if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
+ pa_log("Error fetching PCM info: %s", snd_strerror(err));
+ goto fail;
+ }
+
+ if ((err = pa_alsa_set_sw_params(u->pcm_handle)) < 0) {
+ pa_log("Failed to set software parameters: %s", snd_strerror(err));
+ goto fail;
+ }
+
+ /* ALSA might tweak the sample spec, so recalculate the frame size */
+ frame_size = pa_frame_size(&ss);
+
+ if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
+ pa_log_warn("Error opening mixer: %s", snd_strerror(err));
+ else {
+ pa_bool_t found = FALSE;
+
+ if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
+ found = TRUE;
+ else {
+ char *md = pa_sprintf_malloc("hw:%s", dev_id);
+
+ if (strcmp(u->device_name, md))
+ if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
+ found = TRUE;
+
+ pa_xfree(md);
+ }
+
+ if (found)
+ if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Master", "PCM")))
+ found = FALSE;
+
+ if (!found) {
+ snd_mixer_close(u->mixer_handle);
+ u->mixer_handle = NULL;
+ }
+ }
+
+ if ((name = pa_modargs_get_value(ma, "sink_name", NULL)))
+ namereg_fail = 1;
+ else {
+ name = name_buf = pa_sprintf_malloc("alsa_output.%s", u->device_name);
+ namereg_fail = 0;
+ }
+
+ u->sink = pa_sink_new(m->core, __FILE__, name, namereg_fail, &ss, &map);
+ pa_xfree(name_buf);
+
+ if (!u->sink) {
+ pa_log("Failed to create sink object");
+ goto fail;
+ }
+
+ u->sink->parent.process_msg = sink_process_msg;
+ u->sink->userdata = u;
+
+ pa_sink_set_module(u->sink, m);
+ pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
+ pa_sink_set_rtpoll(u->sink, u->rtpoll);
+ pa_sink_set_description(u->sink, t = pa_sprintf_malloc(
+ "ALSA PCM on %s (%s)%s",
+ u->device_name,
+ snd_pcm_info_get_name(pcm_info),
+ use_mmap ? " via DMA" : ""));
+ pa_xfree(t);
+
+ u->sink->flags = PA_SINK_HARDWARE|PA_SINK_LATENCY;
+
+ u->frame_size = frame_size;
+ u->fragment_size = frag_size = period_size * frame_size;
+ u->nfragments = nfrags;
+ u->hwbuf_size = u->fragment_size * nfrags;
+
+ pa_log_info("Using %u fragments of size %lu bytes.", nfrags, (long unsigned) u->fragment_size);
+
+ pa_memchunk_reset(&u->memchunk);
+
+ if (u->mixer_handle) {
+ pa_assert(u->mixer_elem);
+
+ if (snd_mixer_selem_has_playback_volume(u->mixer_elem))
+ if (pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, TRUE) >= 0) {
+ u->sink->get_volume = sink_get_volume_cb;
+ u->sink->set_volume = sink_set_volume_cb;
+ snd_mixer_selem_get_playback_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max);
+ u->sink->flags |= PA_SINK_HW_VOLUME_CTRL;
+ }
+
+ if (snd_mixer_selem_has_playback_switch(u->mixer_elem)) {
+ u->sink->get_mute = sink_get_mute_cb;
+ u->sink->set_mute = sink_set_mute_cb;
+ u->sink->flags |= PA_SINK_HW_VOLUME_CTRL;
+ }
+
+ u->mixer_fdl = pa_alsa_fdlist_new();
+
+ if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
+ pa_log("Failed to initialize file descriptor monitoring");
+ goto fail;
+ }
+
+ snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
+ snd_mixer_elem_set_callback_private(u->mixer_elem, u);
+ } else
+ u->mixer_fdl = NULL;
+
+ if (!(u->thread = pa_thread_new(thread_func, u))) {
+ pa_log("Failed to create thread.");
+ goto fail;
+ }
+
+ /* Get initial mixer settings */
+ if (u->sink->get_volume)
+ u->sink->get_volume(u->sink);
+ if (u->sink->get_mute)
+ u->sink->get_mute(u->sink);
+
+ pa_sink_put(u->sink);
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+
+ if (ma)
+ pa_modargs_free(ma);
+
+ pa__done(m);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->sink)
+ pa_sink_unlink(u->sink);
+
+ if (u->thread) {
+ pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
+ pa_thread_free(u->thread);
+ }
+
+ pa_thread_mq_done(&u->thread_mq);
+
+ if (u->sink)
+ pa_sink_unref(u->sink);
+
+ if (u->memchunk.memblock)
+ pa_memblock_unref(u->memchunk.memblock);
+
+ if (u->alsa_rtpoll_item)
+ pa_rtpoll_item_free(u->alsa_rtpoll_item);
+
+ if (u->rtpoll)
+ pa_rtpoll_free(u->rtpoll);
+
+ if (u->mixer_fdl)
+ pa_alsa_fdlist_free(u->mixer_fdl);
+
+ if (u->mixer_handle)
+ snd_mixer_close(u->mixer_handle);
+
+ if (u->pcm_handle) {
+ snd_pcm_drop(u->pcm_handle);
+ snd_pcm_close(u->pcm_handle);
+ }
+
+ pa_xfree(u->device_name);
+ pa_xfree(u);
+
+ snd_config_update_free_global();
+}
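Given the argument list declared in PA_MODULE_USAGE above, the sink is loaded like any other module, for instance with a default.pa line such as

    load-module module-alsa-sink device=hw:0 sink_name=alsa_out fragments=8 fragment_size=2048 mmap=yes

where the device string and buffer metrics are examples rather than values taken from this patch; if neither device nor device_id is given, the module falls back to the "default" ALSA device (DEFAULT_DEVICE).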
diff --git a/src/modules/module-alsa-source.c b/src/modules/module-alsa-source.c
new file mode 100644
index 00000000..23a2f921
--- /dev/null
+++ b/src/modules/module-alsa-source.c
@@ -0,0 +1,968 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+ Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+
+#include <asoundlib.h>
+
+#include <pulse/xmalloc.h>
+#include <pulse/util.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/core.h>
+#include <pulsecore/module.h>
+#include <pulsecore/memchunk.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/sample-util.h>
+#include <pulsecore/log.h>
+#include <pulsecore/macro.h>
+#include <pulsecore/thread.h>
+#include <pulsecore/core-error.h>
+#include <pulsecore/thread-mq.h>
+#include <pulsecore/rtpoll.h>
+
+#include "alsa-util.h"
+#include "module-alsa-source-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("ALSA Source");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_USAGE(
+ "source_name=<name for the source> "
+ "device=<ALSA device> "
+ "device_id=<ALSA device id> "
+ "format=<sample format> "
+ "channels=<number of channels> "
+ "rate=<sample rate> "
+ "fragments=<number of fragments> "
+ "fragment_size=<fragment size> "
+ "channel_map=<channel map> "
+ "mmap=<enable memory mapping?>");
+
+#define DEFAULT_DEVICE "default"
+
+struct userdata {
+ pa_core *core;
+ pa_module *module;
+ pa_source *source;
+
+ pa_thread *thread;
+ pa_thread_mq thread_mq;
+ pa_rtpoll *rtpoll;
+
+ snd_pcm_t *pcm_handle;
+
+ pa_alsa_fdlist *mixer_fdl;
+ snd_mixer_t *mixer_handle;
+ snd_mixer_elem_t *mixer_elem;
+ long hw_volume_max, hw_volume_min;
+
+ size_t frame_size, fragment_size, hwbuf_size;
+ unsigned nfragments;
+
+ char *device_name;
+
+ pa_bool_t use_mmap;
+
+ pa_rtpoll_item *alsa_rtpoll_item;
+
+ snd_mixer_selem_channel_id_t mixer_map[SND_MIXER_SCHN_LAST];
+};
+
+static const char* const valid_modargs[] = {
+ "device",
+ "device_id",
+ "source_name",
+ "channels",
+ "rate",
+ "format",
+ "fragments",
+ "fragment_size",
+ "channel_map",
+ "mmap",
+ NULL
+};
+
+static int mmap_read(struct userdata *u) {
+ int work_done = 0;
+
+ pa_assert(u);
+ pa_source_assert_ref(u->source);
+
+ for (;;) {
+ snd_pcm_sframes_t n;
+ int err;
+ const snd_pcm_channel_area_t *areas;
+ snd_pcm_uframes_t offset, frames;
+ pa_memchunk chunk;
+ void *p;
+
+ if ((n = snd_pcm_avail_update(u->pcm_handle)) < 0) {
+
+ if (n == -EPIPE)
+ pa_log_debug("snd_pcm_avail_update: Buffer underrun!");
+
+ if ((err = snd_pcm_recover(u->pcm_handle, n, 1)) == 0)
+ continue;
+
+ if (err == -EAGAIN)
+ return work_done;
+
+ pa_log("snd_pcm_avail_update: %s", snd_strerror(err));
+ return -1;
+ }
+
+/* pa_log("Got request for %i samples", (int) n); */
+
+ if (n <= 0)
+ return work_done;
+
+ frames = n;
+
+ if ((err = snd_pcm_mmap_begin(u->pcm_handle, &areas, &offset, &frames)) < 0) {
+
+ if (err == -EPIPE)
+ pa_log_debug("snd_pcm_mmap_begin: Buffer underrun!");
+
+ if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0)
+ continue;
+
+ if (err == -EAGAIN)
+ return work_done;
+
+ pa_log("Failed to write data to DSP: %s", snd_strerror(err));
+ return -1;
+ }
+
+ /* Check these are multiples of 8 bit */
+ pa_assert((areas[0].first & 7) == 0);
+        pa_assert((areas[0].step & 7) == 0);
+
+ /* We assume a single interleaved memory buffer */
+ pa_assert((areas[0].first >> 3) == 0);
+ pa_assert((areas[0].step >> 3) == u->frame_size);
+
+ p = (uint8_t*) areas[0].addr + (offset * u->frame_size);
+
+ chunk.memblock = pa_memblock_new_fixed(u->core->mempool, p, frames * u->frame_size, 1);
+ chunk.length = pa_memblock_get_length(chunk.memblock);
+ chunk.index = 0;
+
+ pa_source_post(u->source, &chunk);
+
+ /* FIXME: Maybe we can do something to keep this memory block
+ * a little bit longer around? */
+ pa_memblock_unref_fixed(chunk.memblock);
+
+ if ((err = snd_pcm_mmap_commit(u->pcm_handle, offset, frames)) < 0) {
+
+ if (err == -EPIPE)
+ pa_log_debug("snd_pcm_mmap_commit: Buffer underrun!");
+
+ if ((err = snd_pcm_recover(u->pcm_handle, err, 1)) == 0)
+ continue;
+
+ if (err == -EAGAIN)
+ return work_done;
+
+ pa_log("Failed to write data to DSP: %s", snd_strerror(err));
+ return -1;
+ }
+
+ work_done = 1;
+
+/* pa_log("wrote %i samples", (int) frames); */
+ }
+}
+
+static int unix_read(struct userdata *u) {
+ snd_pcm_status_t *status;
+ int work_done = 0;
+
+ snd_pcm_status_alloca(&status);
+
+ pa_assert(u);
+ pa_source_assert_ref(u->source);
+
+ for (;;) {
+ void *p;
+ snd_pcm_sframes_t t, k;
+ ssize_t l;
+ int err;
+ pa_memchunk chunk;
+
+ if ((err = snd_pcm_status(u->pcm_handle, status)) < 0) {
+ pa_log("Failed to query DSP status data: %s", snd_strerror(err));
+ return -1;
+ }
+
+ if (snd_pcm_status_get_avail_max(status)*u->frame_size >= u->hwbuf_size)
+ pa_log_debug("Buffer overrun!");
+
+ l = snd_pcm_status_get_avail(status) * u->frame_size;
+
+ if (l <= 0)
+ return work_done;
+
+ chunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
+
+ k = pa_memblock_get_length(chunk.memblock);
+
+ if (k > l)
+ k = l;
+
+ k = (k/u->frame_size)*u->frame_size;
+
+ p = pa_memblock_acquire(chunk.memblock);
+ t = snd_pcm_readi(u->pcm_handle, (uint8_t*) p, k / u->frame_size);
+ pa_memblock_release(chunk.memblock);
+
+/* pa_log("wrote %i bytes of %u (%u)", t*u->frame_size, u->memchunk.length, l); */
+
+ pa_assert(t != 0);
+
+ if (t < 0) {
+ pa_memblock_unref(chunk.memblock);
+
+ if ((t = snd_pcm_recover(u->pcm_handle, t, 1)) == 0)
+ continue;
+
+ if (t == -EAGAIN) {
+ pa_log_debug("EAGAIN");
+ return work_done;
+ } else {
+ pa_log("Failed to read data from DSP: %s", snd_strerror(t));
+ return -1;
+ }
+ }
+
+ chunk.index = 0;
+ chunk.length = t * u->frame_size;
+
+ pa_source_post(u->source, &chunk);
+ pa_memblock_unref(chunk.memblock);
+
+ work_done = 1;
+
+ if (t * u->frame_size >= (unsigned) l)
+ return work_done;
+ }
+}
+
+static pa_usec_t source_get_latency(struct userdata *u) {
+ pa_usec_t r = 0;
+ snd_pcm_status_t *status;
+ snd_pcm_sframes_t frames = 0;
+ int err;
+
+ snd_pcm_status_alloca(&status);
+
+ pa_assert(u);
+ pa_assert(u->pcm_handle);
+
+ if ((err = snd_pcm_status(u->pcm_handle, status)) < 0)
+ pa_log("Failed to get delay: %s", snd_strerror(err));
+ else
+ frames = snd_pcm_status_get_delay(status);
+
+ if (frames > 0)
+ r = pa_bytes_to_usec(frames * u->frame_size, &u->source->sample_spec);
+
+ return r;
+}
+
+static int build_pollfd(struct userdata *u) {
+ int err;
+ struct pollfd *pollfd;
+ int n;
+
+ pa_assert(u);
+ pa_assert(u->pcm_handle);
+
+ if ((n = snd_pcm_poll_descriptors_count(u->pcm_handle)) < 0) {
+ pa_log("snd_pcm_poll_descriptors_count() failed: %s", snd_strerror(n));
+ return -1;
+ }
+
+ if (u->alsa_rtpoll_item)
+ pa_rtpoll_item_free(u->alsa_rtpoll_item);
+
+ u->alsa_rtpoll_item = pa_rtpoll_item_new(u->rtpoll, PA_RTPOLL_NEVER, n);
+ pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, NULL);
+
+ if ((err = snd_pcm_poll_descriptors(u->pcm_handle, pollfd, n)) < 0) {
+ pa_log("snd_pcm_poll_descriptors() failed: %s", snd_strerror(err));
+ return -1;
+ }
+
+ return 0;
+}
+
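+/* Called from the IO thread when the source gets suspended: close the
+ * PCM device and drop the rtpoll item so we stop polling it. */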
+static int suspend(struct userdata *u) {
+ pa_assert(u);
+ pa_assert(u->pcm_handle);
+
+ /* Let's suspend */
+ snd_pcm_close(u->pcm_handle);
+ u->pcm_handle = NULL;
+
+ if (u->alsa_rtpoll_item) {
+ pa_rtpoll_item_free(u->alsa_rtpoll_item);
+ u->alsa_rtpoll_item = NULL;
+ }
+
+ pa_log_info("Device suspended...");
+
+ return 0;
+}
+
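+/* Reopen the PCM device after a suspend and verify that we get back the
+ * same access mode, sample spec and fragment setup we had before. */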
+static int unsuspend(struct userdata *u) {
+ pa_sample_spec ss;
+ int err;
+ pa_bool_t b;
+ unsigned nfrags;
+ snd_pcm_uframes_t period_size;
+
+ pa_assert(u);
+ pa_assert(!u->pcm_handle);
+
+ pa_log_info("Trying resume...");
+
+ snd_config_update_free_global();
+ if ((err = snd_pcm_open(&u->pcm_handle, u->device_name, SND_PCM_STREAM_CAPTURE, SND_PCM_NONBLOCK)) < 0) {
+ pa_log("Error opening PCM device %s: %s", u->device_name, snd_strerror(err));
+ goto fail;
+ }
+
+ ss = u->source->sample_spec;
+ nfrags = u->nfragments;
+ period_size = u->fragment_size / u->frame_size;
+ b = u->use_mmap;
+
+ if ((err = pa_alsa_set_hw_params(u->pcm_handle, &ss, &nfrags, &period_size, &b, TRUE)) < 0) {
+ pa_log("Failed to set hardware parameters: %s", snd_strerror(err));
+ goto fail;
+ }
+
+ if (b != u->use_mmap) {
+ pa_log_warn("Resume failed, couldn't get original access mode.");
+ goto fail;
+ }
+
+ if (!pa_sample_spec_equal(&ss, &u->source->sample_spec)) {
+ pa_log_warn("Resume failed, couldn't restore original sample settings.");
+ goto fail;
+ }
+
+ if (nfrags != u->nfragments || period_size*u->frame_size != u->fragment_size) {
+ pa_log_warn("Resume failed, couldn't restore original fragment settings.");
+ goto fail;
+ }
+
+ if ((err = pa_alsa_set_sw_params(u->pcm_handle)) < 0) {
+ pa_log("Failed to set software parameters: %s", snd_strerror(err));
+ goto fail;
+ }
+
+ if (build_pollfd(u) < 0)
+ goto fail;
+
+ snd_pcm_start(u->pcm_handle);
+
+ /* FIXME: We need to reload the volume somehow */
+
+ pa_log_info("Resumed successfully...");
+
+ return 0;
+
+fail:
+ if (u->pcm_handle) {
+ snd_pcm_close(u->pcm_handle);
+ u->pcm_handle = NULL;
+ }
+
+ return -1;
+}
+
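+/* Message handler running in the IO thread: answers latency queries and
+ * suspends/resumes the device on state changes. */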
+static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u = PA_SOURCE(o)->userdata;
+
+ switch (code) {
+
+ case PA_SOURCE_MESSAGE_GET_LATENCY: {
+ pa_usec_t r = 0;
+
+ if (u->pcm_handle)
+ r = source_get_latency(u);
+
+ *((pa_usec_t*) data) = r;
+
+ return 0;
+ }
+
+ case PA_SOURCE_MESSAGE_SET_STATE:
+
+ switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
+
+ case PA_SOURCE_SUSPENDED:
+ pa_assert(PA_SOURCE_OPENED(u->source->thread_info.state));
+
+ if (suspend(u) < 0)
+ return -1;
+
+ break;
+
+ case PA_SOURCE_IDLE:
+ case PA_SOURCE_RUNNING:
+
+ if (u->source->thread_info.state == PA_SOURCE_INIT) {
+ if (build_pollfd(u) < 0)
+ return -1;
+
+ snd_pcm_start(u->pcm_handle);
+ }
+
+ if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
+ if (unsuspend(u) < 0)
+ return -1;
+ }
+
+ break;
+
+ case PA_SOURCE_UNLINKED:
+ case PA_SOURCE_INIT:
+ ;
+ }
+
+ break;
+ }
+
+ return pa_source_process_msg(o, code, data, offset, chunk);
+}
+
+static int mixer_callback(snd_mixer_elem_t *elem, unsigned int mask) {
+ struct userdata *u = snd_mixer_elem_get_callback_private(elem);
+
+ pa_assert(u);
+ pa_assert(u->mixer_handle);
+
+ if (mask == SND_CTL_EVENT_MASK_REMOVE)
+ return 0;
+
+ if (mask & SND_CTL_EVENT_MASK_VALUE) {
+ pa_source_get_volume(u->source);
+ pa_source_get_mute(u->source);
+ }
+
+ return 0;
+}
+
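+/* Translate between the ALSA mixer range [hw_volume_min, hw_volume_max]
+ * and PulseAudio's [0, PA_VOLUME_NORM] scale. For instance, with a
+ * hypothetical mixer range of 0..31, a stored volume of PA_VOLUME_NORM/2
+ * maps to roughly step 16. */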
+static int source_get_volume_cb(pa_source *s) {
+ struct userdata *u = s->userdata;
+ int err;
+ int i;
+
+ pa_assert(u);
+ pa_assert(u->mixer_elem);
+
+ for (i = 0; i < s->sample_spec.channels; i++) {
+ long set_vol, vol;
+
+ pa_assert(snd_mixer_selem_has_capture_channel(u->mixer_elem, u->mixer_map[i]));
+
+ if ((err = snd_mixer_selem_get_capture_volume(u->mixer_elem, u->mixer_map[i], &vol)) < 0)
+ goto fail;
+
+ set_vol = (long) roundf(((float) s->volume.values[i] * (u->hw_volume_max - u->hw_volume_min)) / PA_VOLUME_NORM) + u->hw_volume_min;
+
+ /* Try to avoid superfluous volume changes */
+ if (set_vol != vol)
+ s->volume.values[i] = (pa_volume_t) roundf(((float) (vol - u->hw_volume_min) * PA_VOLUME_NORM) / (u->hw_volume_max - u->hw_volume_min));
+ }
+
+ return 0;
+
+fail:
+ pa_log_error("Unable to read volume: %s", snd_strerror(err));
+
+ s->get_volume = NULL;
+ s->set_volume = NULL;
+ return -1;
+}
+
+static int source_set_volume_cb(pa_source *s) {
+ struct userdata *u = s->userdata;
+ int err;
+ int i;
+
+ pa_assert(u);
+ pa_assert(u->mixer_elem);
+
+ for (i = 0; i < s->sample_spec.channels; i++) {
+ long alsa_vol;
+ pa_volume_t vol;
+
+ pa_assert(snd_mixer_selem_has_capture_channel(u->mixer_elem, u->mixer_map[i]));
+
+ vol = s->volume.values[i];
+
+ if (vol > PA_VOLUME_NORM)
+ vol = PA_VOLUME_NORM;
+
+ alsa_vol = (long) roundf(((float) vol * (u->hw_volume_max - u->hw_volume_min)) / PA_VOLUME_NORM) + u->hw_volume_min;
+
+ if ((err = snd_mixer_selem_set_capture_volume(u->mixer_elem, u->mixer_map[i], alsa_vol)) < 0)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ pa_log_error("Unable to set volume: %s", snd_strerror(err));
+
+ s->get_volume = NULL;
+ s->set_volume = NULL;
+ return -1;
+}
+
+static int source_get_mute_cb(pa_source *s) {
+ struct userdata *u = s->userdata;
+ int err, sw;
+
+ pa_assert(u);
+ pa_assert(u->mixer_elem);
+
+ if ((err = snd_mixer_selem_get_capture_switch(u->mixer_elem, 0, &sw)) < 0) {
+ pa_log_error("Unable to get switch: %s", snd_strerror(err));
+
+ s->get_mute = NULL;
+ s->set_mute = NULL;
+ return -1;
+ }
+
+ s->muted = !sw;
+
+ return 0;
+}
+
+static int source_set_mute_cb(pa_source *s) {
+ struct userdata *u = s->userdata;
+ int err;
+
+ pa_assert(u);
+ pa_assert(u->mixer_elem);
+
+ if ((err = snd_mixer_selem_set_capture_switch_all(u->mixer_elem, !s->muted)) < 0) {
+ pa_log_error("Unable to set switch: %s", snd_strerror(err));
+
+ s->get_mute = NULL;
+ s->set_mute = NULL;
+ return -1;
+ }
+
+ return 0;
+}
+
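+/* The IO thread: read data from the device (mmap or snd_pcm_readi),
+ * sleep in pa_rtpoll_run(), then inspect the poll revents and try to
+ * recover from XRUN/SUSPEND conditions reported by ALSA. */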
+static void thread_func(void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(u);
+
+ pa_log_debug("Thread starting up");
+
+ if (u->core->realtime_scheduling)
+ pa_make_realtime(u->core->realtime_priority);
+
+ pa_thread_mq_install(&u->thread_mq);
+ pa_rtpoll_install(u->rtpoll);
+
+ for (;;) {
+ int ret;
+
+ /* Read some data and pass it to the sources */
+ if (PA_SOURCE_OPENED(u->source->thread_info.state)) {
+
+ if (u->use_mmap) {
+ if (mmap_read(u) < 0)
+ goto fail;
+
+ } else {
+ if (unix_read(u) < 0)
+ goto fail;
+ }
+ }
+
+ /* Hmm, nothing to do. Let's sleep */
+ if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)
+ goto fail;
+
+ if (ret == 0)
+ goto finish;
+
+ /* Tell ALSA about this and process its response */
+ if (PA_SOURCE_OPENED(u->source->thread_info.state)) {
+ struct pollfd *pollfd;
+ unsigned short revents = 0;
+ int err;
+ unsigned n;
+
+ pollfd = pa_rtpoll_item_get_pollfd(u->alsa_rtpoll_item, &n);
+
+ if ((err = snd_pcm_poll_descriptors_revents(u->pcm_handle, pollfd, n, &revents)) < 0) {
+ pa_log("snd_pcm_poll_descriptors_revents() failed: %s", snd_strerror(err));
+ goto fail;
+ }
+
+ if (revents & (POLLERR|POLLNVAL|POLLHUP)) {
+
+ if (revents & POLLERR)
+ pa_log_warn("Got POLLERR from ALSA");
+ if (revents & POLLNVAL)
+ pa_log_warn("Got POLLNVAL from ALSA");
+ if (revents & POLLHUP)
+ pa_log_warn("Got POLLHUP from ALSA");
+
+ /* Try to recover from this error */
+
+ switch (snd_pcm_state(u->pcm_handle)) {
+
+ case SND_PCM_STATE_XRUN:
+ if ((err = snd_pcm_recover(u->pcm_handle, -EPIPE, 1)) != 0) {
+ pa_log_warn("Could not recover from POLLERR|POLLNVAL|POLLHUP and XRUN: %s", snd_strerror(err));
+ goto fail;
+ }
+ break;
+
+ case SND_PCM_STATE_SUSPENDED:
+ if ((err = snd_pcm_recover(u->pcm_handle, -ESTRPIPE, 1)) != 0) {
+ pa_log_warn("Could not recover from POLLERR|POLLNVAL|POLLHUP and SUSPENDED: %s", snd_strerror(err));
+ goto fail;
+ }
+ break;
+
+ default:
+
+ snd_pcm_drop(u->pcm_handle);
+
+ if ((err = snd_pcm_prepare(u->pcm_handle)) < 0) {
+ pa_log_warn("Could not recover from POLLERR|POLLNVAL|POLLHUP with snd_pcm_prepare(): %s", snd_strerror(err));
+ goto fail;
+ }
+ break;
+ }
+ }
+ }
+ }
+
+fail:
+    /* If this was not a regular exit from the loop we have to continue
+     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
+ pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
+ pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
+
+finish:
+ pa_log_debug("Thread shutting down");
+}
+
+int pa__init(pa_module*m) {
+
+ pa_modargs *ma = NULL;
+ struct userdata *u = NULL;
+ const char *dev_id;
+ pa_sample_spec ss;
+ pa_channel_map map;
+ uint32_t nfrags, frag_size;
+ snd_pcm_uframes_t period_size;
+ size_t frame_size;
+ snd_pcm_info_t *pcm_info = NULL;
+ int err;
+ char *t;
+ const char *name;
+ char *name_buf = NULL;
+ int namereg_fail;
+ pa_bool_t use_mmap = TRUE, b;
+
+ snd_pcm_info_alloca(&pcm_info);
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments");
+ goto fail;
+ }
+
+ ss = m->core->default_sample_spec;
+ if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_ALSA) < 0) {
+ pa_log("Failed to parse sample specification");
+ goto fail;
+ }
+
+ frame_size = pa_frame_size(&ss);
+
+ nfrags = m->core->default_n_fragments;
+ frag_size = pa_usec_to_bytes(m->core->default_fragment_size_msec*1000, &ss);
+ if (frag_size <= 0)
+ frag_size = frame_size;
+
+ if (pa_modargs_get_value_u32(ma, "fragments", &nfrags) < 0 || pa_modargs_get_value_u32(ma, "fragment_size", &frag_size) < 0) {
+ pa_log("Failed to parse buffer metrics");
+ goto fail;
+ }
+ period_size = frag_size/frame_size;
+
+ if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
+ pa_log("Failed to parse mmap argument.");
+ goto fail;
+ }
+
+ u = pa_xnew0(struct userdata, 1);
+ u->core = m->core;
+ u->module = m;
+ m->userdata = u;
+ u->use_mmap = use_mmap;
+ pa_thread_mq_init(&u->thread_mq, m->core->mainloop);
+ u->rtpoll = pa_rtpoll_new();
+ u->alsa_rtpoll_item = NULL;
+ pa_rtpoll_item_new_asyncmsgq(u->rtpoll, PA_RTPOLL_EARLY, u->thread_mq.inq);
+
+ snd_config_update_free_global();
+
+ b = use_mmap;
+
+ if ((dev_id = pa_modargs_get_value(ma, "device_id", NULL))) {
+
+ if (!(u->pcm_handle = pa_alsa_open_by_device_id(
+ dev_id,
+ &u->device_name,
+ &ss, &map,
+ SND_PCM_STREAM_CAPTURE,
+ &nfrags, &period_size,
+ &b)))
+ goto fail;
+
+ } else {
+
+ if (!(u->pcm_handle = pa_alsa_open_by_device_string(
+ pa_modargs_get_value(ma, "device", DEFAULT_DEVICE),
+ &u->device_name,
+ &ss, &map,
+ SND_PCM_STREAM_CAPTURE,
+ &nfrags, &period_size,
+ &b)))
+ goto fail;
+ }
+
+ pa_assert(u->device_name);
+ pa_log_info("Successfully opened device %s.", u->device_name);
+
+ if (use_mmap && !b) {
+ pa_log_info("Device doesn't support mmap(), falling back to UNIX read/write mode.");
+ u->use_mmap = use_mmap = b;
+ }
+
+ if (u->use_mmap)
+ pa_log_info("Successfully enabled mmap() mode.");
+
+ if ((err = snd_pcm_info(u->pcm_handle, pcm_info)) < 0) {
+ pa_log("Error fetching PCM info: %s", snd_strerror(err));
+ goto fail;
+ }
+
+ if ((err = pa_alsa_set_sw_params(u->pcm_handle)) < 0) {
+ pa_log("Failed to set software parameters: %s", snd_strerror(err));
+ goto fail;
+ }
+
+ /* ALSA might tweak the sample spec, so recalculate the frame size */
+ frame_size = pa_frame_size(&ss);
+
+ if ((err = snd_mixer_open(&u->mixer_handle, 0)) < 0)
+ pa_log("Error opening mixer: %s", snd_strerror(err));
+ else {
+ pa_bool_t found = FALSE;
+
+ if (pa_alsa_prepare_mixer(u->mixer_handle, u->device_name) >= 0)
+ found = TRUE;
+        else if (dev_id) {
+            /* If a device_id was given, also try the corresponding
+             * raw hw device for the mixer */
+            char *md = pa_sprintf_malloc("hw:%s", dev_id);
+
+            if (strcmp(u->device_name, md))
+                if (pa_alsa_prepare_mixer(u->mixer_handle, md) >= 0)
+                    found = TRUE;
+
+            pa_xfree(md);
+        }
+
+ if (found)
+ if (!(u->mixer_elem = pa_alsa_find_elem(u->mixer_handle, "Capture", "Mic")))
+ found = FALSE;
+
+ if (!found) {
+ snd_mixer_close(u->mixer_handle);
+ u->mixer_handle = NULL;
+ }
+ }
+
+ if ((name = pa_modargs_get_value(ma, "source_name", NULL)))
+ namereg_fail = 1;
+ else {
+ name = name_buf = pa_sprintf_malloc("alsa_input.%s", u->device_name);
+ namereg_fail = 0;
+ }
+
+ u->source = pa_source_new(m->core, __FILE__, name, namereg_fail, &ss, &map);
+ pa_xfree(name_buf);
+
+ if (!u->source) {
+ pa_log("Failed to create source object");
+ goto fail;
+ }
+
+ u->source->parent.process_msg = source_process_msg;
+ u->source->userdata = u;
+
+ pa_source_set_module(u->source, m);
+ pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
+ pa_source_set_rtpoll(u->source, u->rtpoll);
+ pa_source_set_description(u->source, t = pa_sprintf_malloc(
+ "ALSA PCM on %s (%s)%s",
+ u->device_name,
+ snd_pcm_info_get_name(pcm_info),
+ use_mmap ? " via DMA" : ""));
+ pa_xfree(t);
+
+ u->source->flags = PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY;
+
+ u->frame_size = frame_size;
+ u->fragment_size = frag_size = period_size * frame_size;
+ u->nfragments = nfrags;
+ u->hwbuf_size = u->fragment_size * nfrags;
+
+ pa_log_info("Using %u fragments of size %lu bytes.", nfrags, (long unsigned) u->fragment_size);
+
+ if (u->mixer_handle) {
+ pa_assert(u->mixer_elem);
+
+ if (snd_mixer_selem_has_capture_volume(u->mixer_elem))
+ if (pa_alsa_calc_mixer_map(u->mixer_elem, &map, u->mixer_map, FALSE) >= 0) {
+ u->source->get_volume = source_get_volume_cb;
+ u->source->set_volume = source_set_volume_cb;
+ snd_mixer_selem_get_capture_volume_range(u->mixer_elem, &u->hw_volume_min, &u->hw_volume_max);
+ u->source->flags |= PA_SOURCE_HW_VOLUME_CTRL;
+ }
+
+ if (snd_mixer_selem_has_capture_switch(u->mixer_elem)) {
+ u->source->get_mute = source_get_mute_cb;
+ u->source->set_mute = source_set_mute_cb;
+ u->source->flags |= PA_SOURCE_HW_VOLUME_CTRL;
+ }
+
+ u->mixer_fdl = pa_alsa_fdlist_new();
+
+ if (pa_alsa_fdlist_set_mixer(u->mixer_fdl, u->mixer_handle, m->core->mainloop) < 0) {
+ pa_log("Failed to initialize file descriptor monitoring");
+ goto fail;
+ }
+
+ snd_mixer_elem_set_callback(u->mixer_elem, mixer_callback);
+ snd_mixer_elem_set_callback_private(u->mixer_elem, u);
+ } else
+ u->mixer_fdl = NULL;
+
+ if (!(u->thread = pa_thread_new(thread_func, u))) {
+ pa_log("Failed to create thread.");
+ goto fail;
+ }
+ /* Get initial mixer settings */
+ if (u->source->get_volume)
+ u->source->get_volume(u->source);
+ if (u->source->get_mute)
+ u->source->get_mute(u->source);
+
+ pa_source_put(u->source);
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+
+ if (ma)
+ pa_modargs_free(ma);
+
+ pa__done(m);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->source)
+ pa_source_unlink(u->source);
+
+ if (u->thread) {
+ pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
+ pa_thread_free(u->thread);
+ }
+
+ pa_thread_mq_done(&u->thread_mq);
+
+ if (u->source)
+ pa_source_unref(u->source);
+
+ if (u->alsa_rtpoll_item)
+ pa_rtpoll_item_free(u->alsa_rtpoll_item);
+
+ if (u->rtpoll)
+ pa_rtpoll_free(u->rtpoll);
+
+ if (u->mixer_fdl)
+ pa_alsa_fdlist_free(u->mixer_fdl);
+
+ if (u->mixer_handle)
+ snd_mixer_close(u->mixer_handle);
+
+ if (u->pcm_handle) {
+ snd_pcm_drop(u->pcm_handle);
+ snd_pcm_close(u->pcm_handle);
+ }
+
+ pa_xfree(u->device_name);
+ pa_xfree(u);
+
+ snd_config_update_free_global();
+}
diff --git a/src/modules/module-bt-proximity.c b/src/modules/module-bt-proximity.c
new file mode 100644
index 00000000..62d530d4
--- /dev/null
+++ b/src/modules/module-bt-proximity.c
@@ -0,0 +1,492 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2005-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+#include <pulse/xmalloc.h>
+#include <pulsecore/module.h>
+#include <pulsecore/log.h>
+#include <pulsecore/namereg.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/macro.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/core-error.h>
+#include <pulsecore/start-child.h>
+
+#include "dbus-util.h"
+#include "module-bt-proximity-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("Bluetooth Proximity Volume Control");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(TRUE);
+PA_MODULE_USAGE(
+ "sink=<sink name> "
+ "hci=<hci device> "
+);
+
+#define DEFAULT_HCI "hci0"
+
+static const char* const valid_modargs[] = {
+ "sink",
+ "rssi",
+ "hci",
+ NULL,
+};
+
+struct bonding {
+ struct userdata *userdata;
+ char address[18];
+
+ pid_t pid;
+ int fd;
+
+ pa_io_event *io_event;
+
+ enum {
+ UNKNOWN,
+ FOUND,
+ NOT_FOUND
+ } state;
+};
+
+struct userdata {
+ pa_module *module;
+ pa_dbus_connection *dbus_connection;
+
+ char *sink_name;
+ char *hci, *hci_path;
+
+ pa_hashmap *bondings;
+
+ unsigned n_found;
+ unsigned n_unknown;
+
+ pa_bool_t muted;
+};
+
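+/* Unmute the configured sink as soon as at least one bonded phone is in
+ * range, and mute it again once no device is found or pending anymore. */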
+static void update_volume(struct userdata *u) {
+ pa_assert(u);
+
+ if (u->muted && u->n_found > 0) {
+ pa_sink *s;
+
+ u->muted = FALSE;
+
+ if (!(s = pa_namereg_get(u->module->core, u->sink_name, PA_NAMEREG_SINK, FALSE))) {
+ pa_log_warn("Sink device '%s' not available for unmuting.", pa_strnull(u->sink_name));
+ return;
+ }
+
+ pa_log_info("Found %u BT devices, unmuting.", u->n_found);
+ pa_sink_set_mute(s, FALSE);
+
+ } else if (!u->muted && (u->n_found+u->n_unknown) <= 0) {
+ pa_sink *s;
+
+ u->muted = TRUE;
+
+ if (!(s = pa_namereg_get(u->module->core, u->sink_name, PA_NAMEREG_SINK, FALSE))) {
+ pa_log_warn("Sink device '%s' not available for muting.", pa_strnull(u->sink_name));
+ return;
+ }
+
+ pa_log_info("No BT devices found, muting.");
+ pa_sink_set_mute(s, TRUE);
+
+ } else
+ pa_log_info("%u devices now active, %u with unknown state.", u->n_found, u->n_unknown);
+}
+
+static void bonding_free(struct bonding *b) {
+ pa_assert(b);
+
+ if (b->state == FOUND)
+ pa_assert_se(b->userdata->n_found-- >= 1);
+
+ if (b->state == UNKNOWN)
+ pa_assert_se(b->userdata->n_unknown-- >= 1);
+
+ if (b->pid != (pid_t) -1) {
+ kill(b->pid, SIGTERM);
+ waitpid(b->pid, NULL, 0);
+ }
+
+ if (b->fd >= 0)
+ pa_close(b->fd);
+
+ if (b->io_event)
+ b->userdata->module->core->mainloop->io_free(b->io_event);
+
+ pa_xfree(b);
+}
+
+static void io_event_cb(
+ pa_mainloop_api*a,
+ pa_io_event* e,
+ int fd,
+ pa_io_event_flags_t events,
+ void *userdata) {
+
+ struct bonding *b = userdata;
+ char x;
+ ssize_t r;
+
+ pa_assert(b);
+
+ if ((r = read(fd, &x, 1)) <= 0) {
+ pa_log_warn("Child watching '%s' died abnormally: %s", b->address, r == 0 ? "EOF" : pa_cstrerror(errno));
+
+ pa_assert_se(pa_hashmap_remove(b->userdata->bondings, b->address) == b);
+ bonding_free(b);
+ return;
+ }
+
+ pa_assert_se(r == 1);
+
+ if (b->state == UNKNOWN)
+ pa_assert_se(b->userdata->n_unknown-- >= 1);
+
+ if (x == '+') {
+ pa_assert(b->state == UNKNOWN || b->state == NOT_FOUND);
+
+ b->state = FOUND;
+ b->userdata->n_found++;
+
+ pa_log_info("Device '%s' is alive.", b->address);
+
+ } else {
+ pa_assert(x == '-');
+ pa_assert(b->state == UNKNOWN || b->state == FOUND);
+
+ if (b->state == FOUND)
+ b->userdata->n_found--;
+
+ b->state = NOT_FOUND;
+
+ pa_log_info("Device '%s' is dead.", b->address);
+ }
+
+ update_volume(b->userdata);
+}
+
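+/* Start watching a bonded device: ask BlueZ for its major class, ignore
+ * everything that is not a phone, and spawn the proximity helper which
+ * reports '+'/'-' presence changes over a pipe. */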
+static struct bonding* bonding_new(struct userdata *u, const char *a) {
+ struct bonding *b = NULL;
+ DBusMessage *m = NULL, *r = NULL;
+ DBusError e;
+ const char *class;
+
+ pa_assert(u);
+ pa_assert(a);
+
+ pa_return_val_if_fail(strlen(a) == 17, NULL);
+ pa_return_val_if_fail(!pa_hashmap_get(u->bondings, a), NULL);
+
+ dbus_error_init(&e);
+
+ pa_assert_se(m = dbus_message_new_method_call("org.bluez", u->hci_path, "org.bluez.Adapter", "GetRemoteMajorClass"));
+ pa_assert_se(dbus_message_append_args(m, DBUS_TYPE_STRING, &a, DBUS_TYPE_INVALID));
+ r = dbus_connection_send_with_reply_and_block(pa_dbus_connection_get(u->dbus_connection), m, -1, &e);
+
+ if (!r) {
+ pa_log("org.bluez.Adapter.GetRemoteMajorClass(%s) failed: %s", a, e.message);
+ goto fail;
+ }
+
+ if (!(dbus_message_get_args(r, &e, DBUS_TYPE_STRING, &class, DBUS_TYPE_INVALID))) {
+ pa_log("Malformed org.bluez.Adapter.GetRemoteMajorClass signal: %s", e.message);
+ goto fail;
+ }
+
+ if (strcmp(class, "phone")) {
+ pa_log_info("Found device '%s' of class '%s', ignoring.", a, class);
+ goto fail;
+ }
+
+ b = pa_xnew(struct bonding, 1);
+ b->userdata = u;
+ pa_strlcpy(b->address, a, sizeof(b->address));
+ b->pid = (pid_t) -1;
+ b->fd = -1;
+ b->io_event = NULL;
+ b->state = UNKNOWN;
+ u->n_unknown ++;
+
+ pa_log_info("Watching device '%s' of class '%s'.", b->address, class);
+
+ if ((b->fd = pa_start_child_for_read(PA_BT_PROXIMITY_HELPER, a, &b->pid)) < 0) {
+ pa_log("Failed to start helper tool.");
+ goto fail;
+ }
+
+ b->io_event = u->module->core->mainloop->io_new(
+ u->module->core->mainloop,
+ b->fd,
+ PA_IO_EVENT_INPUT,
+ io_event_cb,
+ b);
+
+ dbus_message_unref(m);
+ dbus_message_unref(r);
+
+ pa_hashmap_put(u->bondings, b->address, b);
+
+ return b;
+
+fail:
+ if (m)
+ dbus_message_unref(m);
+ if (r)
+ dbus_message_unref(r);
+
+ if (b)
+ bonding_free(b);
+
+ dbus_error_free(&e);
+ return NULL;
+}
+
+static void bonding_remove(struct userdata *u, const char *a) {
+ struct bonding *b;
+ pa_assert(u);
+
+ pa_return_if_fail((b = pa_hashmap_remove(u->bondings, a)));
+
+ pa_log_info("No longer watching device '%s'", b->address);
+ bonding_free(b);
+}
+
+static DBusHandlerResult filter_func(DBusConnection *connection, DBusMessage *m, void *userdata) {
+ struct userdata *u = userdata;
+ DBusError e;
+
+ dbus_error_init(&e);
+
+ if (dbus_message_is_signal(m, "org.bluez.Adapter", "BondingCreated")) {
+ const char *a;
+
+ if (!(dbus_message_get_args(m, &e, DBUS_TYPE_STRING, &a, DBUS_TYPE_INVALID))) {
+ pa_log("Malformed org.bluez.Adapter.BondingCreated signal: %s", e.message);
+ goto finish;
+ }
+
+ bonding_new(u, a);
+
+ return DBUS_HANDLER_RESULT_HANDLED;
+
+ } else if (dbus_message_is_signal(m, "org.bluez.Adapter", "BondingRemoved")) {
+
+ const char *a;
+
+ if (!(dbus_message_get_args(m, &e, DBUS_TYPE_STRING, &a, DBUS_TYPE_INVALID))) {
+ pa_log("Malformed org.bluez.Adapter.BondingRemoved signal: %s", e.message);
+ goto finish;
+ }
+
+ bonding_remove(u, a);
+
+ return DBUS_HANDLER_RESULT_HANDLED;
+ }
+
+finish:
+
+ dbus_error_free(&e);
+
+ return DBUS_HANDLER_RESULT_NOT_YET_HANDLED;
+}
+
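+/* Install (or remove) the D-Bus match rules for the BondingCreated and
+ * BondingRemoved signals of our adapter, plus the filter callback. */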
+static int add_matches(struct userdata *u, pa_bool_t add) {
+ char *filter1, *filter2;
+ DBusError e;
+ int r = -1;
+
+ pa_assert(u);
+ dbus_error_init(&e);
+
+ filter1 = pa_sprintf_malloc("type='signal',sender='org.bluez',interface='org.bluez.Adapter',member='BondingCreated',path='%s'", u->hci_path);
+ filter2 = pa_sprintf_malloc("type='signal',sender='org.bluez',interface='org.bluez.Adapter',member='BondingRemoved',path='%s'", u->hci_path);
+
+ if (add) {
+ dbus_bus_add_match(pa_dbus_connection_get(u->dbus_connection), filter1, &e);
+
+ if (dbus_error_is_set(&e)) {
+ pa_log("dbus_bus_add_match(%s) failed: %s", filter1, e.message);
+ goto finish;
+ }
+ } else
+ dbus_bus_remove_match(pa_dbus_connection_get(u->dbus_connection), filter1, &e);
+
+
+ if (add) {
+ dbus_bus_add_match(pa_dbus_connection_get(u->dbus_connection), filter2, &e);
+
+ if (dbus_error_is_set(&e)) {
+ pa_log("dbus_bus_add_match(%s) failed: %s", filter2, e.message);
+            /* Roll back the first match, which was installed successfully */
+            dbus_bus_remove_match(pa_dbus_connection_get(u->dbus_connection), filter1, &e);
+ goto finish;
+ }
+ } else
+ dbus_bus_remove_match(pa_dbus_connection_get(u->dbus_connection), filter2, &e);
+
+
+ if (add)
+ pa_assert_se(dbus_connection_add_filter(pa_dbus_connection_get(u->dbus_connection), filter_func, u, NULL));
+ else
+ dbus_connection_remove_filter(pa_dbus_connection_get(u->dbus_connection), filter_func, u);
+
+ r = 0;
+
+finish:
+ pa_xfree(filter1);
+ pa_xfree(filter2);
+ dbus_error_free(&e);
+
+ return r;
+}
+
+int pa__init(pa_module*m) {
+ pa_modargs *ma = NULL;
+ struct userdata *u;
+ DBusError e;
+ DBusMessage *msg = NULL, *r = NULL;
+ DBusMessageIter iter, sub;
+
+ pa_assert(m);
+ dbus_error_init(&e);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments");
+ goto fail;
+ }
+
+ m->userdata = u = pa_xnew0(struct userdata, 1);
+ u->module = m;
+ u->sink_name = pa_xstrdup(pa_modargs_get_value(ma, "sink", NULL));
+ u->hci = pa_xstrdup(pa_modargs_get_value(ma, "hci", DEFAULT_HCI));
+ u->hci_path = pa_sprintf_malloc("/org/bluez/%s", u->hci);
+ u->n_found = u->n_unknown = 0;
+ u->muted = FALSE;
+
+ u->bondings = pa_hashmap_new(pa_idxset_string_hash_func, pa_idxset_string_compare_func);
+
+ if (!(u->dbus_connection = pa_dbus_bus_get(m->core, DBUS_BUS_SYSTEM, &e))) {
+ pa_log("Failed to get D-Bus connection: %s", e.message);
+ goto fail;
+ }
+
+ if (add_matches(u, TRUE) < 0)
+ goto fail;
+
+ pa_assert_se(msg = dbus_message_new_method_call("org.bluez", u->hci_path, "org.bluez.Adapter", "ListBondings"));
+
+ if (!(r = dbus_connection_send_with_reply_and_block(pa_dbus_connection_get(u->dbus_connection), msg, -1, &e))) {
+ pa_log("org.bluez.Adapter.ListBondings failed: %s", e.message);
+ goto fail;
+ }
+
+ dbus_message_iter_init(r, &iter);
+
+ if (dbus_message_iter_get_arg_type(&iter) != DBUS_TYPE_ARRAY) {
+ pa_log("Malformed reply to org.bluez.Adapter.ListBondings.");
+ goto fail;
+ }
+
+ dbus_message_iter_recurse(&iter, &sub);
+
+ while (dbus_message_iter_get_arg_type(&sub) == DBUS_TYPE_STRING) {
+ const char *a = NULL;
+
+ dbus_message_iter_get_basic(&sub, &a);
+ bonding_new(u, a);
+
+ dbus_message_iter_next(&sub);
+ }
+
+ dbus_message_unref(r);
+ dbus_message_unref(msg);
+
+ pa_modargs_free(ma);
+
+ if (pa_hashmap_size(u->bondings) == 0)
+        pa_log_warn("No phone device bonded.");
+
+ update_volume(u);
+
+ return 0;
+
+fail:
+
+ if (ma)
+ pa_modargs_free(ma);
+
+ pa__done(m);
+
+ dbus_error_free(&e);
+
+ if (msg)
+ dbus_message_unref(msg);
+
+ if (r)
+ dbus_message_unref(r);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->bondings) {
+ struct bonding *b;
+
+ while ((b = pa_hashmap_steal_first(u->bondings)))
+ bonding_free(b);
+
+ pa_hashmap_free(u->bondings, NULL, NULL);
+ }
+
+ if (u->dbus_connection) {
+ add_matches(u, FALSE);
+ pa_dbus_connection_unref(u->dbus_connection);
+ }
+
+ pa_xfree(u->sink_name);
+ pa_xfree(u->hci_path);
+ pa_xfree(u->hci);
+ pa_xfree(u);
+}
diff --git a/src/modules/module-cli.c b/src/modules/module-cli.c
new file mode 100644
index 00000000..ab311a82
--- /dev/null
+++ b/src/modules/module-cli.c
@@ -0,0 +1,123 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <pulsecore/module.h>
+#include <pulsecore/iochannel.h>
+#include <pulsecore/cli.h>
+#include <pulsecore/sioman.h>
+#include <pulsecore/log.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/macro.h>
+
+#include "module-cli-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("Command line interface");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(TRUE);
+PA_MODULE_USAGE("exit_on_eof=<exit daemon after EOF?>");
+
+static const char* const valid_modargs[] = {
+ "exit_on_eof",
+ NULL
+};
+
+static void eof_and_unload_cb(pa_cli*c, void *userdata) {
+ pa_module *m = userdata;
+
+ pa_assert(c);
+ pa_assert(m);
+
+ pa_module_unload_request(m);
+}
+
+static void eof_and_exit_cb(pa_cli*c, void *userdata) {
+ pa_module *m = userdata;
+
+ pa_assert(c);
+ pa_assert(m);
+
+ m->core->mainloop->quit(m->core->mainloop, 0);
+}
+
+int pa__init(pa_module*m) {
+ pa_iochannel *io;
+ pa_modargs *ma;
+ pa_bool_t exit_on_eof = FALSE;
+
+ pa_assert(m);
+
+ if (m->core->running_as_daemon) {
+ pa_log_info("Running as daemon, refusing to load this module.");
+ return 0;
+ }
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("failed to parse module arguments.");
+ goto fail;
+ }
+
+ if (pa_modargs_get_value_boolean(ma, "exit_on_eof", &exit_on_eof) < 0) {
+ pa_log("exit_on_eof= expects boolean argument.");
+ goto fail;
+ }
+
+ if (pa_stdio_acquire() < 0) {
+        pa_log("STDIN/STDOUT already in use.");
+ goto fail;
+ }
+
+ io = pa_iochannel_new(m->core->mainloop, STDIN_FILENO, STDOUT_FILENO);
+ pa_iochannel_set_noclose(io, 1);
+
+ m->userdata = pa_cli_new(m->core, io, m);
+
+ pa_cli_set_eof_callback(m->userdata, exit_on_eof ? eof_and_exit_cb : eof_and_unload_cb, m);
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+
+ if (ma)
+ pa_modargs_free(ma);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ pa_assert(m);
+
+ if (m->core->running_as_daemon == 0) {
+ pa_cli_free(m->userdata);
+ pa_stdio_release();
+ }
+}
diff --git a/src/modules/module-combine.c b/src/modules/module-combine.c
new file mode 100644
index 00000000..996cd4f6
--- /dev/null
+++ b/src/modules/module-combine.c
@@ -0,0 +1,1193 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <errno.h>
+
+#include <pulse/timeval.h>
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/macro.h>
+#include <pulsecore/module.h>
+#include <pulsecore/llist.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/sink-input.h>
+#include <pulsecore/memblockq.h>
+#include <pulsecore/log.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/namereg.h>
+#include <pulsecore/mutex.h>
+#include <pulsecore/thread.h>
+#include <pulsecore/thread-mq.h>
+#include <pulsecore/rtpoll.h>
+#include <pulsecore/rtclock.h>
+#include <pulsecore/core-error.h>
+
+#include "module-combine-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("Combine multiple sinks to one");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_USAGE(
+ "sink_name=<name for the sink> "
+ "master=<master sink> "
+ "slaves=<slave sinks> "
+ "adjust_time=<seconds> "
+ "resample_method=<method> "
+ "format=<sample format> "
+ "channels=<number of channels> "
+ "rate=<sample rate> "
+ "channel_map=<channel map>");
+
+#define DEFAULT_SINK_NAME "combined"
+#define MEMBLOCKQ_MAXLENGTH (1024*170)
+
+#define DEFAULT_ADJUST_TIME 10
+
+static const char* const valid_modargs[] = {
+ "sink_name",
+ "master",
+ "slaves",
+ "adjust_time",
+ "resample_method",
+ "format",
+ "channels",
+ "rate",
+ "channel_map",
+ NULL
+};
+
+struct output {
+ struct userdata *userdata;
+
+ pa_sink *sink;
+ pa_sink_input *sink_input;
+
+ pa_asyncmsgq *inq, /* Message queue from the sink thread to this sink input */
+ *outq; /* Message queue from this sink input to the sink thread */
+ pa_rtpoll_item *inq_rtpoll_item, *outq_rtpoll_item;
+
+ pa_memblockq *memblockq;
+
+ pa_usec_t total_latency;
+
+ PA_LLIST_FIELDS(struct output);
+};
+
+struct userdata {
+ pa_core *core;
+ pa_module *module;
+ pa_sink *sink;
+
+ pa_thread *thread;
+ pa_thread_mq thread_mq;
+ pa_rtpoll *rtpoll;
+
+ pa_time_event *time_event;
+ uint32_t adjust_time;
+
+ pa_bool_t automatic;
+ size_t block_size;
+
+ pa_hook_slot *sink_new_slot, *sink_unlink_slot, *sink_state_changed_slot;
+
+ pa_resample_method_t resample_method;
+
+ struct timeval adjust_timestamp;
+
+ struct output *master;
+ pa_idxset* outputs; /* managed in main context */
+
+ struct {
+ PA_LLIST_HEAD(struct output, active_outputs); /* managed in IO thread context */
+ pa_atomic_t running; /* we cache that value here, so that every thread can query it cheaply */
+ struct timeval timestamp;
+ pa_bool_t in_null_mode;
+ } thread_info;
+};
+
+enum {
+ SINK_MESSAGE_ADD_OUTPUT = PA_SINK_MESSAGE_MAX,
+ SINK_MESSAGE_REMOVE_OUTPUT,
+ SINK_MESSAGE_NEED
+};
+
+enum {
+ SINK_INPUT_MESSAGE_POST = PA_SINK_INPUT_MESSAGE_MAX
+};
+
+static void output_free(struct output *o);
+static int output_create_sink_input(struct output *o);
+static void update_master(struct userdata *u, struct output *o);
+static void pick_master(struct userdata *u, struct output *except);
+
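+/* Periodically nudge the resampling rate of every slave sink input so
+ * that all outputs converge on a common target latency. The correction
+ * is proportional to the latency deviation spread over adjust_time
+ * seconds; e.g. at a base rate of 44100 Hz, a deviation of 5 ms with the
+ * default adjust_time of 10 s works out to a shift of roughly 22 Hz. */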
+static void adjust_rates(struct userdata *u) {
+ struct output *o;
+ pa_usec_t max_sink_latency = 0, min_total_latency = (pa_usec_t) -1, target_latency;
+ uint32_t base_rate;
+ uint32_t idx;
+
+ pa_assert(u);
+ pa_sink_assert_ref(u->sink);
+
+ if (pa_idxset_size(u->outputs) <= 0)
+ return;
+
+ if (!u->master)
+ return;
+
+ if (!PA_SINK_OPENED(pa_sink_get_state(u->sink)))
+ return;
+
+ for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
+ pa_usec_t sink_latency;
+
+ if (!o->sink_input || !PA_SINK_OPENED(pa_sink_get_state(o->sink)))
+ continue;
+
+ sink_latency = pa_sink_get_latency(o->sink);
+ o->total_latency = sink_latency + pa_sink_input_get_latency(o->sink_input);
+
+ if (sink_latency > max_sink_latency)
+ max_sink_latency = sink_latency;
+
+ if (min_total_latency == (pa_usec_t) -1 || o->total_latency < min_total_latency)
+ min_total_latency = o->total_latency;
+ }
+
+ if (min_total_latency == (pa_usec_t) -1)
+ return;
+
+ target_latency = max_sink_latency > min_total_latency ? max_sink_latency : min_total_latency;
+
+ pa_log_info("[%s] target latency is %0.0f usec.", u->sink->name, (float) target_latency);
+ pa_log_info("[%s] master %s latency %0.0f usec.", u->sink->name, u->master->sink->name, (float) u->master->total_latency);
+
+ base_rate = u->sink->sample_spec.rate;
+
+ for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
+ uint32_t r = base_rate;
+
+ if (!o->sink_input || !PA_SINK_OPENED(pa_sink_get_state(o->sink)))
+ continue;
+
+ if (o->total_latency < target_latency)
+ r -= (uint32_t) (((((double) target_latency - o->total_latency))/u->adjust_time)*r/PA_USEC_PER_SEC);
+ else if (o->total_latency > target_latency)
+ r += (uint32_t) (((((double) o->total_latency - target_latency))/u->adjust_time)*r/PA_USEC_PER_SEC);
+
+ if (r < (uint32_t) (base_rate*0.9) || r > (uint32_t) (base_rate*1.1)) {
+ pa_log_warn("[%s] sample rates too different, not adjusting (%u vs. %u).", o->sink_input->name, base_rate, r);
+ pa_sink_input_set_rate(o->sink_input, base_rate);
+ } else {
+ pa_log_info("[%s] new rate is %u Hz; ratio is %0.3f; latency is %0.0f usec.", o->sink_input->name, r, (double) r / base_rate, (float) o->total_latency);
+ pa_sink_input_set_rate(o->sink_input, r);
+ }
+ }
+}
+
+static void time_callback(pa_mainloop_api*a, pa_time_event* e, const struct timeval *tv, void *userdata) {
+ struct userdata *u = userdata;
+ struct timeval n;
+
+ pa_assert(u);
+ pa_assert(a);
+ pa_assert(u->time_event == e);
+
+ adjust_rates(u);
+
+ pa_gettimeofday(&n);
+ n.tv_sec += u->adjust_time;
+ u->sink->core->mainloop->time_restart(e, &n);
+}
+
+static void thread_func(void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(u);
+
+ pa_log_debug("Thread starting up");
+
+ if (u->core->realtime_scheduling)
+ pa_make_realtime(u->core->realtime_priority+1);
+
+ pa_thread_mq_install(&u->thread_mq);
+ pa_rtpoll_install(u->rtpoll);
+
+ pa_rtclock_get(&u->thread_info.timestamp);
+ u->thread_info.in_null_mode = FALSE;
+
+ for (;;) {
+ int ret;
+
+ /* If no outputs are connected, render some data and drop it immediately. */
+ if (u->sink->thread_info.state == PA_SINK_RUNNING && !u->thread_info.active_outputs) {
+ struct timeval now;
+
+ pa_rtclock_get(&now);
+
+ if (!u->thread_info.in_null_mode || pa_timeval_cmp(&u->thread_info.timestamp, &now) <= 0) {
+ pa_sink_skip(u->sink, u->block_size);
+
+ if (!u->thread_info.in_null_mode)
+ u->thread_info.timestamp = now;
+
+ pa_timeval_add(&u->thread_info.timestamp, pa_bytes_to_usec(u->block_size, &u->sink->sample_spec));
+ }
+
+ pa_rtpoll_set_timer_absolute(u->rtpoll, &u->thread_info.timestamp);
+ u->thread_info.in_null_mode = TRUE;
+
+ } else {
+ pa_rtpoll_set_timer_disabled(u->rtpoll);
+ u->thread_info.in_null_mode = FALSE;
+ }
+
+ /* Hmm, nothing to do. Let's sleep */
+ if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0) {
+ pa_log_info("pa_rtpoll_run() = %i", ret);
+ goto fail;
+ }
+
+ if (ret == 0)
+ goto finish;
+ }
+
+fail:
+    /* If this was not a regular exit from the loop we have to continue
+     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
+ pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
+ pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
+
+finish:
+ pa_log_debug("Thread shutting down");
+}
+
+/* Called from I/O thread context */
+static void render_memblock(struct userdata *u, struct output *o, size_t length) {
+ pa_assert(u);
+ pa_assert(o);
+
+    /* We are run by the sink thread, on behalf of an output (o). That
+     * output is waiting for us, hence it is safe to access its
+     * memblockq and asyncmsgq directly. */
+
+ /* If we are not running, we cannot produce any data */
+ if (!pa_atomic_load(&u->thread_info.running))
+ return;
+
+ /* Maybe there's some data in the requesting output's queue
+ * now? */
+ while (pa_asyncmsgq_process_one(o->inq) > 0)
+ ;
+
+ /* Ok, now let's prepare some data if we really have to */
+ while (!pa_memblockq_is_readable(o->memblockq)) {
+ struct output *j;
+ pa_memchunk chunk;
+
+ /* Render data! */
+ pa_sink_render(u->sink, length, &chunk);
+
+ /* OK, let's send this data to the other threads */
+ for (j = u->thread_info.active_outputs; j; j = j->next)
+
+ /* Send to other outputs, which are not the requesting
+ * one */
+
+ if (j != o)
+ pa_asyncmsgq_post(j->inq, PA_MSGOBJECT(j->sink_input), SINK_INPUT_MESSAGE_POST, NULL, 0, &chunk, NULL);
+
+ /* And place it directly into the requesting output's queue */
+ if (o)
+ pa_memblockq_push_align(o->memblockq, &chunk);
+
+ pa_memblock_unref(chunk.memblock);
+ }
+}
+
+/* Called from I/O thread context */
+static void request_memblock(struct output *o, size_t length) {
+ pa_assert(o);
+ pa_sink_input_assert_ref(o->sink_input);
+ pa_sink_assert_ref(o->userdata->sink);
+
+ /* If another thread already prepared some data we received
+ * the data over the asyncmsgq, hence let's first process
+ * it. */
+ while (pa_asyncmsgq_process_one(o->inq) > 0)
+ ;
+
+ /* Check whether we're now readable */
+ if (pa_memblockq_is_readable(o->memblockq))
+ return;
+
+ /* OK, we need to prepare new data, but only if the sink is actually running */
+ if (pa_atomic_load(&o->userdata->thread_info.running))
+ pa_asyncmsgq_send(o->outq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_NEED, o, length, NULL);
+}
+
+/* Called from I/O thread context */
+static int sink_input_peek_cb(pa_sink_input *i, size_t length, pa_memchunk *chunk) {
+ struct output *o;
+
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(o = i->userdata);
+
+ /* If necessary, get some new data */
+ request_memblock(o, length);
+
+ return pa_memblockq_peek(o->memblockq, chunk);
+}
+
+/* Called from I/O thread context */
+static void sink_input_drop_cb(pa_sink_input *i, size_t length) {
+ struct output *o;
+
+ pa_sink_input_assert_ref(i);
+ pa_assert(length > 0);
+ pa_assert_se(o = i->userdata);
+
+ pa_memblockq_drop(o->memblockq, length);
+}
+
+/* Called from I/O thread context */
+static void sink_input_attach_cb(pa_sink_input *i) {
+ struct output *o;
+
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(o = i->userdata);
+
+ /* Set up the queue from the sink thread to us */
+ pa_assert(!o->inq_rtpoll_item);
+ o->inq_rtpoll_item = pa_rtpoll_item_new_asyncmsgq(
+ i->sink->rtpoll,
+ PA_RTPOLL_LATE, /* This one is not that important, since we check for data in _peek() anyway. */
+ o->inq);
+}
+
+/* Called from I/O thread context */
+static void sink_input_detach_cb(pa_sink_input *i) {
+ struct output *o;
+
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(o = i->userdata);
+
+ /* Shut down the queue from the sink thread to us */
+ pa_assert(o->inq_rtpoll_item);
+ pa_rtpoll_item_free(o->inq_rtpoll_item);
+ o->inq_rtpoll_item = NULL;
+}
+
+/* Called from main context */
+static void sink_input_kill_cb(pa_sink_input *i) {
+ struct output *o;
+
+ pa_sink_input_assert_ref(i);
+    pa_assert_se(o = i->userdata);
+
+ pa_module_unload_request(o->userdata->module);
+ output_free(o);
+}
+
+/* Called from thread context */
+static int sink_input_process_msg(pa_msgobject *obj, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct output *o = PA_SINK_INPUT(obj)->userdata;
+
+ switch (code) {
+
+ case PA_SINK_INPUT_MESSAGE_GET_LATENCY: {
+ pa_usec_t *r = data;
+
+ *r = pa_bytes_to_usec(pa_memblockq_get_length(o->memblockq), &o->sink_input->sample_spec);
+
+ /* Fall through, the default handler will add in the extra
+ * latency added by the resampler */
+ break;
+ }
+
+ case SINK_INPUT_MESSAGE_POST:
+
+ if (PA_SINK_OPENED(o->sink_input->sink->thread_info.state))
+ pa_memblockq_push_align(o->memblockq, chunk);
+ else
+ pa_memblockq_flush(o->memblockq);
+
+ break;
+ }
+
+ return pa_sink_input_process_msg(obj, code, data, offset, chunk);
+}
+
+/* Called from main context */
+static void disable_output(struct output *o) {
+ pa_assert(o);
+
+ if (!o->sink_input)
+ return;
+
+ pa_asyncmsgq_send(o->userdata->sink->asyncmsgq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_REMOVE_OUTPUT, o, 0, NULL);
+ pa_sink_input_unlink(o->sink_input);
+ pa_sink_input_unref(o->sink_input);
+ o->sink_input = NULL;
+
+}
+
+/* Called from main context */
+static void enable_output(struct output *o) {
+ pa_assert(o);
+
+ if (o->sink_input)
+ return;
+
+ if (output_create_sink_input(o) >= 0) {
+
+ pa_memblockq_flush(o->memblockq);
+
+ pa_sink_input_put(o->sink_input);
+
+ if (o->userdata->sink && PA_SINK_LINKED(pa_sink_get_state(o->userdata->sink)))
+ pa_asyncmsgq_send(o->userdata->sink->asyncmsgq, PA_MSGOBJECT(o->userdata->sink), SINK_MESSAGE_ADD_OUTPUT, o, 0, NULL);
+ }
+}
+
+/* Called from main context */
+static void suspend(struct userdata *u) {
+ struct output *o;
+ uint32_t idx;
+
+ pa_assert(u);
+
+ /* Let's suspend by unlinking all streams */
+ for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx))
+ disable_output(o);
+
+ pick_master(u, NULL);
+
+ pa_log_info("Device suspended...");
+}
+
+/* Called from main context */
+static void unsuspend(struct userdata *u) {
+ struct output *o;
+ uint32_t idx;
+
+ pa_assert(u);
+
+ /* Let's resume */
+ for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
+
+ pa_sink_suspend(o->sink, FALSE);
+
+ if (PA_SINK_OPENED(pa_sink_get_state(o->sink)))
+ enable_output(o);
+ }
+
+ pick_master(u, NULL);
+
+ pa_log_info("Resumed successfully...");
+}
+
+/* Called from main context */
+static int sink_set_state(pa_sink *sink, pa_sink_state_t state) {
+ struct userdata *u;
+
+ pa_sink_assert_ref(sink);
+ pa_assert_se(u = sink->userdata);
+
+ /* Please note that in contrast to the ALSA modules we call
+ * suspend/unsuspend from main context here! */
+
+ switch (state) {
+ case PA_SINK_SUSPENDED:
+ pa_assert(PA_SINK_OPENED(pa_sink_get_state(u->sink)));
+
+ suspend(u);
+ break;
+
+ case PA_SINK_IDLE:
+ case PA_SINK_RUNNING:
+
+ if (pa_sink_get_state(u->sink) == PA_SINK_SUSPENDED)
+ unsuspend(u);
+
+ break;
+
+ case PA_SINK_UNLINKED:
+ case PA_SINK_INIT:
+ ;
+ }
+
+ return 0;
+}
+
+/* Called from thread context of the master */
+static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u = PA_SINK(o)->userdata;
+
+ switch (code) {
+
+ case PA_SINK_MESSAGE_SET_STATE:
+ pa_atomic_store(&u->thread_info.running, PA_PTR_TO_UINT(data) == PA_SINK_RUNNING);
+ break;
+
+ case PA_SINK_MESSAGE_GET_LATENCY:
+
+ /* This code will only be called when running in NULL
+ * mode, i.e. when no output is attached. See
+ * sink_get_latency_cb() below */
+
+ if (u->thread_info.in_null_mode) {
+ struct timeval now;
+
+ if (pa_timeval_cmp(&u->thread_info.timestamp, pa_rtclock_get(&now)) > 0) {
+ *((pa_usec_t*) data) = pa_timeval_diff(&u->thread_info.timestamp, &now);
+ break;
+ }
+ }
+
+ *((pa_usec_t*) data) = 0;
+
+ break;
+
+ case SINK_MESSAGE_ADD_OUTPUT: {
+ struct output *op = data;
+
+ PA_LLIST_PREPEND(struct output, u->thread_info.active_outputs, op);
+
+ pa_assert(!op->outq_rtpoll_item);
+
+ /* Create pa_asyncmsgq to the sink thread */
+
+ op->outq_rtpoll_item = pa_rtpoll_item_new_asyncmsgq(
+ u->rtpoll,
+ PA_RTPOLL_EARLY-1, /* This item is very important */
+ op->outq);
+
+ return 0;
+ }
+
+ case SINK_MESSAGE_REMOVE_OUTPUT: {
+ struct output *op = data;
+
+ PA_LLIST_REMOVE(struct output, u->thread_info.active_outputs, op);
+
+ /* Remove the q that leads from this output to the sink thread */
+
+ pa_assert(op->outq_rtpoll_item);
+ pa_rtpoll_item_free(op->outq_rtpoll_item);
+ op->outq_rtpoll_item = NULL;
+
+ return 0;
+ }
+
+ case SINK_MESSAGE_NEED:
+ render_memblock(u, data, (size_t) offset);
+ return 0;
+ }
+
+ return pa_sink_process_msg(o, code, data, offset, chunk);
+}
+
+/* Called from main context */
+static pa_usec_t sink_get_latency_cb(pa_sink *s) {
+ struct userdata *u;
+
+ pa_sink_assert_ref(s);
+ pa_assert_se(u = s->userdata);
+
+ if (u->master) {
+ /* If we have a master sink, we just return the latency of it
+ * and add our own buffering on top */
+
+ if (!u->master->sink_input)
+ return 0;
+
+ return
+ pa_sink_input_get_latency(u->master->sink_input) +
+ pa_sink_get_latency(u->master->sink);
+
+ } else {
+ pa_usec_t usec = 0;
+
+ /* We have no master, hence let's ask our own thread which
+ * implements the NULL sink */
+
+ if (pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
+ return 0;
+
+ return usec;
+ }
+}
+
+static void update_description(struct userdata *u) {
+ int first = 1;
+ char *t;
+ struct output *o;
+ uint32_t idx;
+
+ pa_assert(u);
+
+ if (pa_idxset_isempty(u->outputs)) {
+ pa_sink_set_description(u->sink, "Simultaneous output");
+ return;
+ }
+
+ t = pa_xstrdup("Simultaneous output to");
+
+ for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx)) {
+ char *e;
+
+ if (first) {
+ e = pa_sprintf_malloc("%s %s", t, o->sink->description);
+ first = 0;
+ } else
+ e = pa_sprintf_malloc("%s, %s", t, o->sink->description);
+
+ pa_xfree(t);
+ t = e;
+ }
+
+ pa_sink_set_description(u->sink, t);
+ pa_xfree(t);
+}
+
+static void update_master(struct userdata *u, struct output *o) {
+ pa_assert(u);
+
+ if (u->master == o)
+ return;
+
+ if ((u->master = o))
+ pa_log_info("Master sink is now '%s'", o->sink_input->sink->name);
+ else
+ pa_log_info("No master selected, lacking suitable outputs.");
+}
+
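+/* Keep the current master if it is still usable, otherwise promote any
+ * other output whose sink is opened; fall back to no master at all. */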
+static void pick_master(struct userdata *u, struct output *except) {
+ struct output *o;
+ uint32_t idx;
+ pa_assert(u);
+
+ if (u->master &&
+ u->master != except &&
+ u->master->sink_input &&
+ PA_SINK_OPENED(pa_sink_get_state(u->master->sink))) {
+ update_master(u, u->master);
+ return;
+ }
+
+ for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx))
+ if (o != except &&
+ o->sink_input &&
+ PA_SINK_OPENED(pa_sink_get_state(o->sink))) {
+ update_master(u, o);
+ return;
+ }
+
+ update_master(u, NULL);
+}
+
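+/* Create the sink input that feeds this output's slave sink, flagged
+ * VARIABLE_RATE (so adjust_rates() may retune it) and DONT_MOVE. */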
+static int output_create_sink_input(struct output *o) {
+ pa_sink_input_new_data data;
+ char *t;
+
+ pa_assert(o);
+
+ if (o->sink_input)
+ return 0;
+
+ t = pa_sprintf_malloc("Simultaneous output on %s", o->sink->description);
+
+ pa_sink_input_new_data_init(&data);
+ data.sink = o->sink;
+ data.driver = __FILE__;
+ data.name = t;
+ pa_sink_input_new_data_set_sample_spec(&data, &o->userdata->sink->sample_spec);
+ pa_sink_input_new_data_set_channel_map(&data, &o->userdata->sink->channel_map);
+ data.module = o->userdata->module;
+ data.resample_method = o->userdata->resample_method;
+
+ o->sink_input = pa_sink_input_new(o->userdata->core, &data, PA_SINK_INPUT_VARIABLE_RATE|PA_SINK_INPUT_DONT_MOVE);
+
+ pa_xfree(t);
+
+ if (!o->sink_input)
+ return -1;
+
+ o->sink_input->parent.process_msg = sink_input_process_msg;
+ o->sink_input->peek = sink_input_peek_cb;
+ o->sink_input->drop = sink_input_drop_cb;
+ o->sink_input->attach = sink_input_attach_cb;
+ o->sink_input->detach = sink_input_detach_cb;
+ o->sink_input->kill = sink_input_kill_cb;
+ o->sink_input->userdata = o;
+
+
+ return 0;
+}
+
+static struct output *output_new(struct userdata *u, pa_sink *sink) {
+ struct output *o;
+
+ pa_assert(u);
+ pa_assert(sink);
+ pa_assert(u->sink);
+
+ o = pa_xnew(struct output, 1);
+ o->userdata = u;
+ o->inq = pa_asyncmsgq_new(0);
+ o->outq = pa_asyncmsgq_new(0);
+ o->inq_rtpoll_item = NULL;
+ o->outq_rtpoll_item = NULL;
+ o->sink = sink;
+ o->sink_input = NULL;
+ o->memblockq = pa_memblockq_new(
+ 0,
+ MEMBLOCKQ_MAXLENGTH,
+ MEMBLOCKQ_MAXLENGTH,
+ pa_frame_size(&u->sink->sample_spec),
+ 1,
+ 0,
+ NULL);
+
+ pa_assert_se(pa_idxset_put(u->outputs, o, NULL) == 0);
+
+ if (u->sink && PA_SINK_LINKED(pa_sink_get_state(u->sink)))
+ pa_asyncmsgq_send(u->sink->asyncmsgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_ADD_OUTPUT, o, 0, NULL);
+ else {
+ /* If the sink is not yet started, we need to do the activation ourselves */
+ PA_LLIST_PREPEND(struct output, u->thread_info.active_outputs, o);
+
+ o->outq_rtpoll_item = pa_rtpoll_item_new_asyncmsgq(
+ u->rtpoll,
+ PA_RTPOLL_EARLY-1, /* This item is very important */
+ o->outq);
+ }
+
+ if (PA_SINK_OPENED(pa_sink_get_state(u->sink)) || pa_sink_get_state(u->sink) == PA_SINK_INIT) {
+ pa_sink_suspend(sink, FALSE);
+
+ if (PA_SINK_OPENED(pa_sink_get_state(sink)))
+ if (output_create_sink_input(o) < 0)
+ goto fail;
+ }
+
+
+ update_description(u);
+
+ return o;
+
+fail:
+
+ if (o) {
+ pa_idxset_remove_by_data(u->outputs, o, NULL);
+
+ if (o->sink_input) {
+ pa_sink_input_unlink(o->sink_input);
+ pa_sink_input_unref(o->sink_input);
+ }
+
+ if (o->memblockq)
+ pa_memblockq_free(o->memblockq);
+
+ if (o->inq)
+ pa_asyncmsgq_unref(o->inq);
+
+ if (o->outq)
+ pa_asyncmsgq_unref(o->outq);
+
+ pa_xfree(o);
+ }
+
+ return NULL;
+}
+
+static pa_hook_result_t sink_new_hook_cb(pa_core *c, pa_sink *s, struct userdata* u) {
+ struct output *o;
+
+ pa_core_assert_ref(c);
+ pa_sink_assert_ref(s);
+ pa_assert(u);
+ pa_assert(u->automatic);
+
+ if (!(s->flags & PA_SINK_HARDWARE) || s == u->sink)
+ return PA_HOOK_OK;
+
+ pa_log_info("Configuring new sink: %s", s->name);
+
+ if (!(o = output_new(u, s))) {
+ pa_log("Failed to create sink input on sink '%s'.", s->name);
+ return PA_HOOK_OK;
+ }
+
+ if (o->sink_input)
+ pa_sink_input_put(o->sink_input);
+
+ pick_master(u, NULL);
+
+ return PA_HOOK_OK;
+}
+
+static pa_hook_result_t sink_unlink_hook_cb(pa_core *c, pa_sink *s, struct userdata* u) {
+ struct output *o;
+ uint32_t idx;
+
+ pa_assert(c);
+ pa_sink_assert_ref(s);
+ pa_assert(u);
+
+ if (s == u->sink)
+ return PA_HOOK_OK;
+
+ for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx))
+ if (o->sink == s)
+ break;
+
+ if (!o)
+ return PA_HOOK_OK;
+
+ pa_log_info("Unconfiguring sink: %s", s->name);
+
+ output_free(o);
+
+ return PA_HOOK_OK;
+}
+
+static pa_hook_result_t sink_state_changed_hook_cb(pa_core *c, pa_sink *s, struct userdata* u) {
+ struct output *o;
+ uint32_t idx;
+ pa_sink_state_t state;
+
+ if (s == u->sink)
+ return PA_HOOK_OK;
+
+ for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx))
+ if (o->sink == s)
+ break;
+
+ if (!o)
+ return PA_HOOK_OK;
+
+ state = pa_sink_get_state(s);
+
+ if (PA_SINK_OPENED(state) && PA_SINK_OPENED(pa_sink_get_state(u->sink)) && !o->sink_input) {
+ enable_output(o);
+ pick_master(u, NULL);
+ }
+
+ if (state == PA_SINK_SUSPENDED && o->sink_input) {
+ disable_output(o);
+ pick_master(u, o);
+ }
+
+ return PA_HOOK_OK;
+}
+
+int pa__init(pa_module*m) {
+ struct userdata *u;
+ pa_modargs *ma = NULL;
+ const char *master_name, *slaves, *rm;
+ pa_sink *master_sink = NULL;
+ int resample_method = PA_RESAMPLER_TRIVIAL;
+ pa_sample_spec ss;
+ pa_channel_map map;
+ struct output *o;
+ uint32_t idx;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("failed to parse module arguments");
+ goto fail;
+ }
+
+ if ((rm = pa_modargs_get_value(ma, "resample_method", NULL))) {
+ if ((resample_method = pa_parse_resample_method(rm)) < 0) {
+ pa_log("invalid resample method '%s'", rm);
+ goto fail;
+ }
+ }
+
+ u = pa_xnew(struct userdata, 1);
+ u->core = m->core;
+ u->module = m;
+ m->userdata = u;
+ u->sink = NULL;
+ u->master = NULL;
+ u->time_event = NULL;
+ u->adjust_time = DEFAULT_ADJUST_TIME;
+ pa_thread_mq_init(&u->thread_mq, m->core->mainloop);
+ u->rtpoll = pa_rtpoll_new();
+ u->thread = NULL;
+ u->resample_method = resample_method;
+ u->outputs = pa_idxset_new(NULL, NULL);
+ memset(&u->adjust_timestamp, 0, sizeof(u->adjust_timestamp));
+ u->sink_new_slot = u->sink_unlink_slot = u->sink_state_changed_slot = NULL;
+ PA_LLIST_HEAD_INIT(struct output, u->thread_info.active_outputs);
+ pa_atomic_store(&u->thread_info.running, FALSE);
+ u->thread_info.in_null_mode = FALSE;
+ pa_rtpoll_item_new_asyncmsgq(u->rtpoll, PA_RTPOLL_EARLY, u->thread_mq.inq);
+
+ if (pa_modargs_get_value_u32(ma, "adjust_time", &u->adjust_time) < 0) {
+ pa_log("Failed to parse adjust_time value");
+ goto fail;
+ }
+
+ master_name = pa_modargs_get_value(ma, "master", NULL);
+ slaves = pa_modargs_get_value(ma, "slaves", NULL);
+ if (!master_name != !slaves) {
+        pa_log("master= and slaves= must be specified together");
+ goto fail;
+ }
+
+ if (master_name) {
+ if (!(master_sink = pa_namereg_get(m->core, master_name, PA_NAMEREG_SINK, 1))) {
+ pa_log("Invalid master sink '%s'", master_name);
+ goto fail;
+ }
+
+ ss = master_sink->sample_spec;
+ u->automatic = FALSE;
+ } else {
+ master_sink = NULL;
+ ss = m->core->default_sample_spec;
+ u->automatic = TRUE;
+ }
+
+ if ((pa_modargs_get_sample_spec(ma, &ss) < 0)) {
+ pa_log("Invalid sample specification.");
+ goto fail;
+ }
+
+ if (master_sink && ss.channels == master_sink->sample_spec.channels)
+ map = master_sink->channel_map;
+ else {
+ pa_assert_se(pa_channel_map_init_auto(&map, ss.channels, PA_CHANNEL_MAP_AUX));
+ pa_channel_map_init_auto(&map, ss.channels, PA_CHANNEL_MAP_DEFAULT);
+ }
+
+ if ((pa_modargs_get_channel_map(ma, NULL, &map) < 0)) {
+ pa_log("Invalid channel map.");
+ goto fail;
+ }
+
+ if (ss.channels != map.channels) {
+ pa_log("Channel map and sample specification don't match.");
+ goto fail;
+ }
+
+ if (!(u->sink = pa_sink_new(m->core, __FILE__, pa_modargs_get_value(ma, "sink_name", DEFAULT_SINK_NAME), 0, &ss, &map))) {
+ pa_log("Failed to create sink");
+ goto fail;
+ }
+
+ u->sink->parent.process_msg = sink_process_msg;
+ u->sink->get_latency = sink_get_latency_cb;
+ u->sink->set_state = sink_set_state;
+ u->sink->userdata = u;
+
+ u->sink->flags = PA_SINK_LATENCY;
+ pa_sink_set_module(u->sink, m);
+ pa_sink_set_description(u->sink, "Simultaneous output");
+ pa_sink_set_rtpoll(u->sink, u->rtpoll);
+ pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
+
+ u->block_size = pa_bytes_per_second(&ss) / 20; /* 50 ms */
+ if (u->block_size <= 0)
+ u->block_size = pa_frame_size(&ss);
+
+ if (!u->automatic) {
+ const char*split_state;
+ char *n = NULL;
+ pa_assert(slaves);
+
+ /* The master and slaves have been specified manually */
+
+ if (!(u->master = output_new(u, master_sink))) {
+ pa_log("Failed to create master sink input on sink '%s'.", master_sink->name);
+ goto fail;
+ }
+
+ split_state = NULL;
+ while ((n = pa_split(slaves, ",", &split_state))) {
+ pa_sink *slave_sink;
+
+ if (!(slave_sink = pa_namereg_get(m->core, n, PA_NAMEREG_SINK, 1)) || slave_sink == u->sink) {
+ pa_log("Invalid slave sink '%s'", n);
+ pa_xfree(n);
+ goto fail;
+ }
+
+ pa_xfree(n);
+
+ if (!output_new(u, slave_sink)) {
+ pa_log("Failed to create slave sink input on sink '%s'.", slave_sink->name);
+ goto fail;
+ }
+ }
+
+ if (pa_idxset_size(u->outputs) <= 1)
+ pa_log_warn("No slave sinks specified.");
+
+ u->sink_new_slot = NULL;
+
+ } else {
+ pa_sink *s;
+
+        /* We're in automatic mode, so we elect one hw sink as the master
+ * and attach all other hw sinks as slaves to it */
+
+ for (s = pa_idxset_first(m->core->sinks, &idx); s; s = pa_idxset_next(m->core->sinks, &idx)) {
+
+ if (!(s->flags & PA_SINK_HARDWARE) || s == u->sink)
+ continue;
+
+ if (!output_new(u, s)) {
+ pa_log("Failed to create sink input on sink '%s'.", s->name);
+ goto fail;
+ }
+ }
+
+ u->sink_new_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_NEW_POST], (pa_hook_cb_t) sink_new_hook_cb, u);
+ }
+
+ u->sink_unlink_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_UNLINK], (pa_hook_cb_t) sink_unlink_hook_cb, u);
+ u->sink_state_changed_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], (pa_hook_cb_t) sink_state_changed_hook_cb, u);
+
+ pick_master(u, NULL);
+
+ if (!(u->thread = pa_thread_new(thread_func, u))) {
+ pa_log("Failed to create thread.");
+ goto fail;
+ }
+
+ /* Activate the sink and the sink inputs */
+ pa_sink_put(u->sink);
+
+ for (o = pa_idxset_first(u->outputs, &idx); o; o = pa_idxset_next(u->outputs, &idx))
+ if (o->sink_input)
+ pa_sink_input_put(o->sink_input);
+
+ if (u->adjust_time > 0) {
+ struct timeval tv;
+ pa_gettimeofday(&tv);
+ tv.tv_sec += u->adjust_time;
+ u->time_event = m->core->mainloop->time_new(m->core->mainloop, &tv, time_callback, u);
+ }
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+
+ if (ma)
+ pa_modargs_free(ma);
+
+ pa__done(m);
+
+ return -1;
+}
+
+static void output_free(struct output *o) {
+ pa_assert(o);
+
+ pick_master(o->userdata, o);
+
+ disable_output(o);
+
+ pa_assert_se(pa_idxset_remove_by_data(o->userdata->outputs, o, NULL));
+
+ update_description(o->userdata);
+
+ if (o->inq_rtpoll_item)
+ pa_rtpoll_item_free(o->inq_rtpoll_item);
+
+ if (o->outq_rtpoll_item)
+ pa_rtpoll_item_free(o->outq_rtpoll_item);
+
+ if (o->inq)
+ pa_asyncmsgq_unref(o->inq);
+
+ if (o->outq)
+ pa_asyncmsgq_unref(o->outq);
+
+ if (o->memblockq)
+ pa_memblockq_free(o->memblockq);
+
+ pa_xfree(o);
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+ struct output *o;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->sink_new_slot)
+ pa_hook_slot_free(u->sink_new_slot);
+
+ if (u->sink_unlink_slot)
+ pa_hook_slot_free(u->sink_unlink_slot);
+
+ if (u->sink_state_changed_slot)
+ pa_hook_slot_free(u->sink_state_changed_slot);
+
+ if (u->outputs) {
+ while ((o = pa_idxset_first(u->outputs, NULL)))
+ output_free(o);
+
+ pa_idxset_free(u->outputs, NULL, NULL);
+ }
+
+ if (u->sink)
+ pa_sink_unlink(u->sink);
+
+ if (u->thread) {
+ pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
+ pa_thread_free(u->thread);
+ }
+
+ pa_thread_mq_done(&u->thread_mq);
+
+ if (u->sink)
+ pa_sink_unref(u->sink);
+
+ if (u->rtpoll)
+ pa_rtpoll_free(u->rtpoll);
+
+ if (u->time_event)
+ u->core->mainloop->time_free(u->time_event);
+
+ pa_xfree(u);
+}
diff --git a/src/modules/module-default-device-restore.c b/src/modules/module-default-device-restore.c
new file mode 100644
index 00000000..b550ae78
--- /dev/null
+++ b/src/modules/module-default-device-restore.c
@@ -0,0 +1,101 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <pulsecore/core-util.h>
+#include <pulsecore/module.h>
+#include <pulsecore/log.h>
+#include <pulsecore/namereg.h>
+
+#include "module-default-device-restore-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("Automatically restore the default sink and source");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(TRUE);
+
+#define DEFAULT_SINK_FILE "default-sink"
+#define DEFAULT_SOURCE_FILE "default-source"
+
+int pa__init(pa_module *m) {
+ FILE *f;
+
+ /* We never overwrite manually configured settings */
+
+ if (m->core->default_sink_name)
+ pa_log_info("Manually configured default sink, not overwriting.");
+ else if ((f = pa_open_config_file(NULL, DEFAULT_SINK_FILE, NULL, NULL, "r"))) {
+ char ln[256] = "";
+
+ fgets(ln, sizeof(ln)-1, f);
+ pa_strip_nl(ln);
+ fclose(f);
+
+ if (!ln[0])
+ pa_log_debug("No previous default sink setting, ignoring.");
+ else if (pa_namereg_get(m->core, ln, PA_NAMEREG_SINK, 1)) {
+ pa_namereg_set_default(m->core, ln, PA_NAMEREG_SINK);
+ pa_log_debug("Restored default sink '%s'.", ln);
+ } else
+            pa_log_info("Saved default sink '%s' does not exist, not restoring default sink setting.", ln);
+ }
+
+ if (m->core->default_source_name)
+ pa_log_info("Manually configured default source, not overwriting.");
+ else if ((f = pa_open_config_file(NULL, DEFAULT_SOURCE_FILE, NULL, NULL, "r"))) {
+ char ln[256] = "";
+
+ fgets(ln, sizeof(ln)-1, f);
+ pa_strip_nl(ln);
+ fclose(f);
+
+ if (!ln[0])
+ pa_log_debug("No previous default source setting, ignoring.");
+ else if (pa_namereg_get(m->core, ln, PA_NAMEREG_SOURCE, 1)) {
+ pa_namereg_set_default(m->core, ln, PA_NAMEREG_SOURCE);
+ pa_log_debug("Restored default source '%s'.", ln);
+ } else
+            pa_log_info("Saved default source '%s' does not exist, not restoring default source setting.", ln);
+ }
+
+ return 0;
+}
+
+void pa__done(pa_module*m) {
+ FILE *f;
+
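+    /* Persist the current defaults so they can be restored on the next
+     * start. */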
+ if ((f = pa_open_config_file(NULL, DEFAULT_SINK_FILE, NULL, NULL, "w"))) {
+ const char *n = pa_namereg_get_default_sink_name(m->core);
+ fprintf(f, "%s\n", n ? n : "");
+ fclose(f);
+ }
+
+ if ((f = pa_open_config_file(NULL, DEFAULT_SOURCE_FILE, NULL, NULL, "w"))) {
+ const char *n = pa_namereg_get_default_source_name(m->core);
+ fprintf(f, "%s\n", n ? n : "");
+ fclose(f);
+ }
+}
diff --git a/src/modules/module-defs.h.m4 b/src/modules/module-defs.h.m4
new file mode 100644
index 00000000..a49e8329
--- /dev/null
+++ b/src/modules/module-defs.h.m4
@@ -0,0 +1,32 @@
+dnl $Id$
+changecom(`/*', `*/')dnl
+define(`module_name', patsubst(patsubst(patsubst(fname, `-symdef.h$'), `^.*/'), `[^0-9a-zA-Z]', `_'))dnl
+define(`c_symbol', patsubst(module_name, `[^0-9a-zA-Z]', `_'))dnl
+define(`c_macro', patsubst(module_name, `[^0-9a-zA-Z]', `'))dnl
+define(`incmacro', `foo'c_macro`symdeffoo')dnl
+define(`gen_symbol', `#define $1 'module_name`_LTX_$1')dnl
+#ifndef incmacro
+#define incmacro
+
+#include <pulsecore/core.h>
+#include <pulsecore/module.h>
+#include <pulsecore/macro.h>
+
+gen_symbol(pa__init)
+gen_symbol(pa__done)
+gen_symbol(pa__get_author)
+gen_symbol(pa__get_description)
+gen_symbol(pa__get_usage)
+gen_symbol(pa__get_version)
+gen_symbol(pa__load_once)
+
+int pa__init(pa_module*m);
+void pa__done(pa_module*m);
+
+const char* pa__get_author(void);
+const char* pa__get_description(void);
+const char* pa__get_usage(void);
+const char* pa__get_version(void);
+pa_bool_t pa__load_once(void);
+
+#endif
diff --git a/src/modules/module-detect.c b/src/modules/module-detect.c
new file mode 100644
index 00000000..ee650dfd
--- /dev/null
+++ b/src/modules/module-detect.c
@@ -0,0 +1,272 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+ Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
+ Copyright 2006 Diego Pettenò
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/module.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/macro.h>
+
+#include "module-detect-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("Detect available audio hardware and load matching drivers");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(TRUE);
+PA_MODULE_USAGE("just-one=<boolean>");
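+
+/* Typically loaded from default.pa, e.g.:
+ *   load-module module-detect
+ * or, to stop after the first device found of each kind:
+ *   load-module module-detect just-one=1
+ */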
+
+static const char* const valid_modargs[] = {
+ "just-one",
+ NULL
+};
+
+#ifdef HAVE_ALSA
+
+static int detect_alsa(pa_core *c, int just_one) {
+ FILE *f;
+ int n = 0, n_sink = 0, n_source = 0;
+
+ if (!(f = fopen("/proc/asound/devices", "r"))) {
+
+ if (errno != ENOENT)
+ pa_log_error("open(\"/proc/asound/devices\") failed: %s", pa_cstrerror(errno));
+
+ return -1;
+ }
+
+ while (!feof(f)) {
+ char line[64], args[64];
+ unsigned device, subdevice;
+ int is_sink;
+
+ if (!fgets(line, sizeof(line), f))
+ break;
+
+ line[strcspn(line, "\r\n")] = 0;
+
+ if (pa_endswith(line, "digital audio playback"))
+ is_sink = 1;
+ else if (pa_endswith(line, "digital audio capture"))
+ is_sink = 0;
+ else
+ continue;
+
+ if (just_one && is_sink && n_sink >= 1)
+ continue;
+
+ if (just_one && !is_sink && n_source >= 1)
+ continue;
+
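+        /* Lines look roughly like " 16: [ 0- 0]: digital audio playback";
+         * the bracketed pair is the card and the device number. */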
+ if (sscanf(line, " %*i: [%u- %u]: ", &device, &subdevice) != 2)
+ continue;
+
+ /* Only one sink per device */
+ if (subdevice != 0)
+ continue;
+
+ pa_snprintf(args, sizeof(args), "device=hw:%u", device);
+ if (!pa_module_load(c, is_sink ? "module-alsa-sink" : "module-alsa-source", args))
+ continue;
+
+ n++;
+
+ if (is_sink)
+ n_sink++;
+ else
+ n_source++;
+ }
+
+ fclose(f);
+
+ return n;
+}
+#endif
+
+#ifdef HAVE_OSS
+static int detect_oss(pa_core *c, int just_one) {
+ FILE *f;
+ int n = 0, b = 0;
+
+ if (!(f = fopen("/dev/sndstat", "r")) &&
+ !(f = fopen("/proc/sndstat", "r")) &&
+ !(f = fopen("/proc/asound/oss/sndstat", "r"))) {
+
+ if (errno != ENOENT)
+ pa_log_error("failed to open OSS sndstat device: %s", pa_cstrerror(errno));
+
+ return -1;
+ }
+
+ while (!feof(f)) {
+ char line[64], args[64];
+ unsigned device;
+
+ if (!fgets(line, sizeof(line), f))
+ break;
+
+ line[strcspn(line, "\r\n")] = 0;
+
+ if (!b) {
+ b = strcmp(line, "Audio devices:") == 0 || strcmp(line, "Installed devices:") == 0;
+ continue;
+ }
+
+ if (line[0] == 0)
+ break;
+
+ if (sscanf(line, "%u: ", &device) == 1) {
+ if (device == 0)
+ pa_snprintf(args, sizeof(args), "device=/dev/dsp");
+ else
+ pa_snprintf(args, sizeof(args), "device=/dev/dsp%u", device);
+
+ if (!pa_module_load(c, "module-oss", args))
+ continue;
+
+ } else if (sscanf(line, "pcm%u: ", &device) == 1) {
+ /* FreeBSD support, the devices are named /dev/dsp0.0, dsp0.1 and so on */
+ pa_snprintf(args, sizeof(args), "device=/dev/dsp%u.0", device);
+
+ if (!pa_module_load(c, "module-oss", args))
+ continue;
+ }
+
+ n++;
+
+ if (just_one)
+ break;
+ }
+
+ fclose(f);
+ return n;
+}
+#endif
+
+#ifdef HAVE_SOLARIS
+static int detect_solaris(pa_core *c, int just_one) {
+ struct stat s;
+ const char *dev;
+ char args[64];
+
+ dev = getenv("AUDIODEV");
+ if (!dev)
+ dev = "/dev/audio";
+
+ if (stat(dev, &s) < 0) {
+ if (errno != ENOENT)
+ pa_log_error("failed to open device %s: %s", dev, pa_cstrerror(errno));
+ return -1;
+ }
+
+ if (!S_ISCHR(s.st_mode))
+ return 0;
+
+ pa_snprintf(args, sizeof(args), "device=%s", dev);
+
+ if (!pa_module_load(c, "module-solaris", args))
+ return 0;
+
+ return 1;
+}
+#endif
+
+#ifdef OS_IS_WIN32
+static int detect_waveout(pa_core *c, int just_one) {
+ /*
+ * FIXME: No point in enumerating devices until the plugin supports
+ * selecting anything but the first.
+ */
+ if (!pa_module_load(c, "module-waveout", ""))
+ return 0;
+
+ return 1;
+}
+#endif
+
+int pa__init(pa_module*m) {
+ pa_bool_t just_one = FALSE;
+ int n = 0;
+ pa_modargs *ma;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments");
+ goto fail;
+ }
+
+ if (pa_modargs_get_value_boolean(ma, "just-one", &just_one) < 0) {
+        pa_log("just-one= expects a boolean argument.");
+ goto fail;
+ }
+
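+    /* The backends are chained: each one is probed only if the previous
+     * one found nothing (n <= 0). */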
+#if HAVE_ALSA
+ if ((n = detect_alsa(m->core, just_one)) <= 0)
+#endif
+#if HAVE_OSS
+ if ((n = detect_oss(m->core, just_one)) <= 0)
+#endif
+#if HAVE_SOLARIS
+ if ((n = detect_solaris(m->core, just_one)) <= 0)
+#endif
+#if OS_IS_WIN32
+ if ((n = detect_waveout(m->core, just_one)) <= 0)
+#endif
+ {
+ pa_log_warn("failed to detect any sound hardware.");
+ goto fail;
+ }
+
+ pa_log_info("loaded %i modules.", n);
+
+ /* We were successful and can unload ourselves now. */
+ pa_module_unload_request(m);
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+ if (ma)
+ pa_modargs_free(ma);
+
+ return -1;
+}
diff --git a/src/modules/module-esound-compat-spawnfd.c b/src/modules/module-esound-compat-spawnfd.c
new file mode 100644
index 00000000..8321192b
--- /dev/null
+++ b/src/modules/module-esound-compat-spawnfd.c
@@ -0,0 +1,80 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/module.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/log.h>
+
+#include "module-esound-compat-spawnfd-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("ESOUND compatibility module: -spawnfd emulation");
+PA_MODULE_LOAD_ONCE(TRUE);
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_USAGE("fd=<file descriptor>");
+
+static const char* const valid_modargs[] = {
+ "fd",
+ NULL,
+};
+
+int pa__init(pa_module*m) {
+ pa_modargs *ma = NULL;
+ int ret = -1, fd = -1;
+ char x = 1;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs)) ||
+ pa_modargs_get_value_s32(ma, "fd", &fd) < 0 ||
+ fd < 0) {
+
+ pa_log("Failed to parse module arguments");
+ goto finish;
+ }
+
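+    /* Writing a single byte to the inherited fd completes esd's -spawnfd
+     * handshake and tells the spawning client that the daemon is up. */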
+ if (pa_loop_write(fd, &x, sizeof(x), NULL) != sizeof(x))
+ pa_log_warn("write(%u, 1, 1) failed: %s", fd, pa_cstrerror(errno));
+
+ pa_assert_se(pa_close(fd) == 0);
+
+ pa_module_unload_request(m);
+
+ ret = 0;
+
+finish:
+ if (ma)
+ pa_modargs_free(ma);
+
+ return ret;
+}
diff --git a/src/modules/module-esound-compat-spawnpid.c b/src/modules/module-esound-compat-spawnpid.c
new file mode 100644
index 00000000..67f0a231
--- /dev/null
+++ b/src/modules/module-esound-compat-spawnpid.c
@@ -0,0 +1,77 @@
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <signal.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/module.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+
+#include "module-esound-compat-spawnpid-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("ESOUND compatibility module: -spawnpid emulation");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(TRUE);
+PA_MODULE_USAGE("pid=<process id>");
+
+static const char* const valid_modargs[] = {
+ "pid",
+ NULL,
+};
+
+int pa__init(pa_module*m) {
+ pa_modargs *ma = NULL;
+ int ret = -1;
+ uint32_t pid = 0;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs)) ||
+ pa_modargs_get_value_u32(ma, "pid", &pid) < 0 ||
+ !pid) {
+ pa_log("Failed to parse module arguments");
+ goto finish;
+ }
+
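+    /* esd's -spawnpid handshake: the spawning client waits for SIGUSR1
+     * as the signal that the daemon is ready. */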
+ if (kill(pid, SIGUSR1) < 0)
+ pa_log_warn("kill(%u) failed: %s", pid, pa_cstrerror(errno));
+
+ pa_module_unload_request(m);
+
+ ret = 0;
+
+finish:
+ if (ma)
+ pa_modargs_free(ma);
+
+ return ret;
+}
diff --git a/src/modules/module-esound-sink.c b/src/modules/module-esound-sink.c
new file mode 100644
index 00000000..f9bea63d
--- /dev/null
+++ b/src/modules/module-esound-sink.c
@@ -0,0 +1,661 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <limits.h>
+#include <poll.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netinet/tcp.h>
+#include <sys/ioctl.h>
+
+#ifdef HAVE_LINUX_SOCKIOS_H
+#include <linux/sockios.h>
+#endif
+
+#include <pulse/xmalloc.h>
+#include <pulse/timeval.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/iochannel.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/module.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+#include <pulsecore/socket-client.h>
+#include <pulsecore/esound.h>
+#include <pulsecore/authkey.h>
+#include <pulsecore/thread-mq.h>
+#include <pulsecore/thread.h>
+#include <pulsecore/time-smoother.h>
+#include <pulsecore/rtclock.h>
+#include <pulsecore/socket-util.h>
+
+#include "module-esound-sink-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("ESOUND Sink");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_USAGE(
+ "sink_name=<name for the sink> "
+ "server=<address> cookie=<filename> "
+ "format=<sample format> "
+ "channels=<number of channels> "
+ "rate=<sample rate>");
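+
+/* Example (server address and cookie path are placeholders):
+ *   load-module module-esound-sink server=remotehost cookie=/home/user/.esd_auth
+ */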
+
+#define DEFAULT_SINK_NAME "esound_out"
+
+struct userdata {
+ pa_core *core;
+ pa_module *module;
+ pa_sink *sink;
+
+ pa_thread_mq thread_mq;
+ pa_rtpoll *rtpoll;
+ pa_rtpoll_item *rtpoll_item;
+ pa_thread *thread;
+
+ pa_memchunk memchunk;
+
+ void *write_data;
+ size_t write_length, write_index;
+
+ void *read_data;
+ size_t read_length, read_index;
+
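+    /* Control-connection state machine: send the cookie (AUTH), query the
+     * server latency (LATENCY), send the stream request (PREPARE), then
+     * hand the socket over to the IO thread (RUNNING). */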
+ enum {
+ STATE_AUTH,
+ STATE_LATENCY,
+ STATE_PREPARE,
+ STATE_RUNNING,
+ STATE_DEAD
+ } state;
+
+ pa_usec_t latency;
+
+ esd_format_t format;
+ int32_t rate;
+
+ pa_smoother *smoother;
+ int fd;
+
+ int64_t offset;
+
+ pa_iochannel *io;
+ pa_socket_client *client;
+
+ size_t block_size;
+};
+
+static const char* const valid_modargs[] = {
+ "server",
+ "cookie",
+ "rate",
+ "format",
+ "channels",
+ "sink_name",
+ NULL
+};
+
+enum {
+ SINK_MESSAGE_PASS_SOCKET = PA_SINK_MESSAGE_MAX
+};
+
+static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u = PA_SINK(o)->userdata;
+
+ switch (code) {
+
+ case PA_SINK_MESSAGE_SET_STATE:
+
+ switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
+
+ case PA_SINK_SUSPENDED:
+ pa_assert(PA_SINK_OPENED(u->sink->thread_info.state));
+
+ pa_smoother_pause(u->smoother, pa_rtclock_usec());
+ break;
+
+ case PA_SINK_IDLE:
+ case PA_SINK_RUNNING:
+
+ if (u->sink->thread_info.state == PA_SINK_SUSPENDED)
+ pa_smoother_resume(u->smoother, pa_rtclock_usec());
+
+ break;
+
+ case PA_SINK_UNLINKED:
+ case PA_SINK_INIT:
+ ;
+ }
+
+ break;
+
+ case PA_SINK_MESSAGE_GET_LATENCY: {
+ pa_usec_t w, r;
+
+ r = pa_smoother_get(u->smoother, pa_rtclock_usec());
+ w = pa_bytes_to_usec(u->offset + u->memchunk.length, &u->sink->sample_spec);
+
+ *((pa_usec_t*) data) = w > r ? w - r : 0;
+ break;
+ }
+
+ case SINK_MESSAGE_PASS_SOCKET: {
+ struct pollfd *pollfd;
+
+ pa_assert(!u->rtpoll_item);
+
+ u->rtpoll_item = pa_rtpoll_item_new(u->rtpoll, PA_RTPOLL_NEVER, 1);
+ pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
+ pollfd->fd = u->fd;
+ pollfd->events = pollfd->revents = 0;
+
+ return 0;
+ }
+ }
+
+ return pa_sink_process_msg(o, code, data, offset, chunk);
+}
+
+static void thread_func(void *userdata) {
+ struct userdata *u = userdata;
+ int write_type = 0;
+
+ pa_assert(u);
+
+ pa_log_debug("Thread starting up");
+
+ pa_thread_mq_install(&u->thread_mq);
+ pa_rtpoll_install(u->rtpoll);
+
+ pa_smoother_set_time_offset(u->smoother, pa_rtclock_usec());
+
+ for (;;) {
+ int ret;
+
+ if (u->rtpoll_item) {
+ struct pollfd *pollfd;
+ pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
+
+ /* Render some data and write it to the fifo */
+ if (PA_SINK_OPENED(u->sink->thread_info.state) && pollfd->revents) {
+ pa_usec_t usec;
+ int64_t n;
+
+ for (;;) {
+ ssize_t l;
+ void *p;
+
+ if (u->memchunk.length <= 0)
+ pa_sink_render(u->sink, u->block_size, &u->memchunk);
+
+ pa_assert(u->memchunk.length > 0);
+
+ p = pa_memblock_acquire(u->memchunk.memblock);
+ l = pa_write(u->fd, (uint8_t*) p + u->memchunk.index, u->memchunk.length, &write_type);
+ pa_memblock_release(u->memchunk.memblock);
+
+ pa_assert(l != 0);
+
+ if (l < 0) {
+
+ if (errno == EINTR)
+ continue;
+ else if (errno == EAGAIN) {
+
+ /* OK, we filled all socket buffers up
+ * now. */
+ goto filled_up;
+
+ } else {
+                                    pa_log("Failed to write data to socket: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ } else {
+ u->offset += l;
+
+ u->memchunk.index += l;
+ u->memchunk.length -= l;
+
+ if (u->memchunk.length <= 0) {
+ pa_memblock_unref(u->memchunk.memblock);
+ pa_memchunk_reset(&u->memchunk);
+ }
+
+ pollfd->revents = 0;
+
+ if (u->memchunk.length > 0)
+
+                            /* OK, we wrote less than we asked for,
+                             * hence we can assume that the socket
+                             * buffers are full now */
+ goto filled_up;
+ }
+ }
+
+ filled_up:
+
+ /* At this spot we know that the socket buffers are
+ * fully filled up. This is the best time to estimate
+ * the playback position of the server */
+
+ n = u->offset;
+
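+                    /* On Linux, SIOCOUTQ reports how many bytes are still
+                     * queued in the socket send buffer; they have not
+                     * reached the server yet, so subtract them. */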
+#ifdef SIOCOUTQ
+ {
+ int l;
+ if (ioctl(u->fd, SIOCOUTQ, &l) >= 0 && l > 0)
+ n -= l;
+ }
+#endif
+
+ usec = pa_bytes_to_usec(n, &u->sink->sample_spec);
+
+ if (usec > u->latency)
+ usec -= u->latency;
+ else
+ usec = 0;
+
+ pa_smoother_put(u->smoother, pa_rtclock_usec(), usec);
+ }
+
+ /* Hmm, nothing to do. Let's sleep */
+ pollfd->events = PA_SINK_OPENED(u->sink->thread_info.state) ? POLLOUT : 0;
+ }
+
+ if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
+ goto fail;
+
+ if (ret == 0)
+ goto finish;
+
+ if (u->rtpoll_item) {
+ struct pollfd* pollfd;
+
+ pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
+
+ if (pollfd->revents & ~POLLOUT) {
+                pa_log("Connection to server shut down.");
+ goto fail;
+ }
+ }
+ }
+
+fail:
+    /* If this was not a regular exit from the loop we have to continue
+     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
+ pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
+ pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
+
+finish:
+ pa_log_debug("Thread shutting down");
+}
+
+static int do_write(struct userdata *u) {
+ ssize_t r;
+ pa_assert(u);
+
+ if (!pa_iochannel_is_writable(u->io))
+ return 0;
+
+ if (u->write_data) {
+ pa_assert(u->write_index < u->write_length);
+
+ if ((r = pa_iochannel_write(u->io, (uint8_t*) u->write_data + u->write_index, u->write_length - u->write_index)) <= 0) {
+ pa_log("write() failed: %s", pa_cstrerror(errno));
+ return -1;
+ }
+
+ u->write_index += r;
+ pa_assert(u->write_index <= u->write_length);
+
+ if (u->write_index == u->write_length) {
+ pa_xfree(u->write_data);
+ u->write_data = NULL;
+ u->write_index = u->write_length = 0;
+ }
+ }
+
+ if (!u->write_data && u->state == STATE_PREPARE) {
+ /* OK, we're done with sending all control data we need to, so
+ * let's hand the socket over to the IO thread now */
+
+ pa_assert(u->fd < 0);
+ u->fd = pa_iochannel_get_send_fd(u->io);
+
+ pa_iochannel_set_noclose(u->io, TRUE);
+ pa_iochannel_free(u->io);
+ u->io = NULL;
+
+ pa_make_tcp_socket_low_delay(u->fd);
+
+ pa_log_debug("Connection authenticated, handing fd to IO thread...");
+
+ pa_asyncmsgq_post(u->thread_mq.inq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_PASS_SOCKET, NULL, 0, NULL, NULL);
+ u->state = STATE_RUNNING;
+ }
+
+ return 0;
+}
+
+static int handle_response(struct userdata *u) {
+ pa_assert(u);
+
+ switch (u->state) {
+
+ case STATE_AUTH:
+ pa_assert(u->read_length == sizeof(int32_t));
+
+ /* Process auth data */
+ if (!*(int32_t*) u->read_data) {
+                pa_log("Authentication failed: server refused our cookie.");
+ return -1;
+ }
+
+ /* Request latency data */
+ pa_assert(!u->write_data);
+ *(int32_t*) (u->write_data = pa_xmalloc(u->write_length = sizeof(int32_t))) = ESD_PROTO_LATENCY;
+
+ u->write_index = 0;
+ u->state = STATE_LATENCY;
+
+ /* Space for next response */
+ pa_assert(u->read_length >= sizeof(int32_t));
+ u->read_index = 0;
+ u->read_length = sizeof(int32_t);
+
+ break;
+
+ case STATE_LATENCY: {
+ int32_t *p;
+ pa_assert(u->read_length == sizeof(int32_t));
+
+ /* Process latency info */
+ u->latency = (pa_usec_t) ((double) (*(int32_t*) u->read_data) * 1000000 / 44100);
+ if (u->latency > 10000000) {
+ pa_log_warn("Invalid latency information received from server");
+ u->latency = 0;
+ }
+
+ /* Create stream */
+ pa_assert(!u->write_data);
+ p = u->write_data = pa_xmalloc0(u->write_length = sizeof(int32_t)*3+ESD_NAME_MAX);
+ *(p++) = ESD_PROTO_STREAM_PLAY;
+ *(p++) = u->format;
+ *(p++) = u->rate;
+ pa_strlcpy((char*) p, "PulseAudio Tunnel", ESD_NAME_MAX);
+
+ u->write_index = 0;
+ u->state = STATE_PREPARE;
+
+ /* Don't read any further */
+ pa_xfree(u->read_data);
+ u->read_data = NULL;
+ u->read_index = u->read_length = 0;
+
+ break;
+ }
+
+ default:
+ pa_assert_not_reached();
+ }
+
+ return 0;
+}
+
+static int do_read(struct userdata *u) {
+ pa_assert(u);
+
+ if (!pa_iochannel_is_readable(u->io))
+ return 0;
+
+ if (u->state == STATE_AUTH || u->state == STATE_LATENCY) {
+ ssize_t r;
+
+ if (!u->read_data)
+ return 0;
+
+ pa_assert(u->read_index < u->read_length);
+
+ if ((r = pa_iochannel_read(u->io, (uint8_t*) u->read_data + u->read_index, u->read_length - u->read_index)) <= 0) {
+ pa_log("read() failed: %s", r < 0 ? pa_cstrerror(errno) : "EOF");
+ return -1;
+ }
+
+ u->read_index += r;
+ pa_assert(u->read_index <= u->read_length);
+
+ if (u->read_index == u->read_length)
+ return handle_response(u);
+ }
+
+ return 0;
+}
+
+static void io_callback(PA_GCC_UNUSED pa_iochannel *io, void*userdata) {
+ struct userdata *u = userdata;
+ pa_assert(u);
+
+ if (do_read(u) < 0 || do_write(u) < 0) {
+
+ if (u->io) {
+ pa_iochannel_free(u->io);
+ u->io = NULL;
+ }
+
+ pa_module_unload_request(u->module);
+ }
+}
+
+static void on_connection(PA_GCC_UNUSED pa_socket_client *c, pa_iochannel*io, void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_socket_client_unref(u->client);
+ u->client = NULL;
+
+ if (!io) {
+ pa_log("Connection failed: %s", pa_cstrerror(errno));
+ pa_module_unload_request(u->module);
+ return;
+ }
+
+ pa_assert(!u->io);
+ u->io = io;
+ pa_iochannel_set_callback(u->io, io_callback, u);
+
+ pa_log_debug("Connection established, authenticating ...");
+}
+
+int pa__init(pa_module*m) {
+ struct userdata *u = NULL;
+ const char *p;
+ pa_sample_spec ss;
+ pa_modargs *ma = NULL;
+ char *t;
+ const char *espeaker;
+ uint32_t key;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("failed to parse module arguments");
+ goto fail;
+ }
+
+ ss = m->core->default_sample_spec;
+ if (pa_modargs_get_sample_spec(ma, &ss) < 0) {
+ pa_log("invalid sample format specification");
+ goto fail;
+ }
+
+ if ((ss.format != PA_SAMPLE_U8 && ss.format != PA_SAMPLE_S16NE) ||
+ (ss.channels > 2)) {
+ pa_log("esound sample type support is limited to mono/stereo and U8 or S16NE sample data");
+ goto fail;
+ }
+
+ u = pa_xnew0(struct userdata, 1);
+ u->core = m->core;
+ u->module = m;
+ m->userdata = u;
+ u->fd = -1;
+ u->smoother = pa_smoother_new(PA_USEC_PER_SEC, PA_USEC_PER_SEC*2, TRUE);
+ pa_memchunk_reset(&u->memchunk);
+ u->offset = 0;
+
+ pa_thread_mq_init(&u->thread_mq, m->core->mainloop);
+ u->rtpoll = pa_rtpoll_new();
+ pa_rtpoll_item_new_asyncmsgq(u->rtpoll, PA_RTPOLL_EARLY, u->thread_mq.inq);
+ u->rtpoll_item = NULL;
+
+ u->format =
+ (ss.format == PA_SAMPLE_U8 ? ESD_BITS8 : ESD_BITS16) |
+ (ss.channels == 2 ? ESD_STEREO : ESD_MONO);
+ u->rate = ss.rate;
+ u->block_size = pa_usec_to_bytes(PA_USEC_PER_SEC/20, &ss);
+
+ u->read_data = u->write_data = NULL;
+ u->read_index = u->write_index = u->read_length = u->write_length = 0;
+
+ u->state = STATE_AUTH;
+ u->latency = 0;
+
+ if (!(u->sink = pa_sink_new(m->core, __FILE__, pa_modargs_get_value(ma, "sink_name", DEFAULT_SINK_NAME), 0, &ss, NULL))) {
+ pa_log("Failed to create sink.");
+ goto fail;
+ }
+
+ u->sink->parent.process_msg = sink_process_msg;
+ u->sink->userdata = u;
+ u->sink->flags = PA_SINK_LATENCY|PA_SINK_NETWORK;
+
+ pa_sink_set_module(u->sink, m);
+ pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
+ pa_sink_set_rtpoll(u->sink, u->rtpoll);
+
+ if (!(espeaker = getenv("ESPEAKER")))
+ espeaker = ESD_UNIX_SOCKET_NAME;
+
+ if (!(u->client = pa_socket_client_new_string(u->core->mainloop, p = pa_modargs_get_value(ma, "server", espeaker), ESD_DEFAULT_PORT))) {
+ pa_log("Failed to connect to server.");
+ goto fail;
+ }
+
+ pa_sink_set_description(u->sink, t = pa_sprintf_malloc("Esound sink '%s'", p));
+ pa_xfree(t);
+
+ pa_socket_client_set_callback(u->client, on_connection, u);
+
+ /* Prepare the initial request */
+ u->write_data = pa_xmalloc(u->write_length = ESD_KEY_LEN + sizeof(int32_t));
+ if (pa_authkey_load_auto(pa_modargs_get_value(ma, "cookie", ".esd_auth"), u->write_data, ESD_KEY_LEN) < 0) {
+ pa_log("Failed to load cookie");
+ goto fail;
+ }
+
+ key = ESD_ENDIAN_KEY;
+ memcpy((uint8_t*) u->write_data + ESD_KEY_LEN, &key, sizeof(key));
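+    /* The endian key follows the cookie so the server can detect the
+     * client's byte order. */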
+
+ /* Reserve space for the response */
+ u->read_data = pa_xmalloc(u->read_length = sizeof(int32_t));
+
+ if (!(u->thread = pa_thread_new(thread_func, u))) {
+ pa_log("Failed to create thread.");
+ goto fail;
+ }
+
+ pa_sink_put(u->sink);
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+ if (ma)
+ pa_modargs_free(ma);
+
+ pa__done(m);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->sink)
+ pa_sink_unlink(u->sink);
+
+ if (u->thread) {
+ pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
+ pa_thread_free(u->thread);
+ }
+
+ pa_thread_mq_done(&u->thread_mq);
+
+ if (u->sink)
+ pa_sink_unref(u->sink);
+
+ if (u->io)
+ pa_iochannel_free(u->io);
+
+ if (u->rtpoll_item)
+ pa_rtpoll_item_free(u->rtpoll_item);
+
+ if (u->rtpoll)
+ pa_rtpoll_free(u->rtpoll);
+
+ if (u->memchunk.memblock)
+ pa_memblock_unref(u->memchunk.memblock);
+
+ if (u->client)
+ pa_socket_client_unref(u->client);
+
+ pa_xfree(u->read_data);
+ pa_xfree(u->write_data);
+
+ if (u->smoother)
+ pa_smoother_free(u->smoother);
+
+ if (u->fd >= 0)
+ pa_close(u->fd);
+
+ pa_xfree(u);
+}
diff --git a/src/modules/module-hal-detect.c b/src/modules/module-hal-detect.c
new file mode 100644
index 00000000..832bc73e
--- /dev/null
+++ b/src/modules/module-hal-detect.c
@@ -0,0 +1,851 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+ Copyright 2006 Shams E. King
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include <pulse/xmalloc.h>
+#include <pulse/timeval.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/module.h>
+#include <pulsecore/log.h>
+#include <pulsecore/hashmap.h>
+#include <pulsecore/idxset.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/namereg.h>
+#include <pulsecore/core-scache.h>
+#include <pulsecore/modargs.h>
+
+#include <hal/libhal.h>
+
+#include "dbus-util.h"
+#include "module-hal-detect-symdef.h"
+
+PA_MODULE_AUTHOR("Shahms King");
+PA_MODULE_DESCRIPTION("Detect available audio hardware and load matching drivers");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(TRUE);
+#if defined(HAVE_ALSA) && defined(HAVE_OSS)
+PA_MODULE_USAGE("api=<alsa or oss>");
+#elif defined(HAVE_ALSA)
+PA_MODULE_USAGE("api=<alsa>");
+#elif defined(HAVE_OSS)
+PA_MODULE_USAGE("api=<oss>");
+#endif
+
+struct device {
+ uint32_t index;
+ char *udi;
+ char *sink_name, *source_name;
+ int acl_race_fix;
+};
+
+struct userdata {
+ pa_core *core;
+ LibHalContext *context;
+ pa_dbus_connection *connection;
+ pa_hashmap *devices;
+ const char *capability;
+};
+
+struct timerdata {
+ struct userdata *u;
+ char *udi;
+};
+
+#define CAPABILITY_ALSA "alsa"
+#define CAPABILITY_OSS "oss"
+
+static const char* const valid_modargs[] = {
+ "api",
+ NULL
+};
+
+static void hal_device_free(struct device* d) {
+ pa_assert(d);
+
+ pa_xfree(d->udi);
+ pa_xfree(d->sink_name);
+ pa_xfree(d->source_name);
+ pa_xfree(d);
+}
+
+static void hal_device_free_cb(void *d, PA_GCC_UNUSED void *data) {
+ hal_device_free(d);
+}
+
+static const char *strip_udi(const char *udi) {
+ const char *slash;
+
+ if ((slash = strrchr(udi, '/')))
+ return slash+1;
+
+ return udi;
+}
+
+#ifdef HAVE_ALSA
+
+typedef enum {
+ ALSA_TYPE_SINK,
+ ALSA_TYPE_SOURCE,
+ ALSA_TYPE_OTHER,
+ ALSA_TYPE_MAX
+} alsa_type_t;
+
+static alsa_type_t hal_alsa_device_get_type(LibHalContext *context, const char *udi, DBusError *error) {
+ char *type;
+ alsa_type_t t;
+
+ if (!(type = libhal_device_get_property_string(context, udi, "alsa.type", error)))
+ return ALSA_TYPE_OTHER;
+
+ if (!strcmp(type, "playback"))
+ t = ALSA_TYPE_SINK;
+ else if (!strcmp(type, "capture"))
+ t = ALSA_TYPE_SOURCE;
+ else
+ t = ALSA_TYPE_OTHER;
+
+ libhal_free_string(type);
+
+ return t;
+}
+
+static int hal_alsa_device_is_modem(LibHalContext *context, const char *udi, DBusError *error) {
+ char *class;
+ int r;
+
+ if (!(class = libhal_device_get_property_string(context, udi, "alsa.pcm_class", error)))
+ return 0;
+
+ r = strcmp(class, "modem") == 0;
+ pa_xfree(class);
+
+ return r;
+}
+
+static pa_module* hal_device_load_alsa(struct userdata *u, const char *udi, char **sink_name, char **source_name) {
+ char *args;
+ alsa_type_t type;
+ int device, card;
+ const char *module_name;
+ DBusError error;
+ pa_module *m;
+
+ dbus_error_init(&error);
+
+ pa_assert(u);
+ pa_assert(sink_name);
+ pa_assert(source_name);
+
+ *sink_name = *source_name = NULL;
+
+ type = hal_alsa_device_get_type(u->context, udi, &error);
+ if (dbus_error_is_set(&error) || type == ALSA_TYPE_OTHER)
+ goto fail;
+
+ device = libhal_device_get_property_int(u->context, udi, "alsa.device", &error);
+ if (dbus_error_is_set(&error) || device != 0)
+ goto fail;
+
+ card = libhal_device_get_property_int(u->context, udi, "alsa.card", &error);
+ if (dbus_error_is_set(&error))
+ goto fail;
+
+ if (hal_alsa_device_is_modem(u->context, udi, &error))
+ goto fail;
+
+ if (type == ALSA_TYPE_SINK) {
+ *sink_name = pa_sprintf_malloc("alsa_output.%s", strip_udi(udi));
+
+ module_name = "module-alsa-sink";
+ args = pa_sprintf_malloc("device_id=%u sink_name=%s", card, *sink_name);
+ } else {
+ *source_name = pa_sprintf_malloc("alsa_input.%s", strip_udi(udi));
+
+ module_name = "module-alsa-source";
+ args = pa_sprintf_malloc("device_id=%u source_name=%s", card, *source_name);
+ }
+
+ pa_log_debug("Loading %s with arguments '%s'", module_name, args);
+
+ m = pa_module_load(u->core, module_name, args);
+
+ pa_xfree(args);
+
+ if (!m) {
+ pa_xfree(*sink_name);
+ pa_xfree(*source_name);
+ *sink_name = *source_name = NULL;
+ }
+
+ return m;
+
+fail:
+ if (dbus_error_is_set(&error)) {
+ pa_log_error("D-Bus error while parsing ALSA data: %s: %s", error.name, error.message);
+ dbus_error_free(&error);
+ }
+
+ return NULL;
+}
+
+#endif
+
+#ifdef HAVE_OSS
+
+static int hal_oss_device_is_pcm(LibHalContext *context, const char *udi, DBusError *error) {
+ char *class = NULL, *dev = NULL, *e;
+ int device;
+ int r = 0;
+
+ class = libhal_device_get_property_string(context, udi, "oss.type", error);
+ if (dbus_error_is_set(error) || !class)
+ goto finish;
+
+ if (strcmp(class, "pcm"))
+ goto finish;
+
+ dev = libhal_device_get_property_string(context, udi, "oss.device_file", error);
+ if (dbus_error_is_set(error) || !dev)
+ goto finish;
+
+ if ((e = strrchr(dev, '/')))
+ if (pa_startswith(e + 1, "audio"))
+ goto finish;
+
+ device = libhal_device_get_property_int(context, udi, "oss.device", error);
+ if (dbus_error_is_set(error) || device != 0)
+ goto finish;
+
+ r = 1;
+
+finish:
+
+ libhal_free_string(class);
+ libhal_free_string(dev);
+
+ return r;
+}
+
+static pa_module* hal_device_load_oss(struct userdata *u, const char *udi, char **sink_name, char **source_name) {
+ char* args;
+ char* device;
+ DBusError error;
+ pa_module *m;
+
+ dbus_error_init(&error);
+
+ pa_assert(u);
+ pa_assert(sink_name);
+ pa_assert(source_name);
+
+ *sink_name = *source_name = NULL;
+
+ if (!hal_oss_device_is_pcm(u->context, udi, &error) || dbus_error_is_set(&error))
+ goto fail;
+
+ device = libhal_device_get_property_string(u->context, udi, "oss.device_file", &error);
+ if (!device || dbus_error_is_set(&error))
+ goto fail;
+
+ *sink_name = pa_sprintf_malloc("oss_output.%s", strip_udi(udi));
+ *source_name = pa_sprintf_malloc("oss_input.%s", strip_udi(udi));
+
+ args = pa_sprintf_malloc("device=%s sink_name=%s source_name=%s", device, *sink_name, *source_name);
+ libhal_free_string(device);
+
+ pa_log_debug("Loading module-oss with arguments '%s'", args);
+ m = pa_module_load(u->core, "module-oss", args);
+ pa_xfree(args);
+
+ if (!m) {
+ pa_xfree(*sink_name);
+ pa_xfree(*source_name);
+ *sink_name = *source_name = NULL;
+ }
+
+ return m;
+
+fail:
+ if (dbus_error_is_set(&error)) {
+ pa_log_error("D-Bus error while parsing OSS data: %s: %s", error.name, error.message);
+ dbus_error_free(&error);
+ }
+
+ return NULL;
+}
+#endif
+
+static struct device* hal_device_add(struct userdata *u, const char *udi) {
+ pa_module* m = NULL;
+ struct device *d;
+ char *sink_name = NULL, *source_name = NULL;
+
+ pa_assert(u);
+ pa_assert(u->capability);
+ pa_assert(!pa_hashmap_get(u->devices, udi));
+
+#ifdef HAVE_ALSA
+ if (strcmp(u->capability, CAPABILITY_ALSA) == 0)
+ m = hal_device_load_alsa(u, udi, &sink_name, &source_name);
+#endif
+#ifdef HAVE_OSS
+ if (strcmp(u->capability, CAPABILITY_OSS) == 0)
+ m = hal_device_load_oss(u, udi, &sink_name, &source_name);
+#endif
+
+ if (!m)
+ return NULL;
+
+ d = pa_xnew(struct device, 1);
+ d->acl_race_fix = 0;
+ d->udi = pa_xstrdup(udi);
+ d->index = m->index;
+ d->sink_name = sink_name;
+ d->source_name = source_name;
+ pa_hashmap_put(u->devices, d->udi, d);
+
+ return d;
+}
+
+static int hal_device_add_all(struct userdata *u, const char *capability) {
+ DBusError error;
+ int i, n, count = 0;
+ char** udis;
+
+ pa_assert(u);
+
+ dbus_error_init(&error);
+
+ if (u->capability && strcmp(u->capability, capability) != 0)
+ return 0;
+
+ pa_log_info("Trying capability %s", capability);
+
+ udis = libhal_find_device_by_capability(u->context, capability, &n, &error);
+ if (dbus_error_is_set(&error)) {
+ pa_log_error("Error finding devices: %s: %s", error.name, error.message);
+ dbus_error_free(&error);
+ return -1;
+ }
+
+ if (n > 0) {
+ u->capability = capability;
+
+ for (i = 0; i < n; i++) {
+ struct device *d;
+
+ if (!(d = hal_device_add(u, udis[i])))
+                pa_log_debug("No module loaded for device %s", udis[i]);
+ else {
+ if (d->sink_name)
+ pa_scache_play_item_by_name(u->core, "pulse-coldplug", d->sink_name, PA_VOLUME_NORM, 0);
+ count++;
+ }
+ }
+ }
+
+ libhal_free_string_array(udis);
+ return count;
+}
+
+static dbus_bool_t device_has_capability(LibHalContext *context, const char *udi, const char* cap, DBusError *error){
+ dbus_bool_t has_prop;
+
+ has_prop = libhal_device_property_exists(context, udi, "info.capabilities", error);
+ if (!has_prop || dbus_error_is_set(error))
+ return FALSE;
+
+ return libhal_device_query_capability(context, udi, cap, error);
+}
+
+static void device_added_time_cb(pa_mainloop_api *ea, pa_time_event *ev, const struct timeval *tv, void *userdata) {
+ DBusError error;
+ struct timerdata *td = userdata;
+
+ dbus_error_init(&error);
+
+ if (!pa_hashmap_get(td->u->devices, td->udi)) {
+ int b;
+ struct device *d;
+
+ b = libhal_device_exists(td->u->context, td->udi, &error);
+
+ if (dbus_error_is_set(&error)) {
+ pa_log_error("Error adding device: %s: %s", error.name, error.message);
+ dbus_error_free(&error);
+ } else if (b) {
+ if (!(d = hal_device_add(td->u, td->udi)))
+                pa_log_debug("No module loaded for device %s", td->udi);
+ else {
+ if (d->sink_name)
+ pa_scache_play_item_by_name(td->u->core, "pulse-hotplug", d->sink_name, PA_VOLUME_NORM, 0);
+ }
+ }
+ }
+
+ pa_xfree(td->udi);
+ pa_xfree(td);
+ ea->time_free(ev);
+}
+
+static void device_added_cb(LibHalContext *context, const char *udi) {
+ DBusError error;
+ struct timeval tv;
+ struct timerdata *t;
+ struct userdata *u;
+ int good = 0;
+
+ pa_assert_se(u = libhal_ctx_get_user_data(context));
+
+ if (pa_hashmap_get(u->devices, udi))
+ return;
+
+ pa_log_debug("HAL Device added: %s", udi);
+
+ dbus_error_init(&error);
+
+ if (u->capability) {
+
+ good = device_has_capability(context, udi, u->capability, &error);
+
+ if (dbus_error_is_set(&error)) {
+ pa_log_error("Error getting capability: %s: %s", error.name, error.message);
+ dbus_error_free(&error);
+ return;
+ }
+
+ } else {
+
+#ifdef HAVE_ALSA
+ good = device_has_capability(context, udi, CAPABILITY_ALSA, &error);
+
+ if (dbus_error_is_set(&error)) {
+ pa_log_error("Error getting capability: %s: %s", error.name, error.message);
+ dbus_error_free(&error);
+ return;
+ }
+
+ if (good)
+ u->capability = CAPABILITY_ALSA;
+#endif
+#if defined(HAVE_OSS) && defined(HAVE_ALSA)
+ if (!good) {
+#endif
+#ifdef HAVE_OSS
+ good = device_has_capability(context, udi, CAPABILITY_OSS, &error);
+
+ if (dbus_error_is_set(&error)) {
+ pa_log_error("Error getting capability: %s: %s", error.name, error.message);
+ dbus_error_free(&error);
+ return;
+ }
+
+ if (good)
+ u->capability = CAPABILITY_OSS;
+
+#endif
+#if defined(HAVE_OSS) && defined(HAVE_ALSA)
+ }
+#endif
+ }
+
+ if (!good)
+ return;
+
+ /* actually add the device 1/2 second later */
+ t = pa_xnew(struct timerdata, 1);
+ t->u = u;
+ t->udi = pa_xstrdup(udi);
+
+ pa_gettimeofday(&tv);
+ pa_timeval_add(&tv, 500000);
+ u->core->mainloop->time_new(u->core->mainloop, &tv, device_added_time_cb, t);
+}
+
+static void device_removed_cb(LibHalContext* context, const char *udi) {
+ struct device *d;
+ struct userdata *u;
+
+ pa_assert_se(u = libhal_ctx_get_user_data(context));
+
+ pa_log_debug("Device removed: %s", udi);
+
+ if ((d = pa_hashmap_remove(u->devices, udi))) {
+ pa_module_unload_by_index(u->core, d->index);
+ hal_device_free(d);
+ }
+}
+
+static void new_capability_cb(LibHalContext *context, const char *udi, const char* capability) {
+ struct userdata *u;
+
+ pa_assert_se(u = libhal_ctx_get_user_data(context));
+
+ if (!u->capability || strcmp(u->capability, capability) == 0)
+ /* capability we care about, pretend it's a new device */
+ device_added_cb(context, udi);
+}
+
+static void lost_capability_cb(LibHalContext *context, const char *udi, const char* capability) {
+ struct userdata *u;
+
+ pa_assert_se(u = libhal_ctx_get_user_data(context));
+
+ if (u->capability && strcmp(u->capability, capability) == 0)
+ /* capability we care about, pretend it was removed */
+ device_removed_cb(context, udi);
+}
+
+static DBusHandlerResult filter_cb(DBusConnection *bus, DBusMessage *message, void *userdata) {
+ struct userdata*u = userdata;
+ DBusError error;
+
+ pa_assert(bus);
+ pa_assert(message);
+ pa_assert(u);
+
+ dbus_error_init(&error);
+
+ pa_log_debug("dbus: interface=%s, path=%s, member=%s\n",
+ dbus_message_get_interface(message),
+ dbus_message_get_path(message),
+ dbus_message_get_member(message));
+
+ if (dbus_message_is_signal(message, "org.freedesktop.Hal.Device.AccessControl", "ACLAdded") ||
+ dbus_message_is_signal(message, "org.freedesktop.Hal.Device.AccessControl", "ACLRemoved")) {
+ uint32_t uid;
+ int suspend = strcmp(dbus_message_get_member(message), "ACLRemoved") == 0;
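+        /* ACLRemoved means we lost access to the device, so suspend the
+         * corresponding sink/source; ACLAdded means access was granted,
+         * so resume. */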
+
+ if (!dbus_message_get_args(message, &error, DBUS_TYPE_UINT32, &uid, DBUS_TYPE_INVALID) || dbus_error_is_set(&error)) {
+ pa_log_error("Failed to parse ACL message: %s: %s", error.name, error.message);
+ goto finish;
+ }
+
+ if (uid == getuid() || uid == geteuid()) {
+ struct device *d;
+ const char *udi;
+
+ udi = dbus_message_get_path(message);
+
+ if ((d = pa_hashmap_get(u->devices, udi))) {
+ int send_acl_race_fix_message = 0;
+
+ d->acl_race_fix = 0;
+
+ if (d->sink_name) {
+ pa_sink *sink;
+
+ if ((sink = pa_namereg_get(u->core, d->sink_name, PA_NAMEREG_SINK, 0))) {
+ int prev_suspended = pa_sink_get_state(sink) == PA_SINK_SUSPENDED;
+
+ if (prev_suspended && !suspend) {
+ /* resume */
+ if (pa_sink_suspend(sink, 0) >= 0)
+ pa_scache_play_item_by_name(u->core, "pulse-access", d->sink_name, PA_VOLUME_NORM, 0);
+ else
+ d->acl_race_fix = 1;
+
+ } else if (!prev_suspended && suspend) {
+ /* suspend */
+ if (pa_sink_suspend(sink, 1) >= 0)
+ send_acl_race_fix_message = 1;
+ }
+ }
+ }
+
+ if (d->source_name) {
+ pa_source *source;
+
+ if ((source = pa_namereg_get(u->core, d->source_name, PA_NAMEREG_SOURCE, 0))) {
+ int prev_suspended = pa_source_get_state(source) == PA_SOURCE_SUSPENDED;
+
+ if (prev_suspended && !suspend) {
+ /* resume */
+ if (pa_source_suspend(source, 0) < 0)
+ d->acl_race_fix = 1;
+
+ } else if (!prev_suspended && suspend) {
+ /* suspend */
+                            if (pa_source_suspend(source, 1) >= 0)
+ send_acl_race_fix_message = 1;
+ }
+ }
+ }
+
+ if (send_acl_race_fix_message) {
+ DBusMessage *msg;
+ msg = dbus_message_new_signal(udi, "org.pulseaudio.Server", "DirtyGiveUpMessage");
+ dbus_connection_send(pa_dbus_connection_get(u->connection), msg, NULL);
+ dbus_message_unref(msg);
+ }
+
+ } else if (!suspend)
+ device_added_cb(u->context, udi);
+ }
+
+ } else if (dbus_message_is_signal(message, "org.pulseaudio.Server", "DirtyGiveUpMessage")) {
+ /* We use this message to avoid a dirty race condition when we
+ get an ACLAdded message before the previously owning PA
+           server has closed the device. We can remove this as
+ soon as HAL learns frevoke() */
+
+ const char *udi;
+ struct device *d;
+
+ udi = dbus_message_get_path(message);
+
+ if ((d = pa_hashmap_get(u->devices, udi)) && d->acl_race_fix) {
+ pa_log_debug("Got dirty give up message for '%s', trying resume ...", udi);
+
+ d->acl_race_fix = 0;
+
+ if (d->sink_name) {
+ pa_sink *sink;
+
+ if ((sink = pa_namereg_get(u->core, d->sink_name, PA_NAMEREG_SINK, 0))) {
+
+ int prev_suspended = pa_sink_get_state(sink) == PA_SINK_SUSPENDED;
+
+ if (prev_suspended) {
+ /* resume */
+ if (pa_sink_suspend(sink, 0) >= 0)
+ pa_scache_play_item_by_name(u->core, "pulse-access", d->sink_name, PA_VOLUME_NORM, 0);
+ }
+ }
+ }
+
+ if (d->source_name) {
+ pa_source *source;
+
+ if ((source = pa_namereg_get(u->core, d->source_name, PA_NAMEREG_SOURCE, 0))) {
+
+ int prev_suspended = pa_source_get_state(source) == PA_SOURCE_SUSPENDED;
+
+ if (prev_suspended)
+ pa_source_suspend(source, 0);
+ }
+ }
+
+ } else
+ /* Yes, we don't check the UDI for validity, but hopefully HAL will */
+ device_added_cb(u->context, udi);
+ }
+
+finish:
+ dbus_error_free(&error);
+
+ return DBUS_HANDLER_RESULT_HANDLED;
+}
+
+static void hal_context_free(LibHalContext* hal_context) {
+ DBusError error;
+
+ dbus_error_init(&error);
+
+ libhal_ctx_shutdown(hal_context, &error);
+ libhal_ctx_free(hal_context);
+
+ dbus_error_free(&error);
+}
+
+static LibHalContext* hal_context_new(pa_core* c, DBusConnection *conn) {
+ DBusError error;
+ LibHalContext *hal_context = NULL;
+
+ dbus_error_init(&error);
+
+ if (!(hal_context = libhal_ctx_new())) {
+ pa_log_error("libhal_ctx_new() failed");
+ goto fail;
+ }
+
+ if (!libhal_ctx_set_dbus_connection(hal_context, conn)) {
+ pa_log_error("Error establishing DBUS connection: %s: %s", error.name, error.message);
+ goto fail;
+ }
+
+ if (!libhal_ctx_init(hal_context, &error)) {
+ pa_log_error("Couldn't connect to hald: %s: %s", error.name, error.message);
+ goto fail;
+ }
+
+ return hal_context;
+
+fail:
+ if (hal_context)
+ hal_context_free(hal_context);
+
+ dbus_error_free(&error);
+
+ return NULL;
+}
+
+int pa__init(pa_module*m) {
+ DBusError error;
+ pa_dbus_connection *conn;
+ struct userdata *u = NULL;
+ LibHalContext *hal_context = NULL;
+ int n = 0;
+ pa_modargs *ma;
+ const char *api;
+
+ pa_assert(m);
+
+ dbus_error_init(&error);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments");
+ goto fail;
+ }
+
+ if ((api = pa_modargs_get_value(ma, "api", NULL))) {
+ int good = 0;
+
+#ifdef HAVE_ALSA
+ if (strcmp(api, CAPABILITY_ALSA) == 0) {
+ good = 1;
+ api = CAPABILITY_ALSA;
+ }
+#endif
+#ifdef HAVE_OSS
+ if (strcmp(api, CAPABILITY_OSS) == 0) {
+ good = 1;
+ api = CAPABILITY_OSS;
+ }
+#endif
+
+ if (!good) {
+ pa_log_error("Invalid API specification.");
+ goto fail;
+ }
+ }
+
+ if (!(conn = pa_dbus_bus_get(m->core, DBUS_BUS_SYSTEM, &error)) || dbus_error_is_set(&error)) {
+ if (conn)
+ pa_dbus_connection_unref(conn);
+ pa_log_error("Unable to contact DBUS system bus: %s: %s", error.name, error.message);
+ goto fail;
+ }
+
+ if (!(hal_context = hal_context_new(m->core, pa_dbus_connection_get(conn)))) {
+        /* hal_context_new() logs appropriate errors */
+ pa_dbus_connection_unref(conn);
+ goto fail;
+ }
+
+ u = pa_xnew(struct userdata, 1);
+ u->core = m->core;
+ u->context = hal_context;
+ u->connection = conn;
+ u->devices = pa_hashmap_new(pa_idxset_string_hash_func, pa_idxset_string_compare_func);
+ u->capability = api;
+ m->userdata = u;
+
+#ifdef HAVE_ALSA
+ n = hal_device_add_all(u, CAPABILITY_ALSA);
+#endif
+#if defined(HAVE_ALSA) && defined(HAVE_OSS)
+ if (n <= 0)
+#endif
+#ifdef HAVE_OSS
+ n += hal_device_add_all(u, CAPABILITY_OSS);
+#endif
+
+ libhal_ctx_set_user_data(hal_context, u);
+ libhal_ctx_set_device_added(hal_context, device_added_cb);
+ libhal_ctx_set_device_removed(hal_context, device_removed_cb);
+ libhal_ctx_set_device_new_capability(hal_context, new_capability_cb);
+ libhal_ctx_set_device_lost_capability(hal_context, lost_capability_cb);
+
+ if (!libhal_device_property_watch_all(hal_context, &error)) {
+ pa_log_error("Error monitoring device list: %s: %s", error.name, error.message);
+ goto fail;
+ }
+
+ if (!dbus_connection_add_filter(pa_dbus_connection_get(conn), filter_cb, u, NULL)) {
+ pa_log_error("Failed to add filter function");
+ goto fail;
+ }
+
+ dbus_bus_add_match(pa_dbus_connection_get(conn), "type='signal',sender='org.freedesktop.Hal', interface='org.freedesktop.Hal.Device.AccessControl'", &error);
+ if (dbus_error_is_set(&error)) {
+ pa_log_error("Unable to subscribe to HAL ACL signals: %s: %s", error.name, error.message);
+ goto fail;
+ }
+
+ dbus_bus_add_match(pa_dbus_connection_get(conn), "type='signal',interface='org.pulseaudio.Server'", &error);
+ if (dbus_error_is_set(&error)) {
+ pa_log_error("Unable to subscribe to PulseAudio signals: %s: %s", error.name, error.message);
+ goto fail;
+ }
+
+ pa_log_info("Loaded %i modules.", n);
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+ if (ma)
+ pa_modargs_free(ma);
+
+ dbus_error_free(&error);
+ pa__done(m);
+
+ return -1;
+}
+
+
+void pa__done(pa_module *m) {
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->context)
+ hal_context_free(u->context);
+
+ if (u->devices)
+ pa_hashmap_free(u->devices, hal_device_free_cb, NULL);
+
+ if (u->connection)
+ pa_dbus_connection_unref(u->connection);
+
+ pa_xfree(u);
+}
diff --git a/src/modules/module-jack-sink.c b/src/modules/module-jack-sink.c
new file mode 100644
index 00000000..a42aa9ef
--- /dev/null
+++ b/src/modules/module-jack-sink.c
@@ -0,0 +1,456 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <limits.h>
+
+#include <jack/jack.h>
+
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/module.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+#include <pulsecore/thread.h>
+#include <pulsecore/thread-mq.h>
+#include <pulsecore/rtpoll.h>
+#include <pulsecore/sample-util.h>
+
+#include "module-jack-sink-symdef.h"
+
+/* General overview:
+ *
+ * Because JACK's event loop management is very inflexible and doesn't
+ * allow us to add our own event sources to its event thread, we
+ * cannot use the JACK real-time thread for dispatching our PA
+ * work. Instead, we run an additional RT thread which does most of
+ * the PA handling, and have the JACK RT thread request data from it
+ * via a pa_asyncmsgq. The cost is an additional context switch, which
+ * should hopefully not be too expensive if RT scheduling is
+ * enabled. A better fix would only be possible with additional event
+ * source support in JACK.
+ */
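+
+/* In practice the hand-off described above looks like this (see
+ * jack_process() and sink_process_msg() below):
+ *
+ *   JACK RT thread                         PA RT thread
+ *   --------------                         ------------
+ *   jack_process():                        sink_process_msg():
+ *     grab the JACK port buffers             SINK_MESSAGE_RENDER:
+ *     pa_asyncmsgq_send(RENDER) ---------->    pa_sink_render_full()
+ *       (blocks until handled)                 pa_deinterleave() into
+ *     hand buffers back to JACK <----------    the JACK port buffers
+ */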
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("JACK Sink");
+PA_MODULE_LOAD_ONCE(TRUE);
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_USAGE(
+ "sink_name=<name of sink> "
+ "server_name=<jack server name> "
+ "client_name=<jack client name> "
+ "channels=<number of channels> "
+ "connect=<connect ports?> "
+ "channel_map=<channel map>");
+
+#define DEFAULT_SINK_NAME "jack_out"
+
+struct userdata {
+ pa_core *core;
+ pa_module *module;
+ pa_sink *sink;
+
+ unsigned channels;
+
+ jack_port_t* port[PA_CHANNELS_MAX];
+ jack_client_t *client;
+
+ void *buffer[PA_CHANNELS_MAX];
+
+ pa_thread_mq thread_mq;
+ pa_asyncmsgq *jack_msgq;
+ pa_rtpoll *rtpoll;
+ pa_rtpoll_item *rtpoll_item;
+
+ pa_thread *thread;
+
+ jack_nframes_t frames_in_buffer;
+ jack_nframes_t saved_frame_time;
+ pa_bool_t saved_frame_time_valid;
+};
+
+static const char* const valid_modargs[] = {
+ "sink_name",
+ "server_name",
+ "client_name",
+ "channels",
+ "connect",
+ "channel_map",
+ NULL
+};
+
+enum {
+ SINK_MESSAGE_RENDER = PA_SINK_MESSAGE_MAX,
+ SINK_MESSAGE_ON_SHUTDOWN
+};
+
+static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *memchunk) {
+ struct userdata *u = PA_SINK(o)->userdata;
+
+ switch (code) {
+
+ case SINK_MESSAGE_RENDER:
+
+ /* Handle the request from the JACK thread */
+
+ if (u->sink->thread_info.state == PA_SINK_RUNNING) {
+ pa_memchunk chunk;
+ size_t nbytes;
+ void *p;
+
+ pa_assert(offset > 0);
+ nbytes = offset * pa_frame_size(&u->sink->sample_spec);
+
+ pa_sink_render_full(u->sink, nbytes, &chunk);
+
+ p = (uint8_t*) pa_memblock_acquire(chunk.memblock) + chunk.index;
+ pa_deinterleave(p, u->buffer, u->channels, sizeof(float), offset);
+ pa_memblock_release(chunk.memblock);
+
+ pa_memblock_unref(chunk.memblock);
+ } else {
+ unsigned c;
+ pa_sample_spec ss;
+
+ /* Hmm, we're not RUNNING, hence let's write some silence */
+
+ ss = u->sink->sample_spec;
+ ss.channels = 1;
+
+ for (c = 0; c < u->channels; c++)
+ pa_silence_memory(u->buffer[c], offset * pa_sample_size(&ss), &ss);
+ }
+
+ u->frames_in_buffer = offset;
+ u->saved_frame_time = * (jack_nframes_t*) data;
+ u->saved_frame_time_valid = TRUE;
+
+ return 0;
+
+ case SINK_MESSAGE_ON_SHUTDOWN:
+ pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
+ return 0;
+
+ case PA_SINK_MESSAGE_GET_LATENCY: {
+ jack_nframes_t l, ft, d;
+ size_t n;
+
+ /* This is the "worst-case" latency */
+ l = jack_port_get_total_latency(u->client, u->port[0]) + u->frames_in_buffer;
+
+ if (u->saved_frame_time_valid) {
+ /* Adjust the worst case latency by the time that
+ * passed since we last handed data to JACK */
+
+ ft = jack_frame_time(u->client);
+ d = ft > u->saved_frame_time ? ft - u->saved_frame_time : 0;
+ l = l > d ? l - d : 0;
+ }
+
+ /* Convert it to usec */
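+ /* (e.g. for 2ch float32 at 48 kHz a frame is 8 bytes, so l = 1024
+ * frames becomes n = 8192 bytes, which pa_bytes_to_usec() turns into
+ * roughly 21333 usec) */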
+ n = l * pa_frame_size(&u->sink->sample_spec);
+ *((pa_usec_t*) data) = pa_bytes_to_usec(n, &u->sink->sample_spec);
+
+ return 0;
+ }
+ }
+
+ return pa_sink_process_msg(o, code, data, offset, memchunk);
+}
+
+static int jack_process(jack_nframes_t nframes, void *arg) {
+ struct userdata *u = arg;
+ unsigned c;
+ jack_nframes_t frame_time;
+ pa_assert(u);
+
+ /* We just forward the request to our other RT thread */
+
+ for (c = 0; c < u->channels; c++)
+ pa_assert_se(u->buffer[c] = jack_port_get_buffer(u->port[c], nframes));
+
+ frame_time = jack_frame_time(u->client);
+
+ pa_assert_se(pa_asyncmsgq_send(u->jack_msgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_RENDER, &frame_time, nframes, NULL) == 0);
+ return 0;
+}
+
+static void thread_func(void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(u);
+
+ pa_log_debug("Thread starting up");
+
+ if (u->core->realtime_scheduling)
+ pa_make_realtime(u->core->realtime_priority);
+
+ pa_thread_mq_install(&u->thread_mq);
+ pa_rtpoll_install(u->rtpoll);
+
+ for (;;) {
+ int ret;
+
+ if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
+ goto fail;
+
+ if (ret == 0)
+ goto finish;
+ }
+
+fail:
+ /* If this was not a regular exit from the loop we have to continue
+ * processing messages until we receive PA_MESSAGE_SHUTDOWN */
+ pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
+ pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
+
+finish:
+ pa_log_debug("Thread shutting down");
+}
+
+static void jack_error_func(const char*t) {
+ char *s;
+
+ s = pa_xstrndup(t, strcspn(t, "\n\r"));
+ pa_log_warn("JACK error >%s<", s);
+ pa_xfree(s);
+}
+
+static void jack_init(void *arg) {
+ struct userdata *u = arg;
+
+ pa_log_info("JACK thread starting up.");
+
+ if (u->core->realtime_scheduling)
+ pa_make_realtime(u->core->realtime_priority+4);
+}
+
+static void jack_shutdown(void* arg) {
+ struct userdata *u = arg;
+
+ pa_log_info("JACK thread shutting down..");
+ pa_asyncmsgq_post(u->jack_msgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_ON_SHUTDOWN, NULL, 0, NULL, NULL);
+}
+
+int pa__init(pa_module*m) {
+ struct userdata *u = NULL;
+ pa_sample_spec ss;
+ pa_channel_map map;
+ pa_modargs *ma = NULL;
+ jack_status_t status;
+ const char *server_name, *client_name;
+ uint32_t channels = 0;
+ pa_bool_t do_connect = TRUE;
+ unsigned i;
+ const char **ports = NULL, **p;
+ char *t;
+
+ pa_assert(m);
+
+ jack_set_error_function(jack_error_func);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments.");
+ goto fail;
+ }
+
+ if (pa_modargs_get_value_boolean(ma, "connect", &do_connect) < 0) {
+ pa_log("Failed to parse connect= argument.");
+ goto fail;
+ }
+
+ server_name = pa_modargs_get_value(ma, "server_name", NULL);
+ client_name = pa_modargs_get_value(ma, "client_name", "PulseAudio JACK Sink");
+
+ u = pa_xnew0(struct userdata, 1);
+ u->core = m->core;
+ u->module = m;
+ m->userdata = u;
+ u->saved_frame_time_valid = FALSE;
+ pa_thread_mq_init(&u->thread_mq, m->core->mainloop);
+ u->rtpoll = pa_rtpoll_new();
+ pa_rtpoll_item_new_asyncmsgq(u->rtpoll, PA_RTPOLL_EARLY, u->thread_mq.inq);
+
+ /* The queue linking the JACK thread and our RT thread */
+ u->jack_msgq = pa_asyncmsgq_new(0);
+
+ /* The msgq from the JACK RT thread should have an even higher
+ * priority than the normal message queues, to match the guarantee
+ * all other drivers make: supplying the audio device with data is
+ * the top priority -- and as long as that is possible we don't do
+ * anything else */
+ u->rtpoll_item = pa_rtpoll_item_new_asyncmsgq(u->rtpoll, PA_RTPOLL_EARLY-1, u->jack_msgq);
+
+ if (!(u->client = jack_client_open(client_name, server_name ? JackServerName : JackNullOption, &status, server_name))) {
+ pa_log("jack_client_open() failed.");
+ goto fail;
+ }
+
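+ /* By default use one channel per physical playback port offered by
+ * the JACK server (unless overridden by the channels= argument
+ * below). */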
+ ports = jack_get_ports(u->client, NULL, NULL, JackPortIsPhysical|JackPortIsInput);
+
+ channels = 0;
+ for (p = ports; *p; p++)
+ channels++;
+
+ if (!channels)
+ channels = m->core->default_sample_spec.channels;
+
+ if (pa_modargs_get_value_u32(ma, "channels", &channels) < 0 || channels <= 0 || channels >= PA_CHANNELS_MAX) {
+ pa_log("Failed to parse channels= argument.");
+ goto fail;
+ }
+
+ pa_assert_se(pa_channel_map_init_auto(&map, channels, PA_CHANNEL_MAP_AUX));
+ pa_channel_map_init_auto(&map, channels, PA_CHANNEL_MAP_ALSA);
+ if (pa_modargs_get_channel_map(ma, NULL, &map) < 0 || map.channels != channels) {
+ pa_log("Failed to parse channel_map= argument.");
+ goto fail;
+ }
+
+ pa_log_info("Successfully connected as '%s'", jack_get_client_name(u->client));
+
+ ss.channels = u->channels = channels;
+ ss.rate = jack_get_sample_rate(u->client);
+ ss.format = PA_SAMPLE_FLOAT32NE;
+
+ pa_assert(pa_sample_spec_valid(&ss));
+
+ for (i = 0; i < ss.channels; i++) {
+ if (!(u->port[i] = jack_port_register(u->client, pa_channel_position_to_string(map.map[i]), JACK_DEFAULT_AUDIO_TYPE, JackPortIsOutput|JackPortIsTerminal, 0))) {
+ pa_log("jack_port_register() failed.");
+ goto fail;
+ }
+ }
+
+ if (!(u->sink = pa_sink_new(m->core, __FILE__, pa_modargs_get_value(ma, "sink_name", DEFAULT_SINK_NAME), 0, &ss, &map))) {
+ pa_log("failed to create sink.");
+ goto fail;
+ }
+
+ u->sink->parent.process_msg = sink_process_msg;
+ u->sink->userdata = u;
+ u->sink->flags = PA_SINK_LATENCY;
+
+ pa_sink_set_module(u->sink, m);
+ pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
+ pa_sink_set_rtpoll(u->sink, u->rtpoll);
+ pa_sink_set_description(u->sink, t = pa_sprintf_malloc("Jack sink (%s)", jack_get_client_name(u->client)));
+ pa_xfree(t);
+
+ jack_set_process_callback(u->client, jack_process, u);
+ jack_on_shutdown(u->client, jack_shutdown, u);
+ jack_set_thread_init_callback(u->client, jack_init, u);
+
+ if (!(u->thread = pa_thread_new(thread_func, u))) {
+ pa_log("Failed to create thread.");
+ goto fail;
+ }
+
+ if (jack_activate(u->client)) {
+ pa_log("jack_activate() failed");
+ goto fail;
+ }
+
+ if (do_connect) {
+ for (i = 0, p = ports; i < ss.channels; i++, p++) {
+
+ if (!*p) {
+ pa_log("Not enough physical output ports, leaving unconnected.");
+ break;
+ }
+
+ pa_log_info("Connecting %s to %s", jack_port_name(u->port[i]), *p);
+
+ if (jack_connect(u->client, jack_port_name(u->port[i]), *p)) {
+ pa_log("Failed to connect %s to %s, leaving unconnected.", jack_port_name(u->port[i]), *p);
+ break;
+ }
+ }
+ }
+
+ pa_sink_put(u->sink);
+
+ free(ports);
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+ if (ma)
+ pa_modargs_free(ma);
+
+ free(ports);
+
+ pa__done(m);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->client)
+ jack_client_close(u->client);
+
+ if (u->sink)
+ pa_sink_unlink(u->sink);
+
+ if (u->thread) {
+ pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
+ pa_thread_free(u->thread);
+ }
+
+ pa_thread_mq_done(&u->thread_mq);
+
+ if (u->sink)
+ pa_sink_unref(u->sink);
+
+ if (u->rtpoll_item)
+ pa_rtpoll_item_free(u->rtpoll_item);
+
+ if (u->jack_msgq)
+ pa_asyncmsgq_unref(u->jack_msgq);
+
+ if (u->rtpoll)
+ pa_rtpoll_free(u->rtpoll);
+
+ pa_xfree(u);
+}
diff --git a/src/modules/module-jack-source.c b/src/modules/module-jack-source.c
new file mode 100644
index 00000000..4ee08bf1
--- /dev/null
+++ b/src/modules/module-jack-source.c
@@ -0,0 +1,427 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <limits.h>
+
+#include <jack/jack.h>
+
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/source.h>
+#include <pulsecore/module.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+#include <pulsecore/thread.h>
+#include <pulsecore/thread-mq.h>
+#include <pulsecore/rtpoll.h>
+#include <pulsecore/sample-util.h>
+
+#include "module-jack-source-symdef.h"
+
+/* See module-jack-sink for a few comments on how this module basically
+ * works */
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("JACK Source");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(TRUE);
+PA_MODULE_USAGE(
+ "source_name=<name of source> "
+ "server_name=<jack server name> "
+ "client_name=<jack client name> "
+ "channels=<number of channels> "
+ "connect=<connect ports?>"
+ "channel_map=<channel map>");
+
+#define DEFAULT_SOURCE_NAME "jack_in"
+
+struct userdata {
+ pa_core *core;
+ pa_module *module;
+ pa_source *source;
+
+ unsigned channels;
+
+ jack_port_t* port[PA_CHANNELS_MAX];
+ jack_client_t *client;
+
+ pa_thread_mq thread_mq;
+ pa_asyncmsgq *jack_msgq;
+ pa_rtpoll *rtpoll;
+ pa_rtpoll_item *rtpoll_item;
+
+ pa_thread *thread;
+
+ jack_nframes_t saved_frame_time;
+ pa_bool_t saved_frame_time_valid;
+};
+
+static const char* const valid_modargs[] = {
+ "source_name",
+ "server_name",
+ "client_name",
+ "channels",
+ "connect",
+ "channel_map",
+ NULL
+};
+
+enum {
+ SOURCE_MESSAGE_POST = PA_SOURCE_MESSAGE_MAX,
+ SOURCE_MESSAGE_ON_SHUTDOWN
+};
+
+static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u = PA_SOURCE(o)->userdata;
+
+ switch (code) {
+
+ case SOURCE_MESSAGE_POST:
+
+ /* Handle the new block from the JACK thread */
+ pa_assert(chunk);
+ pa_assert(chunk->length > 0);
+
+ if (u->source->thread_info.state == PA_SOURCE_RUNNING)
+ pa_source_post(u->source, chunk);
+
+ u->saved_frame_time = offset;
+ u->saved_frame_time_valid = TRUE;
+
+ return 0;
+
+ case SOURCE_MESSAGE_ON_SHUTDOWN:
+ pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
+ return 0;
+
+ case PA_SOURCE_MESSAGE_GET_LATENCY: {
+ jack_nframes_t l, ft, d;
+ size_t n;
+
+ /* This is the "worst-case" latency */
+ l = jack_port_get_total_latency(u->client, u->port[0]);
+
+ if (u->saved_frame_time_valid) {
+ /* Adjust the worst case latency by the time that
+ * passed since JACK last handed us data */
+
+ ft = jack_frame_time(u->client);
+ d = ft > u->saved_frame_time ? ft - u->saved_frame_time : 0;
+ l += d;
+ }
+
+ /* Convert it to usec */
+ n = l * pa_frame_size(&u->source->sample_spec);
+ *((pa_usec_t*) data) = pa_bytes_to_usec(n, &u->source->sample_spec);
+
+ return 0;
+ }
+ }
+
+ return pa_source_process_msg(o, code, data, offset, chunk);
+}
+
+static int jack_process(jack_nframes_t nframes, void *arg) {
+ unsigned c;
+ struct userdata *u = arg;
+ const void *buffer[PA_CHANNELS_MAX];
+ void *p;
+ jack_nframes_t frame_time;
+ pa_memchunk chunk;
+
+ pa_assert(u);
+
+ for (c = 0; c < u->channels; c++)
+ pa_assert_se(buffer[c] = jack_port_get_buffer(u->port[c], nframes)); /* _se so the assignment isn't compiled away in NDEBUG builds */
+
+ /* We interleave the data and pass it on to the other RT thread */
+
+ pa_memchunk_reset(&chunk);
+ chunk.length = nframes * pa_frame_size(&u->source->sample_spec);
+ chunk.memblock = pa_memblock_new(u->core->mempool, chunk.length);
+ p = pa_memblock_acquire(chunk.memblock);
+ pa_interleave(buffer, u->channels, p, sizeof(float), nframes);
+ pa_memblock_release(chunk.memblock);
+
+ frame_time = jack_frame_time(u->client);
+
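+ /* Unlike the sink we don't need an answer from the PA thread, so a
+ * non-blocking pa_asyncmsgq_post() is enough; it takes its own
+ * reference on the memblock, which is why we can unref ours right
+ * after. */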
+ pa_asyncmsgq_post(u->jack_msgq, PA_MSGOBJECT(u->source), SOURCE_MESSAGE_POST, NULL, frame_time, &chunk, NULL);
+
+ pa_memblock_unref(chunk.memblock);
+
+ return 0;
+}
+
+static void thread_func(void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(u);
+
+ pa_log_debug("Thread starting up");
+
+ if (u->core->realtime_scheduling)
+ pa_make_realtime(u->core->realtime_priority);
+
+ pa_thread_mq_install(&u->thread_mq);
+ pa_rtpoll_install(u->rtpoll);
+
+ for (;;) {
+ int ret;
+
+ if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
+ goto fail;
+
+ if (ret == 0)
+ goto finish;
+ }
+
+fail:
+ /* If this was not a regular exit from the loop we have to continue
+ * processing messages until we receive PA_MESSAGE_SHUTDOWN */
+ pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
+ pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
+
+finish:
+ pa_log_debug("Thread shutting down");
+}
+
+static void jack_error_func(const char*t) {
+ char *s;
+
+ s = pa_xstrndup(t, strcspn(t, "\n\r"));
+ pa_log_warn("JACK error >%s<", s);
+ pa_xfree(s);
+}
+
+static void jack_init(void *arg) {
+ struct userdata *u = arg;
+
+ pa_log_info("JACK thread starting up.");
+
+ if (u->core->realtime_scheduling)
+ pa_make_realtime(u->core->realtime_priority+4);
+}
+
+static void jack_shutdown(void* arg) {
+ struct userdata *u = arg;
+
+ pa_log_info("JACK thread shutting down..");
+ pa_asyncmsgq_post(u->jack_msgq, PA_MSGOBJECT(u->source), SOURCE_MESSAGE_ON_SHUTDOWN, NULL, 0, NULL, NULL);
+}
+
+int pa__init(pa_module*m) {
+ struct userdata *u = NULL;
+ pa_sample_spec ss;
+ pa_channel_map map;
+ pa_modargs *ma = NULL;
+ jack_status_t status;
+ const char *server_name, *client_name;
+ uint32_t channels = 0;
+ pa_bool_t do_connect = TRUE;
+ unsigned i;
+ const char **ports = NULL, **p;
+ char *t;
+
+ pa_assert(m);
+
+ jack_set_error_function(jack_error_func);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments.");
+ goto fail;
+ }
+
+ if (pa_modargs_get_value_boolean(ma, "connect", &do_connect) < 0) {
+ pa_log("Failed to parse connect= argument.");
+ goto fail;
+ }
+
+ server_name = pa_modargs_get_value(ma, "server_name", NULL);
+ client_name = pa_modargs_get_value(ma, "client_name", "PulseAudio JACK Source");
+
+ u = pa_xnew0(struct userdata, 1);
+ u->core = m->core;
+ u->module = m;
+ m->userdata = u;
+ u->saved_frame_time_valid = FALSE;
+
+ pa_thread_mq_init(&u->thread_mq, m->core->mainloop);
+ u->rtpoll = pa_rtpoll_new();
+ pa_rtpoll_item_new_asyncmsgq(u->rtpoll, PA_RTPOLL_EARLY, u->thread_mq.inq);
+
+ u->jack_msgq = pa_asyncmsgq_new(0);
+ u->rtpoll_item = pa_rtpoll_item_new_asyncmsgq(u->rtpoll, PA_RTPOLL_EARLY-1, u->jack_msgq);
+
+ if (!(u->client = jack_client_open(client_name, server_name ? JackServerName : JackNullOption, &status, server_name))) {
+ pa_log("jack_client_open() failed.");
+ goto fail;
+ }
+
+ ports = jack_get_ports(u->client, NULL, NULL, JackPortIsPhysical|JackPortIsOutput);
+
+ channels = 0;
+ for (p = ports; *p; p++)
+ channels++;
+
+ if (!channels)
+ channels = m->core->default_sample_spec.channels;
+
+ if (pa_modargs_get_value_u32(ma, "channels", &channels) < 0 || channels <= 0 || channels >= PA_CHANNELS_MAX) {
+ pa_log("failed to parse channels= argument.");
+ goto fail;
+ }
+
+ pa_assert_se(pa_channel_map_init_auto(&map, channels, PA_CHANNEL_MAP_AUX));
+ pa_channel_map_init_auto(&map, channels, PA_CHANNEL_MAP_ALSA);
+ if (pa_modargs_get_channel_map(ma, NULL, &map) < 0 || map.channels != channels) {
+ pa_log("failed to parse channel_map= argument.");
+ goto fail;
+ }
+
+ pa_log_info("Successfully connected as '%s'", jack_get_client_name(u->client));
+
+ ss.channels = u->channels = channels;
+ ss.rate = jack_get_sample_rate(u->client);
+ ss.format = PA_SAMPLE_FLOAT32NE;
+
+ pa_assert(pa_sample_spec_valid(&ss));
+
+ for (i = 0; i < ss.channels; i++) {
+ if (!(u->port[i] = jack_port_register(u->client, pa_channel_position_to_string(map.map[i]), JACK_DEFAULT_AUDIO_TYPE, JackPortIsInput|JackPortIsTerminal, 0))) {
+ pa_log("jack_port_register() failed.");
+ goto fail;
+ }
+ }
+
+ if (!(u->source = pa_source_new(m->core, __FILE__, pa_modargs_get_value(ma, "source_name", DEFAULT_SOURCE_NAME), 0, &ss, &map))) {
+ pa_log("failed to create source.");
+ goto fail;
+ }
+
+ u->source->parent.process_msg = source_process_msg;
+ u->source->userdata = u;
+ u->source->flags = PA_SOURCE_LATENCY;
+
+ pa_source_set_module(u->source, m);
+ pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
+ pa_source_set_rtpoll(u->source, u->rtpoll);
+ pa_source_set_description(u->source, t = pa_sprintf_malloc("Jack source (%s)", jack_get_client_name(u->client)));
+ pa_xfree(t);
+
+ jack_set_process_callback(u->client, jack_process, u);
+ jack_on_shutdown(u->client, jack_shutdown, u);
+ jack_set_thread_init_callback(u->client, jack_init, u);
+
+ if (!(u->thread = pa_thread_new(thread_func, u))) {
+ pa_log("Failed to create thread.");
+ goto fail;
+ }
+
+ if (jack_activate(u->client)) {
+ pa_log("jack_activate() failed");
+ goto fail;
+ }
+
+ if (do_connect) {
+ for (i = 0, p = ports; i < ss.channels; i++, p++) {
+
+ if (!*p) {
+ pa_log("not enough physical output ports, leaving unconnected.");
+ break;
+ }
+
+ pa_log_info("connecting %s to %s", jack_port_name(u->port[i]), *p);
+
+ if (jack_connect(u->client, *p, jack_port_name(u->port[i]))) {
+ pa_log("failed to connect %s to %s, leaving unconnected.", jack_port_name(u->port[i]), *p);
+ break;
+ }
+ }
+
+ }
+
+ pa_source_put(u->source);
+
+ free(ports);
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+ if (ma)
+ pa_modargs_free(ma);
+
+ free(ports);
+
+ pa__done(m);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->client)
+ jack_client_close(u->client);
+
+ if (u->source)
+ pa_source_unlink(u->source);
+
+ if (u->thread) {
+ pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
+ pa_thread_free(u->thread);
+ }
+
+ pa_thread_mq_done(&u->thread_mq);
+
+ if (u->source)
+ pa_source_unref(u->source);
+
+ if (u->rtpoll_item)
+ pa_rtpoll_item_free(u->rtpoll_item);
+
+ if (u->jack_msgq)
+ pa_asyncmsgq_unref(u->jack_msgq);
+
+ if (u->rtpoll)
+ pa_rtpoll_free(u->rtpoll);
+
+ pa_xfree(u);
+}
diff --git a/src/modules/module-ladspa-sink.c b/src/modules/module-ladspa-sink.c
new file mode 100644
index 00000000..b31037b6
--- /dev/null
+++ b/src/modules/module-ladspa-sink.c
@@ -0,0 +1,684 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+/* TODO: Some plugins cause latency, and some even report it by using a control
+ out port. We don't currently use the latency information. */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <math.h>
+
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/namereg.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/module.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+#include <pulsecore/thread.h>
+#include <pulsecore/thread-mq.h>
+#include <pulsecore/rtpoll.h>
+#include <pulsecore/sample-util.h>
+
+#include "module-ladspa-sink-symdef.h"
+#include "ladspa.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("Virtual LADSPA sink");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_USAGE(
+ "sink_name=<name for the sink> "
+ "master=<name of sink to remap> "
+ "format=<sample format> "
+ "channels=<number of channels> "
+ "rate=<sample rate> "
+ "channel_map=<channel map> "
+ "plugin=<ladspa plugin name> "
+ "label=<ladspa plugin label> "
+ "control=<comma seperated list of input control values>");
+
+struct userdata {
+ pa_core *core;
+ pa_module *module;
+
+ pa_sink *sink, *master;
+ pa_sink_input *sink_input;
+
+ const LADSPA_Descriptor *descriptor;
+ unsigned channels;
+ LADSPA_Handle handle[PA_CHANNELS_MAX];
+ LADSPA_Data *input, *output;
+ size_t block_size;
+ unsigned long input_port, output_port;
+ LADSPA_Data *control;
+
+ /* This is a dummy buffer. Every port must be connected, but we don't care
+ about control out ports. We connect them all to this single buffer. */
+ LADSPA_Data control_out;
+
+ pa_memchunk memchunk;
+};
+
+static const char* const valid_modargs[] = {
+ "sink_name",
+ "master",
+ "format",
+ "channels",
+ "rate",
+ "channel_map",
+ "plugin",
+ "label",
+ "control",
+ NULL
+};
+
+/* Called from I/O thread context */
+static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u = PA_SINK(o)->userdata;
+
+ switch (code) {
+
+ case PA_SINK_MESSAGE_GET_LATENCY: {
+ pa_usec_t usec = 0;
+
+ if (PA_MSGOBJECT(u->master)->process_msg(PA_MSGOBJECT(u->master), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
+ usec = 0;
+
+ *((pa_usec_t*) data) = usec + pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
+ return 0;
+ }
+ }
+
+ return pa_sink_process_msg(o, code, data, offset, chunk);
+}
+
+/* Called from main context */
+static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
+ struct userdata *u;
+
+ pa_sink_assert_ref(s);
+ pa_assert_se(u = s->userdata);
+
+ if (PA_SINK_LINKED(state) && u->sink_input && PA_SINK_INPUT_LINKED(pa_sink_input_get_state(u->sink_input)))
+ pa_sink_input_cork(u->sink_input, state == PA_SINK_SUSPENDED);
+
+ return 0;
+}
+
+/* Called from I/O thread context */
+static int sink_input_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u = PA_SINK_INPUT(o)->userdata;
+
+ switch (code) {
+ case PA_SINK_INPUT_MESSAGE_GET_LATENCY:
+ *((pa_usec_t*) data) = pa_bytes_to_usec(u->memchunk.length, &u->sink_input->sample_spec);
+
+ /* Fall through, the default handler will add in the extra
+ * latency added by the resampler */
+ break;
+ }
+
+ return pa_sink_input_process_msg(o, code, data, offset, chunk);
+}
+
+/* Called from I/O thread context */
+static int sink_input_peek_cb(pa_sink_input *i, size_t length, pa_memchunk *chunk) {
+ struct userdata *u;
+
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(u = i->userdata);
+
+ if (!u->memchunk.memblock) {
+ pa_memchunk tchunk;
+ float *src, *dst;
+ size_t fs;
+ unsigned n, c;
+
+ pa_sink_render(u->sink, length, &tchunk);
+
+ fs = pa_frame_size(&i->sample_spec);
+ n = tchunk.length / fs;
+
+ pa_assert(n > 0);
+
+ u->memchunk.memblock = pa_memblock_new(i->sink->core->mempool, tchunk.length);
+ u->memchunk.index = 0;
+ u->memchunk.length = tchunk.length;
+
+ src = (float*) ((uint8_t*) pa_memblock_acquire(tchunk.memblock) + tchunk.index);
+ dst = (float*) pa_memblock_acquire(u->memchunk.memblock);
+
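+ /* Run the plugin once per channel: de-interleave one channel into
+ * u->input, let the plugin process it, and re-interleave u->output
+ * into the destination block, clamping samples to [-1.0, 1.0]. */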
+ for (c = 0; c < u->channels; c++) {
+ unsigned j;
+ float *p, *q;
+
+ p = src + c;
+ q = u->input;
+ for (j = 0; j < n; j++, p += u->channels, q++)
+ *q = PA_CLAMP_UNLIKELY(*p, -1.0, 1.0);
+
+ u->descriptor->run(u->handle[c], n);
+
+ q = u->output;
+ p = dst + c;
+ for (j = 0; j < n; j++, q++, p += u->channels)
+ *p = PA_CLAMP_UNLIKELY(*q, -1.0, 1.0);
+ }
+
+ pa_memblock_release(tchunk.memblock);
+ pa_memblock_release(u->memchunk.memblock);
+
+ pa_memblock_unref(tchunk.memblock);
+ }
+
+ pa_assert(u->memchunk.length > 0);
+ pa_assert(u->memchunk.memblock);
+
+ *chunk = u->memchunk;
+ pa_memblock_ref(chunk->memblock);
+
+ return 0;
+}
+
+/* Called from I/O thread context */
+static void sink_input_drop_cb(pa_sink_input *i, size_t length) {
+ struct userdata *u;
+
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(u = i->userdata);
+ pa_assert(length > 0);
+
+ if (u->memchunk.memblock) {
+
+ if (length < u->memchunk.length) {
+ u->memchunk.index += length;
+ u->memchunk.length -= length;
+ return;
+ }
+
+ pa_memblock_unref(u->memchunk.memblock);
+ length -= u->memchunk.length;
+ pa_memchunk_reset(&u->memchunk);
+ }
+
+ if (length > 0)
+ pa_sink_skip(u->sink, length);
+}
+
+/* Called from I/O thread context */
+static void sink_input_detach_cb(pa_sink_input *i) {
+ struct userdata *u;
+
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(u = i->userdata);
+
+ pa_sink_detach_within_thread(u->sink);
+}
+
+/* Called from I/O thread context */
+static void sink_input_attach_cb(pa_sink_input *i) {
+ struct userdata *u;
+
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(u = i->userdata);
+
+ pa_sink_set_asyncmsgq(u->sink, i->sink->asyncmsgq);
+ pa_sink_set_rtpoll(u->sink, i->sink->rtpoll);
+
+ pa_sink_attach_within_thread(u->sink);
+}
+
+/* Called from main context */
+static void sink_input_kill_cb(pa_sink_input *i) {
+ struct userdata *u;
+
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(u = i->userdata);
+
+ pa_sink_input_unlink(u->sink_input);
+ pa_sink_input_unref(u->sink_input);
+ u->sink_input = NULL;
+
+ pa_sink_unlink(u->sink);
+ pa_sink_unref(u->sink);
+ u->sink = NULL;
+
+ pa_module_unload_request(u->module);
+}
+
+int pa__init(pa_module*m) {
+ struct userdata *u;
+ pa_sample_spec ss;
+ pa_channel_map map;
+ pa_modargs *ma;
+ char *t;
+ pa_sink *master;
+ pa_sink_input_new_data data;
+ const char *plugin, *label;
+ LADSPA_Descriptor_Function descriptor_func;
+ const char *e, *cdata;
+ const LADSPA_Descriptor *d;
+ unsigned long input_port, output_port, p, j, n_control;
+ unsigned c;
+ pa_bool_t *use_default = NULL;
+ char *default_sink_name = NULL;
+
+ pa_assert(m);
+
+ pa_assert(sizeof(LADSPA_Data) == sizeof(float));
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments.");
+ goto fail;
+ }
+
+ if (!(master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "master", NULL), PA_NAMEREG_SINK, 1))) {
+ pa_log("Master sink not found");
+ goto fail;
+ }
+
+ ss = master->sample_spec;
+ ss.format = PA_SAMPLE_FLOAT32;
+ map = master->channel_map;
+ if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_DEFAULT) < 0) {
+ pa_log("Invalid sample format specification or channel map");
+ goto fail;
+ }
+
+ if (!(plugin = pa_modargs_get_value(ma, "plugin", NULL))) {
+ pa_log("Missing LADSPA plugin name");
+ goto fail;
+ }
+
+ if (!(label = pa_modargs_get_value(ma, "label", NULL))) {
+ pa_log("Missing LADSPA plugin label");
+ goto fail;
+ }
+
+ cdata = pa_modargs_get_value(ma, "control", NULL);
+
+ u = pa_xnew0(struct userdata, 1);
+ u->core = m->core;
+ u->module = m;
+ m->userdata = u;
+ u->master = master;
+ pa_memchunk_reset(&u->memchunk);
+
+ if (!(e = getenv("LADSPA_PATH")))
+ e = LADSPA_PATH;
+
+ /* FIXME: This is not exactly thread safe */
+ t = pa_xstrdup(lt_dlgetsearchpath());
+ lt_dlsetsearchpath(e);
+ m->dl = lt_dlopenext(plugin);
+ lt_dlsetsearchpath(t);
+ pa_xfree(t);
+
+ if (!m->dl) {
+ pa_log("Failed to load LADSPA plugin: %s", lt_dlerror());
+ goto fail;
+ }
+
+ if (!(descriptor_func = (LADSPA_Descriptor_Function) lt_dlsym(m->dl, "ladspa_descriptor"))) {
+ pa_log("LADSPA module lacks ladspa_descriptor() symbol.");
+ goto fail;
+ }
+
+ for (j = 0;; j++) {
+
+ if (!(d = descriptor_func(j))) {
+ pa_log("Failed to find plugin label '%s' in plugin '%s'.", plugin, label);
+ goto fail;
+ }
+
+ if (strcmp(d->Label, label) == 0)
+ break;
+ }
+
+ u->descriptor = d;
+
+ pa_log_debug("Module: %s", plugin);
+ pa_log_debug("Label: %s", d->Label);
+ pa_log_debug("Unique ID: %lu", d->UniqueID);
+ pa_log_debug("Name: %s", d->Name);
+ pa_log_debug("Maker: %s", d->Maker);
+ pa_log_debug("Copyright: %s", d->Copyright);
+
+ input_port = output_port = (unsigned long) -1;
+ n_control = 0;
+
+ for (p = 0; p < d->PortCount; p++) {
+
+ if (LADSPA_IS_PORT_INPUT(d->PortDescriptors[p]) && LADSPA_IS_PORT_AUDIO(d->PortDescriptors[p])) {
+
+ if (strcmp(d->PortNames[p], "Input") == 0) {
+ pa_assert(input_port == (unsigned long) -1);
+ input_port = p;
+ } else {
+ pa_log("Found audio input port on plugin we cannot handle: %s", d->PortNames[p]);
+ goto fail;
+ }
+
+ } else if (LADSPA_IS_PORT_OUTPUT(d->PortDescriptors[p]) && LADSPA_IS_PORT_AUDIO(d->PortDescriptors[p])) {
+
+ if (strcmp(d->PortNames[p], "Output") == 0) {
+ pa_assert(output_port == (unsigned long) -1);
+ output_port = p;
+ } else {
+ pa_log("Found audio output port on plugin we cannot handle: %s", d->PortNames[p]);
+ goto fail;
+ }
+
+ } else if (LADSPA_IS_PORT_INPUT(d->PortDescriptors[p]) && LADSPA_IS_PORT_CONTROL(d->PortDescriptors[p]))
+ n_control++;
+ else {
+ pa_assert(LADSPA_IS_PORT_OUTPUT(d->PortDescriptors[p]) && LADSPA_IS_PORT_CONTROL(d->PortDescriptors[p]));
+ pa_log_debug("Ignored control output port \"%s\".", d->PortNames[p]);
+ }
+ }
+
+ if ((input_port == (unsigned long) -1) || (output_port == (unsigned long) -1)) {
+ pa_log("Failed to identify input and output ports. "
+ "Right now this module can only deal with plugins which provide an 'Input' and an 'Output' audio port. "
+ "Patches welcome!");
+ goto fail;
+ }
+
+ u->block_size = pa_frame_align(pa_mempool_block_size_max(m->core->mempool), &ss);
+
+ u->input = (LADSPA_Data*) pa_xnew(uint8_t, u->block_size);
+ if (LADSPA_IS_INPLACE_BROKEN(d->Properties))
+ u->output = (LADSPA_Data*) pa_xnew(uint8_t, u->block_size);
+ else
+ u->output = u->input;
+
+ u->channels = ss.channels;
+
+ for (c = 0; c < ss.channels; c++) {
+ if (!(u->handle[c] = d->instantiate(d, ss.rate))) {
+ pa_log("Failed to instantiate plugin %s with label %s for channel %i", plugin, d->Label, c);
+ goto fail;
+ }
+
+ d->connect_port(u->handle[c], input_port, u->input);
+ d->connect_port(u->handle[c], output_port, u->output);
+ }
+
+ if (!cdata && n_control > 0) {
+ pa_log("This plugin requires specification of %lu control parameters.", n_control);
+ goto fail;
+ }
+
+ if (n_control > 0) {
+ const char *state = NULL;
+ char *k;
+ unsigned long h;
+
+ u->control = pa_xnew(LADSPA_Data, n_control);
+ use_default = pa_xnew(pa_bool_t, n_control);
+ p = 0;
+
+ while ((k = pa_split(cdata, ",", &state)) && p < n_control) {
+ float f;
+
+ if (*k == 0) {
+ use_default[p++] = TRUE;
+ pa_xfree(k);
+ continue;
+ }
+
+ if (pa_atof(k, &f) < 0) {
+ pa_log("Failed to parse control value '%s'", k);
+ pa_xfree(k);
+ goto fail;
+ }
+
+ pa_xfree(k);
+
+ use_default[p] = FALSE;
+ u->control[p++] = f;
+ }
+
+ /* The previous loop doesn't take the last control value into account
+ if it is left empty, so we do it here. */
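+ /* Example: with three input control ports, control=0.5,,440 sets
+ * the first port to 0.5, leaves the second at the plugin's default
+ * and sets the third to 440; a trailing ',' likewise leaves the
+ * last port at its default. */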
+ if (*cdata == 0 || cdata[strlen(cdata) - 1] == ',') {
+ if (p < n_control)
+ use_default[p] = TRUE;
+ p++;
+ }
+
+ if (p > n_control || k) {
+ pa_log("Too many control values passed, %lu expected.", n_control);
+ if (k)
+ pa_xfree(k);
+ goto fail;
+ }
+
+ if (p < n_control) {
+ pa_log("Not enough control values passed, %lu expected, %lu passed.", n_control, p);
+ goto fail;
+ }
+
+ h = 0;
+ for (p = 0; p < d->PortCount; p++) {
+ LADSPA_PortRangeHintDescriptor hint = d->PortRangeHints[p].HintDescriptor;
+
+ if (!LADSPA_IS_PORT_CONTROL(d->PortDescriptors[p]))
+ continue;
+
+ if (LADSPA_IS_PORT_OUTPUT(d->PortDescriptors[p])) {
+ for (c = 0; c < ss.channels; c++)
+ d->connect_port(u->handle[c], p, &u->control_out);
+ continue;
+ }
+
+ pa_assert(h < n_control);
+
+ if (use_default[h]) {
+ LADSPA_Data lower, upper;
+
+ if (!LADSPA_IS_HINT_HAS_DEFAULT(hint)) {
+ pa_log("Control port value left empty but plugin defines no default.");
+ goto fail;
+ }
+
+ lower = d->PortRangeHints[p].LowerBound;
+ upper = d->PortRangeHints[p].UpperBound;
+
+ if (LADSPA_IS_HINT_SAMPLE_RATE(hint)) {
+ lower *= ss.rate;
+ upper *= ss.rate;
+ }
+
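+ /* The hint picks a default inside [lower, upper]: LOW/MIDDLE/HIGH
+ * are interpolations between the bounds (geometric ones if the
+ * port is logarithmic, e.g. sqrt(lower*upper) for MIDDLE). */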
+ switch (hint & LADSPA_HINT_DEFAULT_MASK) {
+
+ case LADSPA_HINT_DEFAULT_MINIMUM:
+ u->control[h] = lower;
+ break;
+
+ case LADSPA_HINT_DEFAULT_MAXIMUM:
+ u->control[h] = upper;
+ break;
+
+ case LADSPA_HINT_DEFAULT_LOW:
+ if (LADSPA_IS_HINT_LOGARITHMIC(hint))
+ u->control[h] = exp(log(lower) * 0.75 + log(upper) * 0.25);
+ else
+ u->control[h] = lower * 0.75 + upper * 0.25;
+ break;
+
+ case LADSPA_HINT_DEFAULT_MIDDLE:
+ if (LADSPA_IS_HINT_LOGARITHMIC(hint))
+ u->control[h] = exp(log(lower) * 0.5 + log(upper) * 0.5);
+ else
+ u->control[h] = lower * 0.5 + upper * 0.5;
+ break;
+
+ case LADSPA_HINT_DEFAULT_HIGH:
+ if (LADSPA_IS_HINT_LOGARITHMIC(hint))
+ u->control[h] = exp(log(lower) * 0.25 + log(upper) * 0.75);
+ else
+ u->control[h] = lower * 0.25 + upper * 0.75;
+ break;
+
+ case LADSPA_HINT_DEFAULT_0:
+ u->control[h] = 0;
+ break;
+
+ case LADSPA_HINT_DEFAULT_1:
+ u->control[h] = 1;
+ break;
+
+ case LADSPA_HINT_DEFAULT_100:
+ u->control[h] = 100;
+ break;
+
+ case LADSPA_HINT_DEFAULT_440:
+ u->control[h] = 440;
+ break;
+
+ default:
+ pa_assert_not_reached();
+ }
+ }
+
+ if (LADSPA_IS_HINT_INTEGER(hint))
+ u->control[h] = roundf(u->control[h]);
+
+ pa_log_debug("Binding %f to port %s", u->control[h], d->PortNames[p]);
+
+ for (c = 0; c < ss.channels; c++)
+ d->connect_port(u->handle[c], p, &u->control[h]);
+
+ h++;
+ }
+
+ pa_assert(h == n_control);
+ }
+
+ if (d->activate)
+ for (c = 0; c < u->channels; c++)
+ d->activate(u->handle[c]);
+
+ default_sink_name = pa_sprintf_malloc("%s.ladspa", master->name);
+
+ /* Create sink */
+ if (!(u->sink = pa_sink_new(m->core, __FILE__, pa_modargs_get_value(ma, "sink_name", default_sink_name), 0, &ss, &map))) {
+ pa_log("Failed to create sink.");
+ goto fail;
+ }
+
+ u->sink->parent.process_msg = sink_process_msg;
+ u->sink->set_state = sink_set_state;
+ u->sink->userdata = u;
+ u->sink->flags = PA_SINK_LATENCY;
+
+ pa_sink_set_module(u->sink, m);
+ pa_sink_set_description(u->sink, t = pa_sprintf_malloc("LADSPA plugin '%s' on '%s'", label, master->description));
+ pa_xfree(t);
+ pa_sink_set_asyncmsgq(u->sink, master->asyncmsgq);
+ pa_sink_set_rtpoll(u->sink, master->rtpoll);
+
+ /* Create sink input */
+ pa_sink_input_new_data_init(&data);
+ data.sink = u->master;
+ data.driver = __FILE__;
+ data.name = "LADSPA Stream";
+ pa_sink_input_new_data_set_sample_spec(&data, &ss);
+ pa_sink_input_new_data_set_channel_map(&data, &map);
+ data.module = m;
+
+ if (!(u->sink_input = pa_sink_input_new(m->core, &data, PA_SINK_INPUT_DONT_MOVE)))
+ goto fail;
+
+ u->sink_input->parent.process_msg = sink_input_process_msg;
+ u->sink_input->peek = sink_input_peek_cb;
+ u->sink_input->drop = sink_input_drop_cb;
+ u->sink_input->kill = sink_input_kill_cb;
+ u->sink_input->attach = sink_input_attach_cb;
+ u->sink_input->detach = sink_input_detach_cb;
+ u->sink_input->userdata = u;
+
+ pa_sink_put(u->sink);
+ pa_sink_input_put(u->sink_input);
+
+ pa_modargs_free(ma);
+
+ pa_xfree(use_default);
+ pa_xfree(default_sink_name);
+
+ return 0;
+
+fail:
+ if (ma)
+ pa_modargs_free(ma);
+
+ pa_xfree(use_default);
+ pa_xfree(default_sink_name);
+
+ pa__done(m);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+ unsigned c;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->sink_input) {
+ pa_sink_input_unlink(u->sink_input);
+ pa_sink_input_unref(u->sink_input);
+ }
+
+ if (u->sink) {
+ pa_sink_unlink(u->sink);
+ pa_sink_unref(u->sink);
+ }
+
+ if (u->memchunk.memblock)
+ pa_memblock_unref(u->memchunk.memblock);
+
+ for (c = 0; c < u->channels; c++)
+ if (u->handle[c]) {
+ if (u->descriptor->deactivate)
+ u->descriptor->deactivate(u->handle[c]);
+ u->descriptor->cleanup(u->handle[c]);
+ }
+
+ if (u->output != u->input)
+ pa_xfree(u->output);
+
+ pa_xfree(u->input);
+
+ pa_xfree(u->control);
+
+ pa_xfree(u);
+}
diff --git a/src/modules/module-lirc.c b/src/modules/module-lirc.c
new file mode 100644
index 00000000..24542172
--- /dev/null
+++ b/src/modules/module-lirc.c
@@ -0,0 +1,259 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2005-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include <lirc/lirc_client.h>
+
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/module.h>
+#include <pulsecore/log.h>
+#include <pulsecore/namereg.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/macro.h>
+
+#include "module-lirc-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("LIRC volume control");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(TRUE);
+PA_MODULE_USAGE("config=<config file> sink=<sink name> appname=<lirc application name>");
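+
+/* The strings understood here ("volume-up", "volume-down", "mute",
+ * "mute-toggle", "reset") are produced by lirc_code2char() from the
+ * user's lirc configuration, e.g. an ~/.lircrc entry along these
+ * lines (the button name depends on the remote):
+ *
+ *   begin
+ *     prog = pulseaudio
+ *     button = VOL_UP
+ *     config = volume-up
+ *   end
+ */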
+
+static const char* const valid_modargs[] = {
+ "config",
+ "sink",
+ "appname",
+ NULL,
+};
+
+struct userdata {
+ int lirc_fd;
+ pa_io_event *io;
+ struct lirc_config *config;
+ char *sink_name;
+ pa_module *module;
+ float mute_toggle_save;
+};
+
+static int lirc_in_use = 0;
+
+static void io_callback(pa_mainloop_api *io, PA_GCC_UNUSED pa_io_event *e, PA_GCC_UNUSED int fd, pa_io_event_flags_t events, void*userdata) {
+ struct userdata *u = userdata;
+ char *name = NULL, *code = NULL;
+
+ pa_assert(io);
+ pa_assert(u);
+
+ if (events & (PA_IO_EVENT_HANGUP|PA_IO_EVENT_ERROR)) {
+ pa_log("Lost connection to LIRC daemon.");
+ goto fail;
+ }
+
+ if (events & PA_IO_EVENT_INPUT) {
+ char *c;
+
+ if (lirc_nextcode(&code) != 0 || !code) {
+ pa_log("lirc_nextcode() failed.");
+ goto fail;
+ }
+
+ c = pa_xstrdup(code);
+ c[strcspn(c, "\n\r")] = 0;
+ pa_log_debug("Raw IR code '%s'", c);
+ pa_xfree(c);
+
+ while (lirc_code2char(u->config, code, &name) == 0 && name) {
+ enum {
+ INVALID,
+ UP,
+ DOWN,
+ MUTE,
+ RESET,
+ MUTE_TOGGLE
+ } volchange = INVALID;
+
+ pa_log_info("Translated IR code '%s'", name);
+
+ if (strcasecmp(name, "volume-up") == 0)
+ volchange = UP;
+ else if (strcasecmp(name, "volume-down") == 0)
+ volchange = DOWN;
+ else if (strcasecmp(name, "mute") == 0)
+ volchange = MUTE;
+ else if (strcasecmp(name, "mute-toggle") == 0)
+ volchange = MUTE_TOGGLE;
+ else if (strcasecmp(name, "reset") == 0)
+ volchange = RESET;
+
+ if (volchange == INVALID)
+ pa_log_warn("Recieved unknown IR code '%s'", name);
+ else {
+ pa_sink *s;
+
+ if (!(s = pa_namereg_get(u->module->core, u->sink_name, PA_NAMEREG_SINK, 1)))
+ pa_log("Failed to get sink '%s'", u->sink_name);
+ else {
+ int i;
+ pa_cvolume cv = *pa_sink_get_volume(s);
+
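+ /* One key press changes the volume by 5% of PA_VOLUME_NORM */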
+#define DELTA (PA_VOLUME_NORM/20)
+
+ switch (volchange) {
+ case UP:
+ for (i = 0; i < cv.channels; i++) {
+ cv.values[i] += DELTA;
+
+ if (cv.values[i] > PA_VOLUME_NORM)
+ cv.values[i] = PA_VOLUME_NORM;
+ }
+
+ pa_sink_set_volume(s, &cv);
+ break;
+
+ case DOWN:
+ for (i = 0; i < cv.channels; i++) {
+ if (cv.values[i] >= DELTA)
+ cv.values[i] -= DELTA;
+ else
+ cv.values[i] = PA_VOLUME_MUTED;
+ }
+
+ pa_sink_set_volume(s, &cv);
+ break;
+
+ case MUTE:
+ pa_sink_set_mute(s, 1);
+ break;
+
+ case RESET:
+ pa_sink_set_mute(s, 0);
+ break;
+
+ case MUTE_TOGGLE:
+
+ pa_sink_set_mute(s, !pa_sink_get_mute(s));
+ break;
+
+ case INVALID:
+ ;
+ }
+ }
+ }
+ }
+ }
+
+ pa_xfree(code);
+
+ return;
+
+fail:
+ u->module->core->mainloop->io_free(u->io);
+ u->io = NULL;
+
+ pa_module_unload_request(u->module);
+
+ pa_xfree(code);
+}
+
+int pa__init(pa_module*m) {
+ pa_modargs *ma = NULL;
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (lirc_in_use) {
+ pa_log("module-lirc may no be loaded twice.");
+ return -1;
+ }
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments");
+ goto fail;
+ }
+
+ m->userdata = u = pa_xnew(struct userdata, 1);
+ u->module = m;
+ u->io = NULL;
+ u->config = NULL;
+ u->sink_name = pa_xstrdup(pa_modargs_get_value(ma, "sink", NULL));
+ u->lirc_fd = -1;
+ u->mute_toggle_save = 0;
+
+ if ((u->lirc_fd = lirc_init((char*) pa_modargs_get_value(ma, "appname", "pulseaudio"), 1)) < 0) {
+ pa_log("lirc_init() failed.");
+ goto fail;
+ }
+
+ if (lirc_readconfig((char*) pa_modargs_get_value(ma, "config", NULL), &u->config, NULL) < 0) {
+ pa_log("lirc_readconfig() failed.");
+ goto fail;
+ }
+
+ u->io = m->core->mainloop->io_new(m->core->mainloop, u->lirc_fd, PA_IO_EVENT_INPUT|PA_IO_EVENT_HANGUP, io_callback, u);
+
+ lirc_in_use = 1;
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+
+ if (ma)
+ pa_modargs_free(ma);
+
+ pa__done(m);
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->io)
+ m->core->mainloop->io_free(u->io);
+
+ if (u->config)
+ lirc_freeconfig(u->config);
+
+ if (u->lirc_fd >= 0)
+ lirc_deinit();
+
+ pa_xfree(u->sink_name);
+ pa_xfree(u);
+
+ lirc_in_use = 0;
+}
diff --git a/src/modules/module-match.c b/src/modules/module-match.c
new file mode 100644
index 00000000..ed5f3076
--- /dev/null
+++ b/src/modules/module-match.c
@@ -0,0 +1,244 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <regex.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/module.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+#include <pulsecore/core-subscribe.h>
+#include <pulsecore/sink-input.h>
+#include <pulsecore/core-util.h>
+
+#include "module-match-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("Playback stream expression matching module");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(TRUE);
+PA_MODULE_USAGE("table=<filename>");
+
+#define WHITESPACE "\n\r \t"
+
+#define DEFAULT_MATCH_TABLE_FILE PA_DEFAULT_CONFIG_DIR"/match.table"
+#define DEFAULT_MATCH_TABLE_FILE_USER "match.table"
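+
+/* Each non-comment line of the table has the form
+ *
+ *   <regex> <volume>
+ *
+ * where <regex> is a POSIX extended regular expression (it may not
+ * contain whitespace) matched against the sink input name, and
+ * <volume> is an integer volume (PA_VOLUME_NORM, i.e. 100%, is
+ * 0x10000). For example, "^sample: 32768" would set every sink input
+ * whose name starts with "sample:" to roughly half volume. */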
+
+static const char* const valid_modargs[] = {
+ "table",
+ NULL,
+};
+
+struct rule {
+ regex_t regex;
+ pa_volume_t volume;
+ struct rule *next;
+};
+
+struct userdata {
+ struct rule *rules;
+ pa_subscription *subscription;
+};
+
+static int load_rules(struct userdata *u, const char *filename) {
+ FILE *f;
+ int n = 0;
+ int ret = -1;
+ struct rule *end = NULL;
+ char *fn = NULL;
+
+ pa_assert(u);
+
+ f = filename ?
+ fopen(fn = pa_xstrdup(filename), "r") :
+ pa_open_config_file(DEFAULT_MATCH_TABLE_FILE, DEFAULT_MATCH_TABLE_FILE_USER, NULL, &fn, "r");
+
+ if (!f) {
+ pa_log("failed to open file '%s': %s", fn, pa_cstrerror(errno));
+ goto finish;
+ }
+
+ pa_lock_fd(fileno(f), 1);
+
+ while (!feof(f)) {
+ char *d, *v;
+ pa_volume_t volume;
+ uint32_t k;
+ regex_t regex;
+ char ln[256];
+ struct rule *rule;
+
+ if (!fgets(ln, sizeof(ln), f))
+ break;
+
+ n++;
+
+ pa_strip_nl(ln);
+
+ if (ln[0] == '#' || !*ln)
+ continue;
+
+ d = ln+strcspn(ln, WHITESPACE);
+ v = d+strspn(d, WHITESPACE);
+
+ if (!*v) {
+ pa_log("[%s:%u] failed to parse line - too few words", fn, n);
+ goto finish;
+ }
+
+ *d = 0;
+ if (pa_atou(v, &k) < 0) {
+ pa_log("[%s:%u] failed to parse volume", filename, n);
+ goto finish;
+ }
+
+ volume = (pa_volume_t) k;
+
+ if (regcomp(&regex, ln, REG_EXTENDED|REG_NOSUB) != 0) {
+ pa_log("[%s:%u] invalid regular expression", filename, n);
+ goto finish;
+ }
+
+ rule = pa_xnew(struct rule, 1);
+ rule->regex = regex;
+ rule->volume = volume;
+ rule->next = NULL;
+
+ if (end)
+ end->next = rule;
+ else
+ u->rules = rule;
+ end = rule;
+
+ *d = 0;
+ }
+
+ ret = 0;
+
+finish:
+ if (f) {
+ pa_lock_fd(fileno(f), 0);
+ fclose(f);
+ }
+
+ if (fn)
+ pa_xfree(fn);
+
+ return ret;
+}
+
+static void callback(pa_core *c, pa_subscription_event_type_t t, uint32_t idx, void *userdata) {
+ struct userdata *u = userdata;
+ pa_sink_input *si;
+ struct rule *r;
+
+ pa_assert(c);
+ pa_assert(u);
+
+ if (t != (PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_NEW))
+ return;
+
+ if (!(si = pa_idxset_get_by_index(c->sink_inputs, idx)))
+ return;
+
+ if (!si->name)
+ return;
+
+ for (r = u->rules; r; r = r->next) {
+ if (!regexec(&r->regex, si->name, 0, NULL, 0)) {
+ pa_cvolume cv;
+ pa_log_debug("changing volume of sink input '%s' to 0x%03x", si->name, r->volume);
+ pa_cvolume_set(&cv, si->sample_spec.channels, r->volume);
+ pa_sink_input_set_volume(si, &cv);
+ }
+ }
+}
+
+int pa__init(pa_module*m) {
+ pa_modargs *ma = NULL;
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments");
+ goto fail;
+ }
+
+ u = pa_xnew(struct userdata, 1);
+ u->rules = NULL;
+ u->subscription = NULL;
+ m->userdata = u;
+
+ if (load_rules(u, pa_modargs_get_value(ma, "table", NULL)) < 0)
+ goto fail;
+
+ u->subscription = pa_subscription_new(m->core, PA_SUBSCRIPTION_MASK_SINK_INPUT, callback, u);
+
+ pa_modargs_free(ma);
+ return 0;
+
+fail:
+ pa__done(m);
+
+ if (ma)
+ pa_modargs_free(ma);
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata* u;
+ struct rule *r, *n;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->subscription)
+ pa_subscription_free(u->subscription);
+
+ for (r = u->rules; r; r = n) {
+ n = r->next;
+
+ regfree(&r->regex);
+ pa_xfree(r);
+ }
+
+ pa_xfree(u);
+}
diff --git a/src/modules/module-mmkbd-evdev.c b/src/modules/module-mmkbd-evdev.c
new file mode 100644
index 00000000..03c0e973
--- /dev/null
+++ b/src/modules/module-mmkbd-evdev.c
@@ -0,0 +1,262 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2005-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <linux/input.h>
+
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/module.h>
+#include <pulsecore/log.h>
+#include <pulsecore/namereg.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/core-util.h>
+
+#include "module-mmkbd-evdev-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("Multimedia keyboard support via Linux evdev");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_USAGE("device=<evdev device> sink=<sink name>");
+
+#define DEFAULT_DEVICE "/dev/input/event0"
+
+/*
+ * This isn't defined in older kernel headers and there is no way of
+ * detecting it.
+ */
+struct _input_id {
+ __u16 bustype;
+ __u16 vendor;
+ __u16 product;
+ __u16 version;
+};
+
+static const char* const valid_modargs[] = {
+ "device",
+ "sink",
+ NULL,
+};
+
+struct userdata {
+ int fd, fd_type;
+ pa_io_event *io;
+ char *sink_name;
+ pa_module *module;
+};
+
+static void io_callback(pa_mainloop_api *io, PA_GCC_UNUSED pa_io_event *e, PA_GCC_UNUSED int fd, pa_io_event_flags_t events, void*userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(io);
+ pa_assert(u);
+
+ if (events & (PA_IO_EVENT_HANGUP|PA_IO_EVENT_ERROR)) {
+ pa_log("Lost connection to evdev device.");
+ goto fail;
+ }
+
+ if (events & PA_IO_EVENT_INPUT) {
+ struct input_event ev;
+
+ if (pa_loop_read(u->fd, &ev, sizeof(ev), &u->fd_type) <= 0) {
+ pa_log("Failed to read from event device: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ if (ev.type == EV_KEY && (ev.value == 1 || ev.value == 2)) {
+ enum { INVALID, UP, DOWN, MUTE_TOGGLE } volchange = INVALID;
+
+ pa_log_debug("Key code=%u, value=%u", ev.code, ev.value);
+
+ switch (ev.code) {
+ case KEY_VOLUMEDOWN: volchange = DOWN; break;
+ case KEY_VOLUMEUP: volchange = UP; break;
+ case KEY_MUTE: volchange = MUTE_TOGGLE; break;
+ }
+
+ if (volchange != INVALID) {
+ pa_sink *s;
+
+ if (!(s = pa_namereg_get(u->module->core, u->sink_name, PA_NAMEREG_SINK, 1)))
+ pa_log("Failed to get sink '%s'", u->sink_name);
+ else {
+ int i;
+ pa_cvolume cv = *pa_sink_get_volume(s);
+
+#define DELTA (PA_VOLUME_NORM/20)
+
+ switch (volchange) {
+ case UP:
+ for (i = 0; i < cv.channels; i++) {
+ cv.values[i] += DELTA;
+
+ if (cv.values[i] > PA_VOLUME_NORM)
+ cv.values[i] = PA_VOLUME_NORM;
+ }
+
+ pa_sink_set_volume(s, &cv);
+ break;
+
+ case DOWN:
+ for (i = 0; i < cv.channels; i++) {
+ if (cv.values[i] >= DELTA)
+ cv.values[i] -= DELTA;
+ else
+ cv.values[i] = PA_VOLUME_MUTED;
+ }
+
+ pa_sink_set_volume(s, &cv);
+ break;
+
+ case MUTE_TOGGLE:
+
+ pa_sink_set_mute(s, !pa_sink_get_mute(s));
+ break;
+
+ case INVALID:
+ ;
+ }
+ }
+ }
+ }
+ }
+
+ return;
+
+fail:
+ u->module->core->mainloop->io_free(u->io);
+ u->io = NULL;
+
+ pa_module_unload_request(u->module);
+}
+
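+/* Tests whether the given bit is set in the byte-packed bitmask
+ * filled in by EVIOCGBIT */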
+#define test_bit(bit, array) (array[bit/8] & (1<<(bit%8)))
+
+int pa__init(pa_module*m) {
+
+ pa_modargs *ma = NULL;
+ struct userdata *u;
+ int version;
+ struct _input_id input_id;
+ char name[256];
+ uint8_t evtype_bitmask[EV_MAX/8 + 1];
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments");
+ goto fail;
+ }
+
+ m->userdata = u = pa_xnew(struct userdata,1);
+ u->module = m;
+ u->io = NULL;
+ u->sink_name = pa_xstrdup(pa_modargs_get_value(ma, "sink", NULL));
+ u->fd = -1;
+ u->fd_type = 0;
+
+ if ((u->fd = open(pa_modargs_get_value(ma, "device", DEFAULT_DEVICE), O_RDONLY)) < 0) {
+ pa_log("failed to open evdev device: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ if (ioctl(u->fd, EVIOCGVERSION, &version) < 0) {
+ pa_log("EVIOCGVERSION failed: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ pa_log_info("evdev driver version %i.%i.%i", version >> 16, (version >> 8) & 0xff, version & 0xff);
+
+ if(ioctl(u->fd, EVIOCGID, &input_id)) {
+ pa_log("EVIOCGID failed: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ pa_log_info("evdev vendor 0x%04hx product 0x%04hx version 0x%04hx bustype %u",
+ input_id.vendor, input_id.product, input_id.version, input_id.bustype);
+
+ memset(name, 0, sizeof(name));
+ if(ioctl(u->fd, EVIOCGNAME(sizeof(name)), name) < 0) {
+ pa_log("EVIOCGNAME failed: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ pa_log_info("evdev device name: %s", name);
+
+ memset(evtype_bitmask, 0, sizeof(evtype_bitmask));
+ if (ioctl(u->fd, EVIOCGBIT(0, EV_MAX), evtype_bitmask) < 0) {
+ pa_log("EVIOCGBIT failed: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ if (!test_bit(EV_KEY, evtype_bitmask)) {
+ pa_log("Device has no keys.");
+ goto fail;
+ }
+
+ u->io = m->core->mainloop->io_new(m->core->mainloop, u->fd, PA_IO_EVENT_INPUT|PA_IO_EVENT_HANGUP, io_callback, u);
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+
+ if (ma)
+ pa_modargs_free(ma);
+
+ pa__done(m);
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->io)
+ m->core->mainloop->io_free(u->io);
+
+ if (u->fd >= 0)
+ pa_assert_se(pa_close(u->fd) == 0);
+
+ pa_xfree(u->sink_name);
+ pa_xfree(u);
+}
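pa__init() above only checks that the device reports EV_KEY at all before attaching io_callback(). The same EVIOCGBIT ioctl can also be pointed at EV_KEY itself to see whether the specific keys handled above are actually present; the following is a minimal standalone sketch, assuming only the standard <linux/input.h> definitions, and is not part of the module:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/input.h>

#define test_bit(bit, array) (array[(bit)/8] & (1<<((bit)%8)))

/* Return 1 if the evdev device behind fd reports at least one of the
 * volume keys handled by io_callback() above, 0 if not, -1 on error. */
static int has_volume_keys(int fd) {
    uint8_t key_bitmask[KEY_MAX/8 + 1];

    memset(key_bitmask, 0, sizeof(key_bitmask));

    if (ioctl(fd, EVIOCGBIT(EV_KEY, sizeof(key_bitmask)), key_bitmask) < 0)
        return -1;

    return test_bit(KEY_VOLUMEUP, key_bitmask) ||
           test_bit(KEY_VOLUMEDOWN, key_bitmask) ||
           test_bit(KEY_MUTE, key_bitmask);
}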
diff --git a/src/modules/module-native-protocol-fd.c b/src/modules/module-native-protocol-fd.c
new file mode 100644
index 00000000..53f41896
--- /dev/null
+++ b/src/modules/module-native-protocol-fd.c
@@ -0,0 +1,89 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <unistd.h>
+
+#include <pulsecore/module.h>
+#include <pulsecore/macro.h>
+#include <pulsecore/iochannel.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/protocol-native.h>
+#include <pulsecore/log.h>
+
+#include "module-native-protocol-fd-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("Native protocol autospawn helper");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(TRUE);
+
+static const char* const valid_modargs[] = {
+ "fd",
+ "public",
+ "cookie",
+ NULL,
+};
+
+int pa__init(pa_module*m) {
+ pa_iochannel *io;
+ pa_modargs *ma;
+ int fd, r = -1;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments.");
+ goto finish;
+ }
+
+ if (pa_modargs_get_value_s32(ma, "fd", &fd) < 0) {
+ pa_log("Invalid file descriptor.");
+ goto finish;
+ }
+
+ io = pa_iochannel_new(m->core->mainloop, fd, fd);
+
+ if (!(m->userdata = pa_protocol_native_new_iochannel(m->core, io, m, ma))) {
+ pa_iochannel_free(io);
+ goto finish;
+ }
+
+ r = 0;
+
+finish:
+ if (ma)
+ pa_modargs_free(ma);
+
+ return r;
+}
+
+void pa__done(pa_module*m) {
+ pa_assert(m);
+
+ pa_protocol_native_free(m->userdata);
+}
diff --git a/src/modules/module-null-sink.c b/src/modules/module-null-sink.c
new file mode 100644
index 00000000..de35fff9
--- /dev/null
+++ b/src/modules/module-null-sink.c
@@ -0,0 +1,257 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <limits.h>
+
+#include <pulse/timeval.h>
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/macro.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/module.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/core-error.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+#include <pulsecore/thread.h>
+#include <pulsecore/thread-mq.h>
+#include <pulsecore/rtpoll.h>
+#include <pulsecore/rtclock.h>
+
+#include "module-null-sink-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("Clocked NULL sink");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_USAGE(
+ "format=<sample format> "
+ "channels=<number of channels> "
+ "rate=<sample rate> "
+ "sink_name=<name of sink>"
+ "channel_map=<channel map>"
+ "description=<description for the sink>");
+
+#define DEFAULT_SINK_NAME "null"
+
+struct userdata {
+ pa_core *core;
+ pa_module *module;
+ pa_sink *sink;
+
+ pa_thread *thread;
+ pa_thread_mq thread_mq;
+ pa_rtpoll *rtpoll;
+
+ size_t block_size;
+
+ struct timeval timestamp;
+};
+
+static const char* const valid_modargs[] = {
+ "rate",
+ "format",
+ "channels",
+ "sink_name",
+ "channel_map",
+ "description",
+ NULL
+};
+
+static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u = PA_SINK(o)->userdata;
+
+ switch (code) {
+ case PA_SINK_MESSAGE_SET_STATE:
+
+ if (PA_PTR_TO_UINT(data) == PA_SINK_RUNNING)
+ pa_rtclock_get(&u->timestamp);
+
+ break;
+
+ case PA_SINK_MESSAGE_GET_LATENCY: {
+ struct timeval now;
+
+ pa_rtclock_get(&now);
+
+ if (pa_timeval_cmp(&u->timestamp, &now) > 0)
+ *((pa_usec_t*) data) = 0;
+ else
+ *((pa_usec_t*) data) = pa_timeval_diff(&u->timestamp, &now);
+ break;
+ }
+ }
+
+ return pa_sink_process_msg(o, code, data, offset, chunk);
+}
+
+static void thread_func(void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(u);
+
+ pa_log_debug("Thread starting up");
+
+ pa_thread_mq_install(&u->thread_mq);
+ pa_rtpoll_install(u->rtpoll);
+
+ pa_rtclock_get(&u->timestamp);
+
+ for (;;) {
+ int ret;
+
+ /* Render some data and drop it immediately */
+ if (u->sink->thread_info.state == PA_SINK_RUNNING) {
+ struct timeval now;
+
+ pa_rtclock_get(&now);
+
+ if (pa_timeval_cmp(&u->timestamp, &now) <= 0) {
+ pa_sink_skip(u->sink, u->block_size);
+ pa_timeval_add(&u->timestamp, pa_bytes_to_usec(u->block_size, &u->sink->sample_spec));
+ }
+
+ pa_rtpoll_set_timer_absolute(u->rtpoll, &u->timestamp);
+ } else
+ pa_rtpoll_set_timer_disabled(u->rtpoll);
+
+ /* Hmm, nothing to do. Let's sleep */
+ if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)
+ goto fail;
+
+ if (ret == 0)
+ goto finish;
+ }
+
+fail:
+ /* If this wasn't a regular exit from the loop we have to continue
+ * processing messages until we receive PA_MESSAGE_SHUTDOWN */
+ pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
+ pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
+
+finish:
+ pa_log_debug("Thread shutting down");
+}
+
+int pa__init(pa_module*m) {
+ struct userdata *u = NULL;
+ pa_sample_spec ss;
+ pa_channel_map map;
+ pa_modargs *ma = NULL;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments.");
+ goto fail;
+ }
+
+ ss = m->core->default_sample_spec;
+ if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_DEFAULT) < 0) {
+ pa_log("Invalid sample format specification or channel map");
+ goto fail;
+ }
+
+ u = pa_xnew0(struct userdata, 1);
+ u->core = m->core;
+ u->module = m;
+ m->userdata = u;
+ pa_thread_mq_init(&u->thread_mq, m->core->mainloop);
+ u->rtpoll = pa_rtpoll_new();
+ pa_rtpoll_item_new_asyncmsgq(u->rtpoll, PA_RTPOLL_EARLY, u->thread_mq.inq);
+
+ if (!(u->sink = pa_sink_new(m->core, __FILE__, pa_modargs_get_value(ma, "sink_name", DEFAULT_SINK_NAME), 0, &ss, &map))) {
+ pa_log("Failed to create sink.");
+ goto fail;
+ }
+
+ u->sink->parent.process_msg = sink_process_msg;
+ u->sink->userdata = u;
+ u->sink->flags = PA_SINK_LATENCY;
+
+ pa_sink_set_module(u->sink, m);
+ pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
+ pa_sink_set_rtpoll(u->sink, u->rtpoll);
+ pa_sink_set_description(u->sink, pa_modargs_get_value(ma, "description", "NULL sink"));
+
+ u->block_size = pa_bytes_per_second(&ss) / 20; /* 50 ms */
+ if (u->block_size <= 0)
+ u->block_size = pa_frame_size(&ss);
+
+ if (!(u->thread = pa_thread_new(thread_func, u))) {
+ pa_log("Failed to create thread.");
+ goto fail;
+ }
+
+ pa_sink_put(u->sink);
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+ if (ma)
+ pa_modargs_free(ma);
+
+ pa__done(m);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->sink)
+ pa_sink_unlink(u->sink);
+
+ if (u->thread) {
+ pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
+ pa_thread_free(u->thread);
+ }
+
+ pa_thread_mq_done(&u->thread_mq);
+
+ if (u->sink)
+ pa_sink_unref(u->sink);
+
+ if (u->rtpoll)
+ pa_rtpoll_free(u->rtpoll);
+
+ pa_xfree(u);
+}
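The null sink above is clocked purely by the timer: whenever the stored timestamp has passed, it drops block_size bytes of rendered audio, advances the timestamp by the equivalent playback time, and re-arms the rtpoll timer on that absolute deadline, so drift does not accumulate. The arithmetic boils down to two libpulse calls; here is a standalone sketch (the 44.1 kHz stereo s16le spec is only an example, not something the module assumes):

#include <stdio.h>
#include <sys/time.h>

#include <pulse/sample.h>
#include <pulse/timeval.h>

int main(void) {
    pa_sample_spec ss;
    struct timeval next;
    size_t block_size;
    pa_usec_t interval;

    ss.format = PA_SAMPLE_S16LE;
    ss.rate = 44100;
    ss.channels = 2;

    /* Same sizing rule as pa__init() above: one 50 ms block per wakeup. */
    block_size = pa_bytes_per_second(&ss) / 20;
    interval = pa_bytes_to_usec(block_size, &ss);   /* 50000 us */

    gettimeofday(&next, NULL);
    pa_timeval_add(&next, interval);  /* when the next block becomes due */

    printf("block: %zu bytes, interval: %llu us\n",
           block_size, (unsigned long long) interval);
    return 0;
}

Compiled against libpulse this prints a 50 ms interval; the thread loop above repeats the same sum once per block against an absolute deadline instead of sleeping a fixed amount.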
diff --git a/src/modules/module-oss.c b/src/modules/module-oss.c
new file mode 100644
index 00000000..a7df8a0c
--- /dev/null
+++ b/src/modules/module-oss.c
@@ -0,0 +1,1499 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+ Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+/* General power management rules:
+ *
+ * When SUSPENDED we close the audio device.
+ *
+ * We make no distinction between IDLE and RUNNING in our handling.
+ *
+ * As long as we are in RUNNING/IDLE state we will *always* write data to
+ * the device. If none is available from the inputs, we write silence
+ * instead.
+ *
+ * If power should be saved on IDLE module-suspend-on-idle should be used.
+ *
+ */
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#ifdef HAVE_SYS_MMAN_H
+#include <sys/mman.h>
+#endif
+
+#include <sys/soundcard.h>
+#include <sys/ioctl.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <limits.h>
+#include <signal.h>
+#include <poll.h>
+
+#include <pulse/xmalloc.h>
+#include <pulse/util.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/thread.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/source.h>
+#include <pulsecore/module.h>
+#include <pulsecore/sample-util.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+#include <pulsecore/macro.h>
+#include <pulsecore/thread-mq.h>
+#include <pulsecore/rtpoll.h>
+
+#include "oss-util.h"
+#include "module-oss-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("OSS Sink/Source");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_USAGE(
+ "sink_name=<name for the sink> "
+ "source_name=<name for the source> "
+ "device=<OSS device> "
+ "record=<enable source?> "
+ "playback=<enable sink?> "
+ "format=<sample format> "
+ "channels=<number of channels> "
+ "rate=<sample rate> "
+ "fragments=<number of fragments> "
+ "fragment_size=<fragment size> "
+ "channel_map=<channel map> "
+ "mmap=<enable memory mapping?>");
+
+#define DEFAULT_DEVICE "/dev/dsp"
+
+struct userdata {
+ pa_core *core;
+ pa_module *module;
+ pa_sink *sink;
+ pa_source *source;
+
+ pa_thread *thread;
+ pa_thread_mq thread_mq;
+ pa_rtpoll *rtpoll;
+
+ char *device_name;
+
+ pa_memchunk memchunk;
+
+ size_t frame_size;
+ uint32_t in_fragment_size, out_fragment_size, in_nfrags, out_nfrags, in_hwbuf_size, out_hwbuf_size;
+ pa_bool_t use_getospace, use_getispace;
+ pa_bool_t use_getodelay;
+
+ pa_bool_t sink_suspended, source_suspended;
+
+ int fd;
+ int mode;
+
+ int mixer_fd;
+ int mixer_devmask;
+
+ int nfrags, frag_size;
+
+ pa_bool_t use_mmap;
+ unsigned out_mmap_current, in_mmap_current;
+ void *in_mmap, *out_mmap;
+ pa_memblock **in_mmap_memblocks, **out_mmap_memblocks;
+
+ int in_mmap_saved_nfrags, out_mmap_saved_nfrags;
+
+ pa_rtpoll_item *rtpoll_item;
+};
+
+static const char* const valid_modargs[] = {
+ "sink_name",
+ "source_name",
+ "device",
+ "record",
+ "playback",
+ "fragments",
+ "fragment_size",
+ "format",
+ "rate",
+ "channels",
+ "channel_map",
+ "mmap",
+ NULL
+};
+
+static void trigger(struct userdata *u, pa_bool_t quick) {
+ int enable_bits = 0, zero = 0;
+
+ pa_assert(u);
+
+ if (u->fd < 0)
+ return;
+
+ pa_log_debug("trigger");
+
+ if (u->source && PA_SOURCE_OPENED(u->source->thread_info.state))
+ enable_bits |= PCM_ENABLE_INPUT;
+
+ if (u->sink && PA_SINK_OPENED(u->sink->thread_info.state))
+ enable_bits |= PCM_ENABLE_OUTPUT;
+
+ pa_log_debug("trigger: %i", enable_bits);
+
+ if (u->use_mmap) {
+
+ if (!quick)
+ ioctl(u->fd, SNDCTL_DSP_SETTRIGGER, &zero);
+
+#ifdef SNDCTL_DSP_HALT
+ if (enable_bits == 0)
+ if (ioctl(u->fd, SNDCTL_DSP_HALT, NULL) < 0)
+ pa_log_warn("SNDCTL_DSP_HALT: %s", pa_cstrerror(errno));
+#endif
+
+ if (ioctl(u->fd, SNDCTL_DSP_SETTRIGGER, &enable_bits) < 0)
+ pa_log_warn("SNDCTL_DSP_SETTRIGGER: %s", pa_cstrerror(errno));
+
+ if (u->sink && !(enable_bits & PCM_ENABLE_OUTPUT)) {
+ pa_log_debug("clearing playback buffer");
+ pa_silence_memory(u->out_mmap, u->out_hwbuf_size, &u->sink->sample_spec);
+ }
+
+ } else {
+
+ if (enable_bits)
+ if (ioctl(u->fd, SNDCTL_DSP_POST, NULL) < 0)
+ pa_log_warn("SNDCTL_DSP_POST: %s", pa_cstrerror(errno));
+
+ if (!quick) {
+ /*
+ * Some crappy drivers do not start the recording until we
+ * read something. Without this snippet, poll will never
+ * register the fd as ready.
+ */
+
+ if (u->source && PA_SOURCE_OPENED(u->source->thread_info.state)) {
+ uint8_t *buf = pa_xnew(uint8_t, u->in_fragment_size);
+ pa_read(u->fd, buf, u->in_fragment_size, NULL);
+ pa_xfree(buf);
+ }
+ }
+ }
+}
+
+static void mmap_fill_memblocks(struct userdata *u, unsigned n) {
+ pa_assert(u);
+ pa_assert(u->out_mmap_memblocks);
+
+/* pa_log("Mmmap writing %u blocks", n); */
+
+ while (n > 0) {
+ pa_memchunk chunk;
+
+ if (u->out_mmap_memblocks[u->out_mmap_current])
+ pa_memblock_unref_fixed(u->out_mmap_memblocks[u->out_mmap_current]);
+
+ chunk.memblock = u->out_mmap_memblocks[u->out_mmap_current] =
+ pa_memblock_new_fixed(
+ u->core->mempool,
+ (uint8_t*) u->out_mmap + u->out_fragment_size * u->out_mmap_current,
+ u->out_fragment_size,
+ 1);
+
+ chunk.length = pa_memblock_get_length(chunk.memblock);
+ chunk.index = 0;
+
+ pa_sink_render_into_full(u->sink, &chunk);
+
+ u->out_mmap_current++;
+ while (u->out_mmap_current >= u->out_nfrags)
+ u->out_mmap_current -= u->out_nfrags;
+
+ n--;
+ }
+}
+
+static int mmap_write(struct userdata *u) {
+ struct count_info info;
+
+ pa_assert(u);
+ pa_assert(u->sink);
+
+/* pa_log("Mmmap writing..."); */
+
+ if (ioctl(u->fd, SNDCTL_DSP_GETOPTR, &info) < 0) {
+ pa_log("SNDCTL_DSP_GETOPTR: %s", pa_cstrerror(errno));
+ return -1;
+ }
+
+ info.blocks += u->out_mmap_saved_nfrags;
+ u->out_mmap_saved_nfrags = 0;
+
+ if (info.blocks > 0)
+ mmap_fill_memblocks(u, info.blocks);
+
+ return info.blocks;
+}
+
+static void mmap_post_memblocks(struct userdata *u, unsigned n) {
+ pa_assert(u);
+ pa_assert(u->in_mmap_memblocks);
+
+/* pa_log("Mmmap reading %u blocks", n); */
+
+ while (n > 0) {
+ pa_memchunk chunk;
+
+ if (!u->in_mmap_memblocks[u->in_mmap_current]) {
+
+ chunk.memblock = u->in_mmap_memblocks[u->in_mmap_current] =
+ pa_memblock_new_fixed(
+ u->core->mempool,
+ (uint8_t*) u->in_mmap + u->in_fragment_size*u->in_mmap_current,
+ u->in_fragment_size,
+ 1);
+
+ chunk.length = pa_memblock_get_length(chunk.memblock);
+ chunk.index = 0;
+
+ pa_source_post(u->source, &chunk);
+ }
+
+ u->in_mmap_current++;
+ while (u->in_mmap_current >= u->in_nfrags)
+ u->in_mmap_current -= u->in_nfrags;
+
+ n--;
+ }
+}
+
+static void mmap_clear_memblocks(struct userdata*u, unsigned n) {
+ unsigned i = u->in_mmap_current;
+
+ pa_assert(u);
+ pa_assert(u->in_mmap_memblocks);
+
+ if (n > u->in_nfrags)
+ n = u->in_nfrags;
+
+ while (n > 0) {
+ if (u->in_mmap_memblocks[i]) {
+ pa_memblock_unref_fixed(u->in_mmap_memblocks[i]);
+ u->in_mmap_memblocks[i] = NULL;
+ }
+
+ i++;
+ while (i >= u->in_nfrags)
+ i -= u->in_nfrags;
+
+ n--;
+ }
+}
+
+static int mmap_read(struct userdata *u) {
+ struct count_info info;
+ pa_assert(u);
+ pa_assert(u->source);
+
+/* pa_log("Mmmap reading..."); */
+
+ if (ioctl(u->fd, SNDCTL_DSP_GETIPTR, &info) < 0) {
+ pa_log("SNDCTL_DSP_GETIPTR: %s", pa_cstrerror(errno));
+ return -1;
+ }
+
+/* pa_log("... %i", info.blocks); */
+
+ info.blocks += u->in_mmap_saved_nfrags;
+ u->in_mmap_saved_nfrags = 0;
+
+ if (info.blocks > 0) {
+ mmap_post_memblocks(u, info.blocks);
+ mmap_clear_memblocks(u, u->in_nfrags/2);
+ }
+
+ return info.blocks;
+}
+
+static pa_usec_t mmap_sink_get_latency(struct userdata *u) {
+ struct count_info info;
+ size_t bpos, n;
+
+ pa_assert(u);
+
+ if (ioctl(u->fd, SNDCTL_DSP_GETOPTR, &info) < 0) {
+ pa_log("SNDCTL_DSP_GETOPTR: %s", pa_cstrerror(errno));
+ return 0;
+ }
+
+ u->out_mmap_saved_nfrags += info.blocks;
+
+ bpos = ((u->out_mmap_current + u->out_mmap_saved_nfrags) * u->out_fragment_size) % u->out_hwbuf_size;
+
+ if (bpos <= (size_t) info.ptr)
+ n = u->out_hwbuf_size - (info.ptr - bpos);
+ else
+ n = bpos - info.ptr;
+
+/* pa_log("n = %u, bpos = %u, ptr = %u, total=%u, fragsize = %u, n_frags = %u\n", n, bpos, (unsigned) info.ptr, total, u->out_fragment_size, u->out_fragments); */
+
+ return pa_bytes_to_usec(n, &u->sink->sample_spec);
+}
+
+static pa_usec_t mmap_source_get_latency(struct userdata *u) {
+ struct count_info info;
+ size_t bpos, n;
+
+ pa_assert(u);
+
+ if (ioctl(u->fd, SNDCTL_DSP_GETIPTR, &info) < 0) {
+ pa_log("SNDCTL_DSP_GETIPTR: %s", pa_cstrerror(errno));
+ return 0;
+ }
+
+ u->in_mmap_saved_nfrags += info.blocks;
+ bpos = ((u->in_mmap_current + u->in_mmap_saved_nfrags) * u->in_fragment_size) % u->in_hwbuf_size;
+
+ if (bpos <= (size_t) info.ptr)
+ n = info.ptr - bpos;
+ else
+ n = u->in_hwbuf_size - bpos + info.ptr;
+
+/* pa_log("n = %u, bpos = %u, ptr = %u, total=%u, fragsize = %u, n_frags = %u\n", n, bpos, (unsigned) info.ptr, total, u->in_fragment_size, u->in_fragments); */
+
+ return pa_bytes_to_usec(n, &u->source->sample_spec);
+}
+
+static pa_usec_t io_sink_get_latency(struct userdata *u) {
+ pa_usec_t r = 0;
+
+ pa_assert(u);
+
+ if (u->use_getodelay) {
+ int arg;
+
+ if (ioctl(u->fd, SNDCTL_DSP_GETODELAY, &arg) < 0) {
+ pa_log_info("Device doesn't support SNDCTL_DSP_GETODELAY: %s", pa_cstrerror(errno));
+ u->use_getodelay = 0;
+ } else
+ r = pa_bytes_to_usec(arg, &u->sink->sample_spec);
+
+ }
+
+ if (!u->use_getodelay && u->use_getospace) {
+ struct audio_buf_info info;
+
+ if (ioctl(u->fd, SNDCTL_DSP_GETOSPACE, &info) < 0) {
+ pa_log_info("Device doesn't support SNDCTL_DSP_GETOSPACE: %s", pa_cstrerror(errno));
+ u->use_getospace = 0;
+ } else
+ r = pa_bytes_to_usec(info.bytes, &u->sink->sample_spec);
+ }
+
+ if (u->memchunk.memblock)
+ r += pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
+
+ return r;
+}
+
+
+static pa_usec_t io_source_get_latency(struct userdata *u) {
+ pa_usec_t r = 0;
+
+ pa_assert(u);
+
+ if (u->use_getispace) {
+ struct audio_buf_info info;
+
+ if (ioctl(u->fd, SNDCTL_DSP_GETISPACE, &info) < 0) {
+ pa_log_info("Device doesn't support SNDCTL_DSP_GETISPACE: %s", pa_cstrerror(errno));
+ u->use_getispace = 0;
+ } else
+ r = pa_bytes_to_usec(info.bytes, &u->source->sample_spec);
+ }
+
+ return r;
+}
+
+static void build_pollfd(struct userdata *u) {
+ struct pollfd *pollfd;
+
+ pa_assert(u);
+ pa_assert(u->fd >= 0);
+
+ if (u->rtpoll_item)
+ pa_rtpoll_item_free(u->rtpoll_item);
+
+ u->rtpoll_item = pa_rtpoll_item_new(u->rtpoll, PA_RTPOLL_NEVER, 1);
+ pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
+ pollfd->fd = u->fd;
+ pollfd->events = 0;
+ pollfd->revents = 0;
+}
+
+static int suspend(struct userdata *u) {
+ pa_assert(u);
+ pa_assert(u->fd >= 0);
+
+ pa_log_info("Suspending...");
+
+ if (u->out_mmap_memblocks) {
+ unsigned i;
+ for (i = 0; i < u->out_nfrags; i++)
+ if (u->out_mmap_memblocks[i]) {
+ pa_memblock_unref_fixed(u->out_mmap_memblocks[i]);
+ u->out_mmap_memblocks[i] = NULL;
+ }
+ }
+
+ if (u->in_mmap_memblocks) {
+ unsigned i;
+ for (i = 0; i < u->in_nfrags; i++)
+ if (u->in_mmap_memblocks[i]) {
+ pa_memblock_unref_fixed(u->in_mmap_memblocks[i]);
+ u->in_mmap_memblocks[i] = NULL;
+ }
+ }
+
+ if (u->in_mmap && u->in_mmap != MAP_FAILED) {
+ munmap(u->in_mmap, u->in_hwbuf_size);
+ u->in_mmap = NULL;
+ }
+
+ if (u->out_mmap && u->out_mmap != MAP_FAILED) {
+ munmap(u->out_mmap, u->out_hwbuf_size);
+ u->out_mmap = NULL;
+ }
+
+ /* Let's suspend */
+ ioctl(u->fd, SNDCTL_DSP_SYNC, NULL);
+ pa_close(u->fd);
+ u->fd = -1;
+
+ if (u->rtpoll_item) {
+ pa_rtpoll_item_free(u->rtpoll_item);
+ u->rtpoll_item = NULL;
+ }
+
+ pa_log_info("Device suspended...");
+
+ return 0;
+}
+
+static int unsuspend(struct userdata *u) {
+ int m;
+ pa_sample_spec ss, *ss_original;
+ int frag_size, in_frag_size, out_frag_size;
+ int in_nfrags, out_nfrags;
+ struct audio_buf_info info;
+
+ pa_assert(u);
+ pa_assert(u->fd < 0);
+
+ m = u->mode;
+
+ pa_log_info("Trying resume...");
+
+ if ((u->fd = pa_oss_open(u->device_name, &m, NULL)) < 0) {
+ pa_log_warn("Resume failed, device busy (%s)", pa_cstrerror(errno));
+ return -1;
+ }
+
+ if (m != u->mode) {
+ pa_log_warn("Resume failed, couldn't open device with original access mode.");
+ goto fail;
+ }
+
+ if (u->nfrags >= 2 && u->frag_size >= 1)
+ if (pa_oss_set_fragments(u->fd, u->nfrags, u->frag_size) < 0) {
+ pa_log_warn("Resume failed, couldn't set original fragment settings.");
+ goto fail;
+ }
+
+ ss = *(ss_original = u->sink ? &u->sink->sample_spec : &u->source->sample_spec);
+ if (pa_oss_auto_format(u->fd, &ss) < 0 || !pa_sample_spec_equal(&ss, ss_original)) {
+ pa_log_warn("Resume failed, couldn't set original sample format settings.");
+ goto fail;
+ }
+
+ if (ioctl(u->fd, SNDCTL_DSP_GETBLKSIZE, &frag_size) < 0) {
+ pa_log_warn("SNDCTL_DSP_GETBLKSIZE: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ in_frag_size = out_frag_size = frag_size;
+ in_nfrags = out_nfrags = u->nfrags;
+
+ if (ioctl(u->fd, SNDCTL_DSP_GETISPACE, &info) >= 0) {
+ in_frag_size = info.fragsize;
+ in_nfrags = info.fragstotal;
+ }
+
+ if (ioctl(u->fd, SNDCTL_DSP_GETOSPACE, &info) >= 0) {
+ out_frag_size = info.fragsize;
+ out_nfrags = info.fragstotal;
+ }
+
+ if ((u->source && (in_frag_size != (int) u->in_fragment_size || in_nfrags != (int) u->in_nfrags)) ||
+ (u->sink && (out_frag_size != (int) u->out_fragment_size || out_nfrags != (int) u->out_nfrags))) {
+ pa_log_warn("Resume failed, input fragment settings don't match.");
+ goto fail;
+ }
+
+ if (u->use_mmap) {
+ if (u->source) {
+ if ((u->in_mmap = mmap(NULL, u->in_hwbuf_size, PROT_READ, MAP_SHARED, u->fd, 0)) == MAP_FAILED) {
+ pa_log("Resume failed, mmap(): %s", pa_cstrerror(errno));
+ goto fail;
+ }
+ }
+
+ if (u->sink) {
+ if ((u->out_mmap = mmap(NULL, u->out_hwbuf_size, PROT_WRITE, MAP_SHARED, u->fd, 0)) == MAP_FAILED) {
+ pa_log("Resume failed, mmap(): %s", pa_cstrerror(errno));
+ if (u->in_mmap && u->in_mmap != MAP_FAILED) {
+ munmap(u->in_mmap, u->in_hwbuf_size);
+ u->in_mmap = NULL;
+ }
+
+ goto fail;
+ }
+
+ pa_silence_memory(u->out_mmap, u->out_hwbuf_size, &ss);
+ }
+ }
+
+ u->out_mmap_current = u->in_mmap_current = 0;
+ u->out_mmap_saved_nfrags = u->in_mmap_saved_nfrags = 0;
+
+ pa_assert(!u->rtpoll_item);
+
+ build_pollfd(u);
+
+ if (u->sink)
+ pa_sink_get_volume(u->sink);
+ if (u->source)
+ pa_source_get_volume(u->source);
+
+ pa_log_info("Resumed successfully...");
+
+ return 0;
+
+fail:
+ pa_close(u->fd);
+ u->fd = -1;
+ return -1;
+}
+
+static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u = PA_SINK(o)->userdata;
+ int ret;
+ pa_bool_t do_trigger = FALSE, quick = TRUE;
+
+ switch (code) {
+
+ case PA_SINK_MESSAGE_GET_LATENCY: {
+ pa_usec_t r = 0;
+
+ if (u->fd >= 0) {
+ if (u->use_mmap)
+ r = mmap_sink_get_latency(u);
+ else
+ r = io_sink_get_latency(u);
+ }
+
+ *((pa_usec_t*) data) = r;
+
+ return 0;
+ }
+
+ case PA_SINK_MESSAGE_SET_STATE:
+
+ switch ((pa_sink_state_t) PA_PTR_TO_UINT(data)) {
+
+ case PA_SINK_SUSPENDED:
+ pa_assert(PA_SINK_OPENED(u->sink->thread_info.state));
+
+ if (!u->source || u->source_suspended) {
+ if (suspend(u) < 0)
+ return -1;
+ }
+
+ do_trigger = TRUE;
+
+ u->sink_suspended = TRUE;
+ break;
+
+ case PA_SINK_IDLE:
+ case PA_SINK_RUNNING:
+
+ if (u->sink->thread_info.state == PA_SINK_INIT) {
+ do_trigger = TRUE;
+ quick = u->source && PA_SOURCE_OPENED(u->source->thread_info.state);
+ }
+
+ if (u->sink->thread_info.state == PA_SINK_SUSPENDED) {
+
+ if (!u->source || u->source_suspended) {
+ if (unsuspend(u) < 0)
+ return -1;
+ quick = FALSE;
+ }
+
+ do_trigger = TRUE;
+
+ u->out_mmap_current = 0;
+ u->out_mmap_saved_nfrags = 0;
+
+ u->sink_suspended = FALSE;
+ }
+
+ break;
+
+ case PA_SINK_UNLINKED:
+ case PA_SINK_INIT:
+ ;
+ }
+
+ break;
+
+ }
+
+ ret = pa_sink_process_msg(o, code, data, offset, chunk);
+
+ if (do_trigger)
+ trigger(u, quick);
+
+ return ret;
+}
+
+static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u = PA_SOURCE(o)->userdata;
+ int ret;
+ pa_bool_t do_trigger = FALSE, quick = TRUE;
+
+ switch (code) {
+
+ case PA_SOURCE_MESSAGE_GET_LATENCY: {
+ pa_usec_t r = 0;
+
+ if (u->fd >= 0) {
+ if (u->use_mmap)
+ r = mmap_source_get_latency(u);
+ else
+ r = io_source_get_latency(u);
+ }
+
+ *((pa_usec_t*) data) = r;
+ return 0;
+ }
+
+ case PA_SOURCE_MESSAGE_SET_STATE:
+
+ switch ((pa_source_state_t) PA_PTR_TO_UINT(data)) {
+ case PA_SOURCE_SUSPENDED:
+ pa_assert(PA_SOURCE_OPENED(u->source->thread_info.state));
+
+ if (!u->sink || u->sink_suspended) {
+ if (suspend(u) < 0)
+ return -1;
+ }
+
+ do_trigger = TRUE;
+
+ u->source_suspended = TRUE;
+ break;
+
+ case PA_SOURCE_IDLE:
+ case PA_SOURCE_RUNNING:
+
+ if (u->source->thread_info.state == PA_SOURCE_INIT) {
+ do_trigger = TRUE;
+ quick = u->sink && PA_SINK_OPENED(u->sink->thread_info.state);
+ }
+
+ if (u->source->thread_info.state == PA_SOURCE_SUSPENDED) {
+
+ if (!u->sink || u->sink_suspended) {
+ if (unsuspend(u) < 0)
+ return -1;
+ quick = FALSE;
+ }
+
+ do_trigger = TRUE;
+
+ u->in_mmap_current = 0;
+ u->in_mmap_saved_nfrags = 0;
+
+ u->source_suspended = FALSE;
+ }
+ break;
+
+ case PA_SOURCE_UNLINKED:
+ case PA_SOURCE_INIT:
+ ;
+
+ }
+ break;
+
+ }
+
+ ret = pa_source_process_msg(o, code, data, offset, chunk);
+
+ if (do_trigger)
+ trigger(u, quick);
+
+ return ret;
+}
+
+static int sink_get_volume(pa_sink *s) {
+ struct userdata *u;
+ int r;
+
+ pa_assert_se(u = s->userdata);
+
+ pa_assert(u->mixer_devmask & (SOUND_MASK_VOLUME|SOUND_MASK_PCM));
+
+ if (u->mixer_devmask & SOUND_MASK_VOLUME)
+ if ((r = pa_oss_get_volume(u->mixer_fd, SOUND_MIXER_READ_VOLUME, &s->sample_spec, &s->volume)) >= 0)
+ return r;
+
+ if (u->mixer_devmask & SOUND_MASK_PCM)
+ if ((r = pa_oss_get_volume(u->mixer_fd, SOUND_MIXER_READ_PCM, &s->sample_spec, &s->volume)) >= 0)
+ return r;
+
+ pa_log_info("Device doesn't support reading mixer settings: %s", pa_cstrerror(errno));
+ return -1;
+}
+
+static int sink_set_volume(pa_sink *s) {
+ struct userdata *u;
+ int r;
+
+ pa_assert_se(u = s->userdata);
+
+ pa_assert(u->mixer_devmask & (SOUND_MASK_VOLUME|SOUND_MASK_PCM));
+
+ if (u->mixer_devmask & SOUND_MASK_VOLUME)
+ if ((r = pa_oss_set_volume(u->mixer_fd, SOUND_MIXER_WRITE_VOLUME, &s->sample_spec, &s->volume)) >= 0)
+ return r;
+
+ if (u->mixer_devmask & SOUND_MASK_PCM)
+ if ((r = pa_oss_set_volume(u->mixer_fd, SOUND_MIXER_WRITE_PCM, &s->sample_spec, &s->volume)) >= 0)
+ return r;
+
+ pa_log_info("Device doesn't support writing mixer settings: %s", pa_cstrerror(errno));
+ return -1;
+}
+
+static int source_get_volume(pa_source *s) {
+ struct userdata *u;
+ int r;
+
+ pa_assert_se(u = s->userdata);
+
+ pa_assert(u->mixer_devmask & (SOUND_MASK_IGAIN|SOUND_MASK_RECLEV));
+
+ if (u->mixer_devmask & SOUND_MASK_IGAIN)
+ if ((r = pa_oss_get_volume(u->mixer_fd, SOUND_MIXER_READ_IGAIN, &s->sample_spec, &s->volume)) >= 0)
+ return r;
+
+ if (u->mixer_devmask & SOUND_MASK_RECLEV)
+ if ((r = pa_oss_get_volume(u->mixer_fd, SOUND_MIXER_READ_RECLEV, &s->sample_spec, &s->volume)) >= 0)
+ return r;
+
+ pa_log_info("Device doesn't support reading mixer settings: %s", pa_cstrerror(errno));
+ return -1;
+}
+
+static int source_set_volume(pa_source *s) {
+ struct userdata *u;
+ int r;
+
+ pa_assert_se(u = s->userdata);
+
+ pa_assert(u->mixer_devmask & (SOUND_MASK_IGAIN|SOUND_MASK_RECLEV));
+
+ if (u->mixer_devmask & SOUND_MASK_IGAIN)
+ if ((r = pa_oss_set_volume(u->mixer_fd, SOUND_MIXER_WRITE_IGAIN, &s->sample_spec, &s->volume)) >= 0)
+ return r;
+
+ if (u->mixer_devmask & SOUND_MASK_RECLEV)
+ if ((r = pa_oss_set_volume(u->mixer_fd, SOUND_MIXER_WRITE_RECLEV, &s->sample_spec, &s->volume)) >= 0)
+ return r;
+
+ pa_log_info("Device doesn't support writing mixer settings: %s", pa_cstrerror(errno));
+ return -1;
+}
+
+static void thread_func(void *userdata) {
+ struct userdata *u = userdata;
+ int write_type = 0, read_type = 0;
+ unsigned short revents = 0;
+
+ pa_assert(u);
+
+ pa_log_debug("Thread starting up");
+
+ if (u->core->realtime_scheduling)
+ pa_make_realtime(u->core->realtime_priority);
+
+ pa_thread_mq_install(&u->thread_mq);
+ pa_rtpoll_install(u->rtpoll);
+
+ for (;;) {
+ int ret;
+
+/* pa_log("loop"); */
+
+ /* Render some data and write it to the dsp */
+
+ if (u->sink && PA_SINK_OPENED(u->sink->thread_info.state) && ((revents & POLLOUT) || u->use_mmap || u->use_getospace)) {
+
+ if (u->use_mmap) {
+
+ if ((ret = mmap_write(u)) < 0)
+ goto fail;
+
+ revents &= ~POLLOUT;
+
+ if (ret > 0)
+ continue;
+
+ } else {
+ ssize_t l;
+ pa_bool_t loop = FALSE, work_done = FALSE;
+
+ l = u->out_fragment_size;
+
+ if (u->use_getospace) {
+ audio_buf_info info;
+
+ if (ioctl(u->fd, SNDCTL_DSP_GETOSPACE, &info) < 0) {
+ pa_log_info("Device doesn't support SNDCTL_DSP_GETOSPACE: %s", pa_cstrerror(errno));
+ u->use_getospace = FALSE;
+ } else {
+ l = info.bytes;
+
+ /* We loop only if GETOSPACE worked and we
+ * actually *know* that we can write more than
+ * one fragment at a time */
+ loop = TRUE;
+ }
+ }
+
+ /* Round down to multiples of the fragment size,
+ * because OSS needs that (at least some versions
+ * do) */
+ l = (l/u->out_fragment_size) * u->out_fragment_size;
+
+ /* Hmm, so poll() signalled us that we can write
+ * something, but GETOSPACE told us there was nothing?
+ * Make the best of it and try to write some data anyway,
+ * to avoid spinning forever. */
+ if (l <= 0 && (revents & POLLOUT)) {
+ l = u->out_fragment_size;
+ loop = FALSE;
+ }
+
+ while (l > 0) {
+ void *p;
+ ssize_t t;
+
+ if (u->memchunk.length <= 0)
+ pa_sink_render(u->sink, l, &u->memchunk);
+
+ pa_assert(u->memchunk.length > 0);
+
+ p = pa_memblock_acquire(u->memchunk.memblock);
+ t = pa_write(u->fd, (uint8_t*) p + u->memchunk.index, u->memchunk.length, &write_type);
+ pa_memblock_release(u->memchunk.memblock);
+
+/* pa_log("wrote %i bytes of %u", t, l); */
+
+ pa_assert(t != 0);
+
+ if (t < 0) {
+
+ if (errno == EINTR)
+ continue;
+
+ else if (errno == EAGAIN) {
+ pa_log_debug("EAGAIN");
+
+ revents &= ~POLLOUT;
+ break;
+
+ } else {
+ pa_log("Failed to write data to DSP: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ } else {
+
+ u->memchunk.index += t;
+ u->memchunk.length -= t;
+
+ if (u->memchunk.length <= 0) {
+ pa_memblock_unref(u->memchunk.memblock);
+ pa_memchunk_reset(&u->memchunk);
+ }
+
+ l -= t;
+
+ revents &= ~POLLOUT;
+ work_done = TRUE;
+ }
+
+ if (!loop)
+ break;
+ }
+
+ if (work_done)
+ continue;
+ }
+ }
+
+ /* Try to read some data and pass it on to the source driver. */
+
+ if (u->source && PA_SOURCE_OPENED(u->source->thread_info.state) && ((revents & POLLIN) || u->use_mmap || u->use_getispace)) {
+
+ if (u->use_mmap) {
+
+ if ((ret = mmap_read(u)) < 0)
+ goto fail;
+
+ revents &= ~POLLIN;
+
+ if (ret > 0)
+ continue;
+
+ } else {
+
+ void *p;
+ ssize_t l;
+ pa_memchunk memchunk;
+ pa_bool_t loop = FALSE, work_done = FALSE;
+
+ l = u->in_fragment_size;
+
+ if (u->use_getispace) {
+ audio_buf_info info;
+
+ if (ioctl(u->fd, SNDCTL_DSP_GETISPACE, &info) < 0) {
+ pa_log_info("Device doesn't support SNDCTL_DSP_GETISPACE: %s", pa_cstrerror(errno));
+ u->use_getispace = FALSE;
+ } else {
+ l = info.bytes;
+ loop = TRUE;
+ }
+ }
+
+ l = (l/u->in_fragment_size) * u->in_fragment_size;
+
+ if (l <= 0 && (revents & POLLIN)) {
+ l = u->in_fragment_size;
+ loop = FALSE;
+ }
+
+ while (l > 0) {
+ ssize_t t, k;
+
+ pa_assert(l > 0);
+
+ memchunk.memblock = pa_memblock_new(u->core->mempool, (size_t) -1);
+
+ k = pa_memblock_get_length(memchunk.memblock);
+
+ if (k > l)
+ k = l;
+
+ k = (k/u->frame_size)*u->frame_size;
+
+ p = pa_memblock_acquire(memchunk.memblock);
+ t = pa_read(u->fd, p, k, &read_type);
+ pa_memblock_release(memchunk.memblock);
+
+ pa_assert(t != 0); /* EOF cannot happen */
+
+/* pa_log("read %i bytes of %u", t, l); */
+
+ if (t < 0) {
+ pa_memblock_unref(memchunk.memblock);
+
+ if (errno == EINTR)
+ continue;
+
+ else if (errno == EAGAIN) {
+ pa_log_debug("EAGAIN");
+
+ revents &= ~POLLIN;
+ break;
+
+ } else {
+ pa_log("Failed to read data from DSP: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ } else {
+ memchunk.index = 0;
+ memchunk.length = t;
+
+ pa_source_post(u->source, &memchunk);
+ pa_memblock_unref(memchunk.memblock);
+
+ l -= t;
+
+ revents &= ~POLLIN;
+ work_done = TRUE;
+ }
+
+ if (!loop)
+ break;
+ }
+
+ if (work_done)
+ continue;
+ }
+ }
+
+/* pa_log("loop2 revents=%i", revents); */
+
+ if (u->rtpoll_item) {
+ struct pollfd *pollfd;
+
+ pa_assert(u->fd >= 0);
+
+ pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
+ pollfd->events =
+ ((u->source && PA_SOURCE_OPENED(u->source->thread_info.state)) ? POLLIN : 0) |
+ ((u->sink && PA_SINK_OPENED(u->sink->thread_info.state)) ? POLLOUT : 0);
+ }
+
+ /* Hmm, nothing to do. Let's sleep */
+ if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
+ goto fail;
+
+ if (ret == 0)
+ goto finish;
+
+ if (u->rtpoll_item) {
+ struct pollfd *pollfd;
+
+ pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
+
+ if (pollfd->revents & ~(POLLOUT|POLLIN)) {
+ pa_log("DSP shutdown.");
+ goto fail;
+ }
+
+ revents = pollfd->revents;
+ } else
+ revents = 0;
+ }
+
+fail:
+ /* If this wasn't a regular exit from the loop we have to continue
+ * processing messages until we receive PA_MESSAGE_SHUTDOWN */
+ pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
+ pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
+
+finish:
+ pa_log_debug("Thread shutting down");
+}
+
+int pa__init(pa_module*m) {
+
+ struct audio_buf_info info;
+ struct userdata *u = NULL;
+ const char *dev;
+ int fd = -1;
+ int nfrags, frag_size;
+ int mode, caps;
+ pa_bool_t record = TRUE, playback = TRUE, use_mmap = TRUE;
+ pa_sample_spec ss;
+ pa_channel_map map;
+ pa_modargs *ma = NULL;
+ char hwdesc[64], *t;
+ const char *name;
+ int namereg_fail;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments.");
+ goto fail;
+ }
+
+ if (pa_modargs_get_value_boolean(ma, "record", &record) < 0 || pa_modargs_get_value_boolean(ma, "playback", &playback) < 0) {
+ pa_log("record= and playback= expect boolean argument.");
+ goto fail;
+ }
+
+ if (!playback && !record) {
+ pa_log("Neither playback nor record enabled for device.");
+ goto fail;
+ }
+
+ mode = (playback && record) ? O_RDWR : (playback ? O_WRONLY : (record ? O_RDONLY : 0));
+
+ ss = m->core->default_sample_spec;
+ if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_OSS) < 0) {
+ pa_log("Failed to parse sample specification or channel map");
+ goto fail;
+ }
+
+ nfrags = m->core->default_n_fragments;
+ frag_size = pa_usec_to_bytes(m->core->default_fragment_size_msec*1000, &ss);
+ if (frag_size <= 0)
+ frag_size = pa_frame_size(&ss);
+
+ if (pa_modargs_get_value_s32(ma, "fragments", &nfrags) < 0 || pa_modargs_get_value_s32(ma, "fragment_size", &frag_size) < 0) {
+ pa_log("Failed to parse fragments arguments");
+ goto fail;
+ }
+
+ if (pa_modargs_get_value_boolean(ma, "mmap", &use_mmap) < 0) {
+ pa_log("Failed to parse mmap argument.");
+ goto fail;
+ }
+
+ if ((fd = pa_oss_open(dev = pa_modargs_get_value(ma, "device", DEFAULT_DEVICE), &mode, &caps)) < 0)
+ goto fail;
+
+ if (use_mmap && (!(caps & DSP_CAP_MMAP) || !(caps & DSP_CAP_TRIGGER))) {
+ pa_log_info("OSS device not mmap capable, falling back to UNIX read/write mode.");
+ use_mmap = 0;
+ }
+
+ if (use_mmap && mode == O_WRONLY) {
+ pa_log_info("Device opened for playback only, cannot do memory mapping, falling back to UNIX write() mode.");
+ use_mmap = 0;
+ }
+
+ if (pa_oss_get_hw_description(dev, hwdesc, sizeof(hwdesc)) >= 0)
+ pa_log_info("Hardware name is '%s'.", hwdesc);
+ else
+ hwdesc[0] = 0;
+
+ pa_log_info("Device opened in %s mode.", mode == O_WRONLY ? "O_WRONLY" : (mode == O_RDONLY ? "O_RDONLY" : "O_RDWR"));
+
+ if (nfrags >= 2 && frag_size >= 1)
+ if (pa_oss_set_fragments(fd, nfrags, frag_size) < 0)
+ goto fail;
+
+ if (pa_oss_auto_format(fd, &ss) < 0)
+ goto fail;
+
+ if (ioctl(fd, SNDCTL_DSP_GETBLKSIZE, &frag_size) < 0) {
+ pa_log("SNDCTL_DSP_GETBLKSIZE: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+ pa_assert(frag_size > 0);
+
+ u = pa_xnew0(struct userdata, 1);
+ u->core = m->core;
+ u->module = m;
+ m->userdata = u;
+ u->fd = fd;
+ u->mixer_fd = -1;
+ u->use_getospace = u->use_getispace = 1;
+ u->use_getodelay = 1;
+ u->mode = mode;
+ u->frame_size = pa_frame_size(&ss);
+ u->device_name = pa_xstrdup(dev);
+ u->in_nfrags = u->out_nfrags = u->nfrags = nfrags;
+ u->out_fragment_size = u->in_fragment_size = u->frag_size = frag_size;
+ u->use_mmap = use_mmap;
+ pa_thread_mq_init(&u->thread_mq, m->core->mainloop);
+ u->rtpoll = pa_rtpoll_new();
+ pa_rtpoll_item_new_asyncmsgq(u->rtpoll, PA_RTPOLL_EARLY, u->thread_mq.inq);
+ u->rtpoll_item = NULL;
+ build_pollfd(u);
+
+ if (ioctl(fd, SNDCTL_DSP_GETISPACE, &info) >= 0) {
+ pa_log_info("Input -- %u fragments of size %u.", info.fragstotal, info.fragsize);
+ u->in_fragment_size = info.fragsize;
+ u->in_nfrags = info.fragstotal;
+ u->use_getispace = 1;
+ }
+
+ if (ioctl(fd, SNDCTL_DSP_GETOSPACE, &info) >= 0) {
+ pa_log_info("Output -- %u fragments of size %u.", info.fragstotal, info.fragsize);
+ u->out_fragment_size = info.fragsize;
+ u->out_nfrags = info.fragstotal;
+ u->use_getospace = 1;
+ }
+
+ u->in_hwbuf_size = u->in_nfrags * u->in_fragment_size;
+ u->out_hwbuf_size = u->out_nfrags * u->out_fragment_size;
+
+ if (mode != O_WRONLY) {
+ char *name_buf = NULL;
+
+ if (use_mmap) {
+ if ((u->in_mmap = mmap(NULL, u->in_hwbuf_size, PROT_READ, MAP_SHARED, fd, 0)) == MAP_FAILED) {
+ pa_log_warn("mmap(PROT_READ) failed, reverting to non-mmap mode: %s", pa_cstrerror(errno));
+ use_mmap = u->use_mmap = 0;
+ u->in_mmap = NULL;
+ } else
+ pa_log_debug("Successfully mmap()ed input buffer.");
+ }
+
+ if ((name = pa_modargs_get_value(ma, "source_name", NULL)))
+ namereg_fail = 1;
+ else {
+ name = name_buf = pa_sprintf_malloc("oss_input.%s", pa_path_get_filename(dev));
+ namereg_fail = 0;
+ }
+
+ u->source = pa_source_new(m->core, __FILE__, name, namereg_fail, &ss, &map);
+ pa_xfree(name_buf);
+ if (!u->source) {
+ pa_log("Failed to create source object");
+ goto fail;
+ }
+
+ u->source->parent.process_msg = source_process_msg;
+ u->source->userdata = u;
+
+ pa_source_set_module(u->source, m);
+ pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
+ pa_source_set_rtpoll(u->source, u->rtpoll);
+ pa_source_set_description(u->source, t = pa_sprintf_malloc(
+ "OSS PCM on %s%s%s%s%s",
+ dev,
+ hwdesc[0] ? " (" : "",
+ hwdesc[0] ? hwdesc : "",
+ hwdesc[0] ? ")" : "",
+ use_mmap ? " via DMA" : ""));
+ pa_xfree(t);
+ u->source->flags = PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY;
+ u->source->refresh_volume = TRUE;
+
+ if (use_mmap)
+ u->in_mmap_memblocks = pa_xnew0(pa_memblock*, u->in_nfrags);
+ }
+
+ if (mode != O_RDONLY) {
+ char *name_buf = NULL;
+
+ if (use_mmap) {
+ if ((u->out_mmap = mmap(NULL, u->out_hwbuf_size, PROT_WRITE, MAP_SHARED, fd, 0)) == MAP_FAILED) {
+ if (mode == O_RDWR) {
+ pa_log_debug("mmap() failed for input. Changing to O_WRONLY mode.");
+ mode = O_WRONLY;
+ goto go_on;
+ } else {
+ pa_log_warn("mmap(PROT_WRITE) failed, reverting to non-mmap mode: %s", pa_cstrerror(errno));
+ u->use_mmap = (use_mmap = FALSE);
+ u->out_mmap = NULL;
+ }
+ } else {
+ pa_log_debug("Successfully mmap()ed output buffer.");
+ pa_silence_memory(u->out_mmap, u->out_hwbuf_size, &ss);
+ }
+ }
+
+ if ((name = pa_modargs_get_value(ma, "sink_name", NULL)))
+ namereg_fail = 1;
+ else {
+ name = name_buf = pa_sprintf_malloc("oss_output.%s", pa_path_get_filename(dev));
+ namereg_fail = 0;
+ }
+
+ u->sink = pa_sink_new(m->core, __FILE__, name, namereg_fail, &ss, &map);
+ pa_xfree(name_buf);
+ if (!u->sink) {
+ pa_log("Failed to create sink object");
+ goto fail;
+ }
+
+ u->sink->parent.process_msg = sink_process_msg;
+ u->sink->userdata = u;
+
+ pa_sink_set_module(u->sink, m);
+ pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
+ pa_sink_set_rtpoll(u->sink, u->rtpoll);
+ pa_sink_set_description(u->sink, t = pa_sprintf_malloc(
+ "OSS PCM on %s%s%s%s%s",
+ dev,
+ hwdesc[0] ? " (" : "",
+ hwdesc[0] ? hwdesc : "",
+ hwdesc[0] ? ")" : "",
+ use_mmap ? " via DMA" : ""));
+ pa_xfree(t);
+ u->sink->flags = PA_SINK_HARDWARE|PA_SINK_LATENCY;
+ u->sink->refresh_volume = TRUE;
+
+ if (use_mmap)
+ u->out_mmap_memblocks = pa_xnew0(pa_memblock*, u->out_nfrags);
+ }
+
+ if ((u->mixer_fd = pa_oss_open_mixer_for_device(u->device_name)) >= 0) {
+ int do_close = 1;
+ u->mixer_devmask = 0;
+
+ if (ioctl(u->mixer_fd, SOUND_MIXER_READ_DEVMASK, &u->mixer_devmask) < 0)
+ pa_log_warn("SOUND_MIXER_READ_DEVMASK failed: %s", pa_cstrerror(errno));
+
+ else {
+ if (u->sink && (u->mixer_devmask & (SOUND_MASK_VOLUME|SOUND_MASK_PCM))) {
+ pa_log_debug("Found hardware mixer track for playback.");
+ u->sink->flags |= PA_SINK_HW_VOLUME_CTRL;
+ u->sink->get_volume = sink_get_volume;
+ u->sink->set_volume = sink_set_volume;
+ do_close = 0;
+ }
+
+ if (u->source && (u->mixer_devmask & (SOUND_MASK_RECLEV|SOUND_MASK_IGAIN))) {
+ pa_log_debug("Found hardware mixer track for recording.");
+ u->source->flags |= PA_SOURCE_HW_VOLUME_CTRL;
+ u->source->get_volume = source_get_volume;
+ u->source->set_volume = source_set_volume;
+ do_close = 0;
+ }
+ }
+
+ if (do_close) {
+ pa_close(u->mixer_fd);
+ u->mixer_fd = -1;
+ }
+ }
+
+go_on:
+
+ pa_assert(u->source || u->sink);
+
+ pa_memchunk_reset(&u->memchunk);
+
+ if (!(u->thread = pa_thread_new(thread_func, u))) {
+ pa_log("Failed to create thread.");
+ goto fail;
+ }
+
+ /* Read mixer settings */
+ if (u->sink && u->sink->get_volume)
+ sink_get_volume(u->sink);
+ if (u->source && u->source->get_volume)
+ source_get_volume(u->source);
+
+ if (u->sink)
+ pa_sink_put(u->sink);
+ if (u->source)
+ pa_source_put(u->source);
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+
+ if (u)
+ pa__done(m);
+ else if (fd >= 0)
+ pa_close(fd);
+
+ if (ma)
+ pa_modargs_free(ma);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->sink)
+ pa_sink_unlink(u->sink);
+
+ if (u->source)
+ pa_source_unlink(u->source);
+
+ if (u->thread) {
+ pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
+ pa_thread_free(u->thread);
+ }
+
+ pa_thread_mq_done(&u->thread_mq);
+
+ if (u->sink)
+ pa_sink_unref(u->sink);
+
+ if (u->source)
+ pa_source_unref(u->source);
+
+ if (u->memchunk.memblock)
+ pa_memblock_unref(u->memchunk.memblock);
+
+ if (u->rtpoll_item)
+ pa_rtpoll_item_free(u->rtpoll_item);
+
+ if (u->rtpoll)
+ pa_rtpoll_free(u->rtpoll);
+
+ if (u->out_mmap_memblocks) {
+ unsigned i;
+ for (i = 0; i < u->out_nfrags; i++)
+ if (u->out_mmap_memblocks[i])
+ pa_memblock_unref_fixed(u->out_mmap_memblocks[i]);
+ pa_xfree(u->out_mmap_memblocks);
+ }
+
+ if (u->in_mmap_memblocks) {
+ unsigned i;
+ for (i = 0; i < u->in_nfrags; i++)
+ if (u->in_mmap_memblocks[i])
+ pa_memblock_unref_fixed(u->in_mmap_memblocks[i]);
+ pa_xfree(u->in_mmap_memblocks);
+ }
+
+ if (u->in_mmap && u->in_mmap != MAP_FAILED)
+ munmap(u->in_mmap, u->in_hwbuf_size);
+
+ if (u->out_mmap && u->out_mmap != MAP_FAILED)
+ munmap(u->out_mmap, u->out_hwbuf_size);
+
+ if (u->fd >= 0)
+ pa_close(u->fd);
+
+ if (u->mixer_fd >= 0)
+ pa_close(u->mixer_fd);
+
+ pa_xfree(u->device_name);
+
+ pa_xfree(u);
+}
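The fragments= and fragment_size= arguments are handed to pa_oss_set_fragments() (in oss-util.c, which is not part of this hunk). For readers new to OSS: SNDCTL_DSP_SETFRAGMENT takes a single int carrying the requested fragment count in the upper 16 bits and log2 of the fragment size in the lower 16, and the driver may still round both. The helper presumably does something along these lines; this is a sketch of the ioctl encoding, not the actual pa_oss_set_fragments() implementation:

#include <sys/ioctl.h>
#include <sys/soundcard.h>

/* Ask for nfrags fragments of roughly frag_size bytes each. The values
 * actually granted are reported later by SNDCTL_DSP_GETBLKSIZE and
 * SNDCTL_DSP_GETOSPACE/GETISPACE, which pa__init() above queries. */
static int set_fragments(int fd, int nfrags, int frag_size) {
    int shift = 0, arg;

    while ((1 << shift) < frag_size)
        shift++;

    arg = (nfrags << 16) | shift;   /* 0xCCCCSSSS: count | log2(size) */

    return ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &arg);
}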
diff --git a/src/modules/module-pipe-sink.c b/src/modules/module-pipe-sink.c
new file mode 100644
index 00000000..e720c8ad
--- /dev/null
+++ b/src/modules/module-pipe-sink.c
@@ -0,0 +1,333 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sys/ioctl.h>
+#include <poll.h>
+
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/module.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+#include <pulsecore/thread.h>
+#include <pulsecore/thread-mq.h>
+#include <pulsecore/rtpoll.h>
+
+#include "module-pipe-sink-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("UNIX pipe sink");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_USAGE(
+ "sink_name=<name for the sink> "
+ "file=<path of the FIFO> "
+ "format=<sample format> "
+ "channels=<number of channels> "
+ "rate=<sample rate>"
+ "channel_map=<channel map>");
+
+#define DEFAULT_FILE_NAME "/tmp/music.output"
+#define DEFAULT_SINK_NAME "fifo_output"
+
+struct userdata {
+ pa_core *core;
+ pa_module *module;
+ pa_sink *sink;
+
+ pa_thread *thread;
+ pa_thread_mq thread_mq;
+ pa_rtpoll *rtpoll;
+
+ char *filename;
+ int fd;
+
+ pa_memchunk memchunk;
+
+ pa_rtpoll_item *rtpoll_item;
+};
+
+static const char* const valid_modargs[] = {
+ "file",
+ "rate",
+ "format",
+ "channels",
+ "sink_name",
+ "channel_map",
+ NULL
+};
+
+static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u = PA_SINK(o)->userdata;
+
+ switch (code) {
+
+ case PA_SINK_MESSAGE_GET_LATENCY: {
+ size_t n = 0;
+ int l;
+
+#ifdef TIOCINQ
+ if (ioctl(u->fd, TIOCINQ, &l) >= 0 && l > 0)
+ n = (size_t) l;
+#endif
+
+ n += u->memchunk.length;
+
+ *((pa_usec_t*) data) = pa_bytes_to_usec(n, &u->sink->sample_spec);
+ break;
+ }
+ }
+
+ return pa_sink_process_msg(o, code, data, offset, chunk);
+}
+
+static void thread_func(void *userdata) {
+ struct userdata *u = userdata;
+ int write_type = 0;
+
+ pa_assert(u);
+
+ pa_log_debug("Thread starting up");
+
+ pa_thread_mq_install(&u->thread_mq);
+ pa_rtpoll_install(u->rtpoll);
+
+ for (;;) {
+ struct pollfd *pollfd;
+ int ret;
+
+ pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
+
+ /* Render some data and write it to the fifo */
+ if (u->sink->thread_info.state == PA_SINK_RUNNING && pollfd->revents) {
+ ssize_t l;
+ void *p;
+
+ if (u->memchunk.length <= 0)
+ pa_sink_render(u->sink, PIPE_BUF, &u->memchunk);
+
+ pa_assert(u->memchunk.length > 0);
+
+ p = pa_memblock_acquire(u->memchunk.memblock);
+ l = pa_write(u->fd, (uint8_t*) p + u->memchunk.index, u->memchunk.length, &write_type);
+ pa_memblock_release(u->memchunk.memblock);
+
+ pa_assert(l != 0);
+
+ if (l < 0) {
+
+ if (errno == EINTR)
+ continue;
+ else if (errno != EAGAIN) {
+ pa_log("Failed to write data to FIFO: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ } else {
+
+ u->memchunk.index += l;
+ u->memchunk.length -= l;
+
+ if (u->memchunk.length <= 0) {
+ pa_memblock_unref(u->memchunk.memblock);
+ pa_memchunk_reset(&u->memchunk);
+ }
+
+ pollfd->revents = 0;
+ }
+ }
+
+ /* Hmm, nothing to do. Let's sleep */
+ pollfd->events = u->sink->thread_info.state == PA_SINK_RUNNING ? POLLOUT : 0;
+
+ if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
+ goto fail;
+
+ if (ret == 0)
+ goto finish;
+
+ pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
+
+ if (pollfd->revents & ~POLLOUT) {
+ pa_log("FIFO shutdown.");
+ goto fail;
+ }
+ }
+
+fail:
+ /* If this wasn't a regular exit from the loop we have to continue
+ * processing messages until we receive PA_MESSAGE_SHUTDOWN */
+ pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
+ pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
+
+finish:
+ pa_log_debug("Thread shutting down");
+}
+
+int pa__init(pa_module*m) {
+ struct userdata *u;
+ struct stat st;
+ pa_sample_spec ss;
+ pa_channel_map map;
+ pa_modargs *ma;
+ char *t;
+ struct pollfd *pollfd;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments.");
+ goto fail;
+ }
+
+ ss = m->core->default_sample_spec;
+ if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_DEFAULT) < 0) {
+ pa_log("Invalid sample format specification or channel map");
+ goto fail;
+ }
+
+ u = pa_xnew0(struct userdata, 1);
+ u->core = m->core;
+ u->module = m;
+ m->userdata = u;
+ pa_memchunk_reset(&u->memchunk);
+ pa_thread_mq_init(&u->thread_mq, m->core->mainloop);
+ u->rtpoll = pa_rtpoll_new();
+ pa_rtpoll_item_new_asyncmsgq(u->rtpoll, PA_RTPOLL_EARLY, u->thread_mq.inq);
+
+ u->filename = pa_xstrdup(pa_modargs_get_value(ma, "file", DEFAULT_FILE_NAME));
+
+ mkfifo(u->filename, 0666);
+ if ((u->fd = open(u->filename, O_RDWR|O_NOCTTY)) < 0) {
+ pa_log("open('%s'): %s", u->filename, pa_cstrerror(errno));
+ goto fail;
+ }
+
+ pa_make_fd_cloexec(u->fd);
+ pa_make_fd_nonblock(u->fd);
+
+ if (fstat(u->fd, &st) < 0) {
+ pa_log("fstat('%s'): %s", u->filename, pa_cstrerror(errno));
+ goto fail;
+ }
+
+ if (!S_ISFIFO(st.st_mode)) {
+ pa_log("'%s' is not a FIFO.", u->filename);
+ goto fail;
+ }
+
+ if (!(u->sink = pa_sink_new(m->core, __FILE__, pa_modargs_get_value(ma, "sink_name", DEFAULT_SINK_NAME), 0, &ss, &map))) {
+ pa_log("Failed to create sink.");
+ goto fail;
+ }
+
+ u->sink->parent.process_msg = sink_process_msg;
+ u->sink->userdata = u;
+ u->sink->flags = PA_SINK_LATENCY;
+
+ pa_sink_set_module(u->sink, m);
+ pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
+ pa_sink_set_rtpoll(u->sink, u->rtpoll);
+ pa_sink_set_description(u->sink, t = pa_sprintf_malloc("Unix FIFO sink '%s'", u->filename));
+ pa_xfree(t);
+
+ u->rtpoll_item = pa_rtpoll_item_new(u->rtpoll, PA_RTPOLL_NEVER, 1);
+ pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
+ pollfd->fd = u->fd;
+ pollfd->events = pollfd->revents = 0;
+
+ if (!(u->thread = pa_thread_new(thread_func, u))) {
+ pa_log("Failed to create thread.");
+ goto fail;
+ }
+
+ pa_sink_put(u->sink);
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+ if (ma)
+ pa_modargs_free(ma);
+
+ pa__done(m);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->sink)
+ pa_sink_unlink(u->sink);
+
+ if (u->thread) {
+ pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
+ pa_thread_free(u->thread);
+ }
+
+ pa_thread_mq_done(&u->thread_mq);
+
+ if (u->sink)
+ pa_sink_unref(u->sink);
+
+ if (u->memchunk.memblock)
+ pa_memblock_unref(u->memchunk.memblock);
+
+ if (u->rtpoll_item)
+ pa_rtpoll_item_free(u->rtpoll_item);
+
+ if (u->rtpoll)
+ pa_rtpoll_free(u->rtpoll);
+
+ if (u->filename) {
+ unlink(u->filename);
+ pa_xfree(u->filename);
+ }
+
+ if (u->fd >= 0)
+ pa_assert_se(pa_close(u->fd) == 0);
+
+ pa_xfree(u);
+}
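On the other end of the FIFO the sink is nothing more than a stream of raw interleaved samples in whatever format=/rate=/channels= were negotiated at load time. As an illustration only (assuming the DEFAULT_FILE_NAME path above), a consumer can be as small as:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void) {
    char buf[4096];
    ssize_t n;
    int fd;

    /* Path matches DEFAULT_FILE_NAME; adjust if file= was given. */
    if ((fd = open("/tmp/music.output", O_RDONLY)) < 0) {
        perror("open");
        return 1;
    }

    while ((n = read(fd, buf, sizeof(buf))) > 0)
        fwrite(buf, 1, (size_t) n, stdout);   /* raw PCM to stdout */

    close(fd);
    return 0;
}

The raw output can then be piped into any tool that accepts headerless PCM in the configured sample format.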
diff --git a/src/modules/module-pipe-source.c b/src/modules/module-pipe-source.c
new file mode 100644
index 00000000..02935649
--- /dev/null
+++ b/src/modules/module-pipe-source.c
@@ -0,0 +1,309 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sys/poll.h>
+
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/source.h>
+#include <pulsecore/module.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+#include <pulsecore/thread.h>
+#include <pulsecore/thread-mq.h>
+#include <pulsecore/rtpoll.h>
+
+#include "module-pipe-source-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("UNIX pipe source");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_USAGE(
+ "source_name=<name for the source> "
+ "file=<path of the FIFO> "
+ "format=<sample format> "
+ "channels=<number of channels> "
+ "rate=<sample rate> "
+ "channel_map=<channel map>");
+
+#define DEFAULT_FILE_NAME "/tmp/music.input"
+#define DEFAULT_SOURCE_NAME "fifo_input"
+
+struct userdata {
+ pa_core *core;
+ pa_module *module;
+ pa_source *source;
+
+ pa_thread *thread;
+ pa_thread_mq thread_mq;
+ pa_rtpoll *rtpoll;
+
+ char *filename;
+ int fd;
+
+ pa_memchunk memchunk;
+
+ pa_rtpoll_item *rtpoll_item;
+};
+
+static const char* const valid_modargs[] = {
+ "file",
+ "rate",
+ "channels",
+ "format",
+ "source_name",
+ "channel_map",
+ NULL
+};
+
+static void thread_func(void *userdata) {
+ struct userdata *u = userdata;
+ int read_type = 0;
+
+ pa_assert(u);
+
+ pa_log_debug("Thread starting up");
+
+ pa_thread_mq_install(&u->thread_mq);
+ pa_rtpoll_install(u->rtpoll);
+
+ for (;;) {
+ int ret;
+ struct pollfd *pollfd;
+
+ pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
+
+ /* Try to read some data and pass it on to the source driver */
+ if (u->source->thread_info.state == PA_SOURCE_RUNNING && pollfd->revents) {
+ ssize_t l;
+ void *p;
+
+ if (!u->memchunk.memblock) {
+ u->memchunk.memblock = pa_memblock_new(u->core->mempool, PIPE_BUF);
+ u->memchunk.index = u->memchunk.length = 0;
+ }
+
+ pa_assert(pa_memblock_get_length(u->memchunk.memblock) > u->memchunk.index);
+
+ p = pa_memblock_acquire(u->memchunk.memblock);
+ l = pa_read(u->fd, (uint8_t*) p + u->memchunk.index, pa_memblock_get_length(u->memchunk.memblock) - u->memchunk.index, &read_type);
+ pa_memblock_release(u->memchunk.memblock);
+
+ pa_assert(l != 0); /* EOF cannot happen, since we opened the fifo for both reading and writing */
+
+ if (l < 0) {
+
+ if (errno == EINTR)
+ continue;
+ else if (errno != EAGAIN) {
+                pa_log("Failed to read data from FIFO: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ } else {
+
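+                /* Post only the bytes we just read; once the block has been
+                 * filled up completely, release it and start over */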
+ u->memchunk.length = l;
+ pa_source_post(u->source, &u->memchunk);
+ u->memchunk.index += l;
+
+ if (u->memchunk.index >= pa_memblock_get_length(u->memchunk.memblock)) {
+ pa_memblock_unref(u->memchunk.memblock);
+ pa_memchunk_reset(&u->memchunk);
+ }
+
+ pollfd->revents = 0;
+ }
+ }
+
+ /* Hmm, nothing to do. Let's sleep */
+ pollfd->events = u->source->thread_info.state == PA_SOURCE_RUNNING ? POLLIN : 0;
+
+ if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)
+ goto fail;
+
+ if (ret == 0)
+ goto finish;
+
+ pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
+ if (pollfd->revents & ~POLLIN) {
+ pa_log("FIFO shutdown.");
+ goto fail;
+ }
+ }
+
+fail:
+    /* If this was not a regular exit from the loop we have to continue
+     * processing messages until we receive PA_MESSAGE_SHUTDOWN */
+ pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
+ pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
+
+finish:
+ pa_log_debug("Thread shutting down");
+}
+
+int pa__init(pa_module*m) {
+ struct userdata *u;
+ struct stat st;
+ pa_sample_spec ss;
+ pa_channel_map map;
+ pa_modargs *ma;
+ char *t;
+ struct pollfd *pollfd;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("failed to parse module arguments.");
+ goto fail;
+ }
+
+ ss = m->core->default_sample_spec;
+ if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_DEFAULT) < 0) {
+ pa_log("invalid sample format specification or channel map");
+ goto fail;
+ }
+
+ u = pa_xnew0(struct userdata, 1);
+ u->core = m->core;
+ u->module = m;
+ m->userdata = u;
+ pa_memchunk_reset(&u->memchunk);
+ pa_thread_mq_init(&u->thread_mq, m->core->mainloop);
+ u->rtpoll = pa_rtpoll_new();
+ pa_rtpoll_item_new_asyncmsgq(u->rtpoll, PA_RTPOLL_EARLY, u->thread_mq.inq);
+
+ u->filename = pa_xstrdup(pa_modargs_get_value(ma, "file", DEFAULT_FILE_NAME));
+
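+    /* Create the FIFO if it doesn't exist yet. Opening it O_RDWR keeps a
+     * writer around, so read() never returns EOF even while no external
+     * writer is connected. */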
+ mkfifo(u->filename, 0666);
+ if ((u->fd = open(u->filename, O_RDWR|O_NOCTTY)) < 0) {
+ pa_log("open('%s'): %s", u->filename, pa_cstrerror(errno));
+ goto fail;
+ }
+
+ pa_make_fd_cloexec(u->fd);
+ pa_make_fd_nonblock(u->fd);
+
+ if (fstat(u->fd, &st) < 0) {
+ pa_log("fstat('%s'): %s",u->filename, pa_cstrerror(errno));
+ goto fail;
+ }
+
+ if (!S_ISFIFO(st.st_mode)) {
+ pa_log("'%s' is not a FIFO.", u->filename);
+ goto fail;
+ }
+
+ if (!(u->source = pa_source_new(m->core, __FILE__, pa_modargs_get_value(ma, "source_name", DEFAULT_SOURCE_NAME), 0, &ss, &map))) {
+ pa_log("Failed to create source.");
+ goto fail;
+ }
+
+ u->source->userdata = u;
+ u->source->flags = 0;
+
+ pa_source_set_module(u->source, m);
+ pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
+ pa_source_set_rtpoll(u->source, u->rtpoll);
+ pa_source_set_description(u->source, t = pa_sprintf_malloc("Unix FIFO source '%s'", u->filename));
+ pa_xfree(t);
+
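+    /* Register the FIFO fd with the rtpoll so the I/O thread wakes up as soon as data becomes readable */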
+ u->rtpoll_item = pa_rtpoll_item_new(u->rtpoll, PA_RTPOLL_NEVER, 1);
+ pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
+ pollfd->fd = u->fd;
+ pollfd->events = pollfd->revents = 0;
+
+ if (!(u->thread = pa_thread_new(thread_func, u))) {
+ pa_log("Failed to create thread.");
+ goto fail;
+ }
+
+ pa_source_put(u->source);
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+ if (ma)
+ pa_modargs_free(ma);
+
+ pa__done(m);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->source)
+ pa_source_unlink(u->source);
+
+ if (u->thread) {
+ pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
+ pa_thread_free(u->thread);
+ }
+
+ pa_thread_mq_done(&u->thread_mq);
+
+ if (u->source)
+ pa_source_unref(u->source);
+
+ if (u->memchunk.memblock)
+ pa_memblock_unref(u->memchunk.memblock);
+
+ if (u->rtpoll_item)
+ pa_rtpoll_item_free(u->rtpoll_item);
+
+ if (u->rtpoll)
+ pa_rtpoll_free(u->rtpoll);
+
+ if (u->filename) {
+ unlink(u->filename);
+ pa_xfree(u->filename);
+ }
+
+ if (u->fd >= 0)
+ pa_assert_se(pa_close(u->fd) == 0);
+
+ pa_xfree(u);
+}
diff --git a/src/modules/module-protocol-stub.c b/src/modules/module-protocol-stub.c
new file mode 100644
index 00000000..a9bd850b
--- /dev/null
+++ b/src/modules/module-protocol-stub.c
@@ -0,0 +1,376 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+ Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <string.h>
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <limits.h>
+
+#ifdef HAVE_SYS_SOCKET_H
+#include <sys/socket.h>
+#endif
+#ifdef HAVE_ARPA_INET_H
+#include <arpa/inet.h>
+#endif
+#ifdef HAVE_NETINET_IN_H
+#include <netinet/in.h>
+#endif
+
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/winsock.h>
+#include <pulsecore/core-error.h>
+#include <pulsecore/module.h>
+#include <pulsecore/socket-server.h>
+#include <pulsecore/socket-util.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+#include <pulsecore/native-common.h>
+#include <pulsecore/creds.h>
+
+#ifdef USE_TCP_SOCKETS
+#define SOCKET_DESCRIPTION "(TCP sockets)"
+#define SOCKET_USAGE "port=<TCP port number> listen=<address to listen on>"
+#else
+#define SOCKET_DESCRIPTION "(UNIX sockets)"
+#define SOCKET_USAGE "socket=<path to UNIX socket>"
+#endif
+
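+/* This stub is compiled several times with different USE_PROTOCOL_* and
+ * USE_TCP_SOCKETS definitions, yielding one module per protocol/transport
+ * combination (see the *-symdef.h includes below). */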
+#if defined(USE_PROTOCOL_SIMPLE)
+ #include <pulsecore/protocol-simple.h>
+ #define protocol_new pa_protocol_simple_new
+ #define protocol_free pa_protocol_simple_free
+ #define TCPWRAP_SERVICE "pulseaudio-simple"
+ #define IPV4_PORT 4711
+ #define UNIX_SOCKET "simple"
+ #define MODULE_ARGUMENTS "rate", "format", "channels", "sink", "source", "playback", "record",
+ #if defined(USE_TCP_SOCKETS)
+ #include "module-simple-protocol-tcp-symdef.h"
+ #else
+ #include "module-simple-protocol-unix-symdef.h"
+ #endif
+PA_MODULE_DESCRIPTION("Simple protocol "SOCKET_DESCRIPTION);
+ PA_MODULE_USAGE("rate=<sample rate> "
+ "format=<sample format> "
+ "channels=<number of channels> "
+ "sink=<sink to connect to> "
+ "source=<source to connect to> "
+ "playback=<enable playback?> "
+ "record=<enable record?> "
+ SOCKET_USAGE);
+#elif defined(USE_PROTOCOL_CLI)
+ #include <pulsecore/protocol-cli.h>
+ #define protocol_new pa_protocol_cli_new
+ #define protocol_free pa_protocol_cli_free
+ #define TCPWRAP_SERVICE "pulseaudio-cli"
+ #define IPV4_PORT 4712
+ #define UNIX_SOCKET "cli"
+ #define MODULE_ARGUMENTS
+ #ifdef USE_TCP_SOCKETS
+ #include "module-cli-protocol-tcp-symdef.h"
+ #else
+ #include "module-cli-protocol-unix-symdef.h"
+ #endif
+ PA_MODULE_DESCRIPTION("Command line interface protocol "SOCKET_DESCRIPTION);
+ PA_MODULE_USAGE(SOCKET_USAGE);
+#elif defined(USE_PROTOCOL_HTTP)
+ #include <pulsecore/protocol-http.h>
+ #define protocol_new pa_protocol_http_new
+ #define protocol_free pa_protocol_http_free
+ #define TCPWRAP_SERVICE "pulseaudio-http"
+ #define IPV4_PORT 4714
+ #define UNIX_SOCKET "http"
+ #define MODULE_ARGUMENTS
+ #ifdef USE_TCP_SOCKETS
+ #include "module-http-protocol-tcp-symdef.h"
+ #else
+ #include "module-http-protocol-unix-symdef.h"
+ #endif
+ PA_MODULE_DESCRIPTION("HTTP "SOCKET_DESCRIPTION);
+ PA_MODULE_USAGE(SOCKET_USAGE);
+#elif defined(USE_PROTOCOL_NATIVE)
+ #include <pulsecore/protocol-native.h>
+ #define protocol_new pa_protocol_native_new
+ #define protocol_free pa_protocol_native_free
+ #define TCPWRAP_SERVICE "pulseaudio-native"
+ #define IPV4_PORT PA_NATIVE_DEFAULT_PORT
+ #define UNIX_SOCKET PA_NATIVE_DEFAULT_UNIX_SOCKET
+ #define MODULE_ARGUMENTS_COMMON "cookie", "auth-anonymous",
+ #ifdef USE_TCP_SOCKETS
+ #include "module-native-protocol-tcp-symdef.h"
+ #else
+ #include "module-native-protocol-unix-symdef.h"
+ #endif
+
+ #if defined(HAVE_CREDS) && !defined(USE_TCP_SOCKETS)
+ #define MODULE_ARGUMENTS MODULE_ARGUMENTS_COMMON "auth-group", "auth-group-enable",
+ #define AUTH_USAGE "auth-group=<system group to allow access> auth-group-enable=<enable auth by UNIX group?> "
+ #elif defined(USE_TCP_SOCKETS)
+ #define MODULE_ARGUMENTS MODULE_ARGUMENTS_COMMON "auth-ip-acl",
+ #define AUTH_USAGE "auth-ip-acl=<IP address ACL to allow access> "
+ #else
+ #define MODULE_ARGUMENTS MODULE_ARGUMENTS_COMMON
+ #define AUTH_USAGE
+ #endif
+
+ PA_MODULE_DESCRIPTION("Native protocol "SOCKET_DESCRIPTION);
+ PA_MODULE_USAGE("auth-anonymous=<don't check for cookies?> "
+ "cookie=<path to cookie file> "
+ AUTH_USAGE
+ SOCKET_USAGE);
+#elif defined(USE_PROTOCOL_ESOUND)
+ #include <pulsecore/protocol-esound.h>
+ #include <pulsecore/esound.h>
+ #define protocol_new pa_protocol_esound_new
+ #define protocol_free pa_protocol_esound_free
+ #define TCPWRAP_SERVICE "esound"
+ #define IPV4_PORT ESD_DEFAULT_PORT
+ #define MODULE_ARGUMENTS_COMMON "sink", "source", "auth-anonymous", "cookie",
+ #ifdef USE_TCP_SOCKETS
+ #include "module-esound-protocol-tcp-symdef.h"
+ #else
+ #include "module-esound-protocol-unix-symdef.h"
+ #endif
+
+ #if defined(USE_TCP_SOCKETS)
+ #define MODULE_ARGUMENTS MODULE_ARGUMENTS_COMMON "auth-ip-acl",
+ #define AUTH_USAGE "auth-ip-acl=<IP address ACL to allow access> "
+ #else
+ #define MODULE_ARGUMENTS MODULE_ARGUMENTS_COMMON
+ #define AUTH_USAGE
+ #endif
+
+ PA_MODULE_DESCRIPTION("ESOUND protocol "SOCKET_DESCRIPTION);
+ PA_MODULE_USAGE("sink=<sink to connect to> "
+ "source=<source to connect to> "
+ "auth-anonymous=<don't verify cookies?> "
+ "cookie=<path to cookie file> "
+ AUTH_USAGE
+ SOCKET_USAGE);
+#else
+ #error "Broken build system"
+#endif
+
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+
+static const char* const valid_modargs[] = {
+ MODULE_ARGUMENTS
+#if defined(USE_TCP_SOCKETS)
+ "port",
+ "listen",
+#else
+ "socket",
+#endif
+ NULL
+};
+
+struct userdata {
+#if defined(USE_TCP_SOCKETS)
+ void *protocol_ipv4;
+ void *protocol_ipv6;
+#else
+ void *protocol_unix;
+ char *socket_path;
+#endif
+};
+
+int pa__init(pa_module*m) {
+ pa_modargs *ma = NULL;
+ int ret = -1;
+ struct userdata *u = NULL;
+
+#if defined(USE_TCP_SOCKETS)
+ pa_socket_server *s_ipv4 = NULL, *s_ipv6 = NULL;
+ uint32_t port = IPV4_PORT;
+ const char *listen_on;
+#else
+ pa_socket_server *s;
+ int r;
+ char tmp[PATH_MAX];
+
+#if defined(USE_PROTOCOL_ESOUND)
+#if defined(USE_PERUSER_ESOUND_SOCKET)
+ char esdsocketpath[PATH_MAX];
+#else
+ const char esdsocketpath[] = "/tmp/.esd/socket";
+#endif
+#endif
+#endif
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments");
+ goto finish;
+ }
+
+ u = pa_xnew0(struct userdata, 1);
+
+#if defined(USE_TCP_SOCKETS)
+ if (pa_modargs_get_value_u32(ma, "port", &port) < 0 || port < 1 || port > 0xFFFF) {
+ pa_log("port= expects a numerical argument between 1 and 65535.");
+ goto fail;
+ }
+
+ listen_on = pa_modargs_get_value(ma, "listen", NULL);
+
+ if (listen_on) {
+ s_ipv6 = pa_socket_server_new_ipv6_string(m->core->mainloop, listen_on, port, TCPWRAP_SERVICE);
+ s_ipv4 = pa_socket_server_new_ipv4_string(m->core->mainloop, listen_on, port, TCPWRAP_SERVICE);
+ } else {
+ s_ipv6 = pa_socket_server_new_ipv6_any(m->core->mainloop, port, TCPWRAP_SERVICE);
+ s_ipv4 = pa_socket_server_new_ipv4_any(m->core->mainloop, port, TCPWRAP_SERVICE);
+ }
+
+ if (!s_ipv4 && !s_ipv6)
+ goto fail;
+
+ if (s_ipv4)
+ if (!(u->protocol_ipv4 = protocol_new(m->core, s_ipv4, m, ma)))
+ pa_socket_server_unref(s_ipv4);
+
+ if (s_ipv6)
+ if (!(u->protocol_ipv6 = protocol_new(m->core, s_ipv6, m, ma)))
+ pa_socket_server_unref(s_ipv6);
+
+ if (!u->protocol_ipv4 && !u->protocol_ipv6)
+ goto fail;
+
+#else
+
+#if defined(USE_PROTOCOL_ESOUND)
+
+#if defined(USE_PERUSER_ESOUND_SOCKET)
+ snprintf(esdsocketpath, sizeof(esdsocketpath), "/tmp/.esd-%lu/socket", (unsigned long) getuid());
+#endif
+ pa_runtime_path(pa_modargs_get_value(ma, "socket", esdsocketpath), tmp, sizeof(tmp));
+ u->socket_path = pa_xstrdup(tmp);
+
+ /* This socket doesn't reside in our own runtime dir but in
+ * /tmp/.esd/, hence we have to create the dir first */
+
+ if (pa_make_secure_parent_dir(u->socket_path, m->core->is_system_instance ? 0755 : 0700, (uid_t)-1, (gid_t)-1) < 0) {
+        pa_log("Failed to create socket directory '%s': %s", u->socket_path, pa_cstrerror(errno));
+ goto fail;
+ }
+
+#else
+ pa_runtime_path(pa_modargs_get_value(ma, "socket", UNIX_SOCKET), tmp, sizeof(tmp));
+ u->socket_path = pa_xstrdup(tmp);
+#endif
+
+ if ((r = pa_unix_socket_remove_stale(tmp)) < 0) {
+ pa_log("Failed to remove stale UNIX socket '%s': %s", tmp, pa_cstrerror(errno));
+ goto fail;
+ }
+
+ if (r)
+ pa_log("Removed stale UNIX socket '%s'.", tmp);
+
+ if (!(s = pa_socket_server_new_unix(m->core->mainloop, tmp)))
+ goto fail;
+
+ if (!(u->protocol_unix = protocol_new(m->core, s, m, ma)))
+ goto fail;
+
+#endif
+
+ m->userdata = u;
+
+ ret = 0;
+
+finish:
+ if (ma)
+ pa_modargs_free(ma);
+
+ return ret;
+
+fail:
+ if (u) {
+#if defined(USE_TCP_SOCKETS)
+ if (u->protocol_ipv4)
+ protocol_free(u->protocol_ipv4);
+ if (u->protocol_ipv6)
+ protocol_free(u->protocol_ipv6);
+#else
+ if (u->protocol_unix)
+ protocol_free(u->protocol_unix);
+
+ if (u->socket_path)
+ pa_xfree(u->socket_path);
+#endif
+
+ pa_xfree(u);
+ } else {
+#if defined(USE_TCP_SOCKETS)
+ if (s_ipv4)
+ pa_socket_server_unref(s_ipv4);
+ if (s_ipv6)
+ pa_socket_server_unref(s_ipv6);
+#else
+ if (s)
+ pa_socket_server_unref(s);
+#endif
+ }
+
+ goto finish;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+
+ pa_assert(m);
+
+ u = m->userdata;
+
+#if defined(USE_TCP_SOCKETS)
+ if (u->protocol_ipv4)
+ protocol_free(u->protocol_ipv4);
+ if (u->protocol_ipv6)
+ protocol_free(u->protocol_ipv6);
+#else
+ if (u->protocol_unix)
+ protocol_free(u->protocol_unix);
+
+#if defined(USE_PROTOCOL_ESOUND)
+ if (u->socket_path) {
+ char *p = pa_parent_dir(u->socket_path);
+ rmdir(p);
+ pa_xfree(p);
+ }
+#endif
+
+ pa_xfree(u->socket_path);
+#endif
+
+ pa_xfree(u);
+}
diff --git a/src/modules/module-remap-sink.c b/src/modules/module-remap-sink.c
new file mode 100644
index 00000000..39a9245d
--- /dev/null
+++ b/src/modules/module-remap-sink.c
@@ -0,0 +1,335 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/namereg.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/module.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+#include <pulsecore/thread.h>
+#include <pulsecore/thread-mq.h>
+#include <pulsecore/rtpoll.h>
+
+#include "module-remap-sink-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("Virtual channel remapping sink");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_USAGE(
+ "sink_name=<name for the sink> "
+ "master=<name of sink to remap> "
+ "master_channel_map=<channel map> "
+ "format=<sample format> "
+ "channels=<number of channels> "
+ "rate=<sample rate> "
+ "channel_map=<channel map>");
+
+struct userdata {
+ pa_core *core;
+ pa_module *module;
+
+ pa_sink *sink, *master;
+ pa_sink_input *sink_input;
+
+ pa_memchunk memchunk;
+};
+
+static const char* const valid_modargs[] = {
+ "sink_name",
+ "master",
+ "master_channel_map",
+ "rate",
+ "format",
+ "channels",
+ "channel_map",
+ NULL
+};
+
+/* Called from I/O thread context */
+static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u = PA_SINK(o)->userdata;
+
+ switch (code) {
+
+ case PA_SINK_MESSAGE_GET_LATENCY: {
+ pa_usec_t usec = 0;
+
+ if (PA_MSGOBJECT(u->master)->process_msg(PA_MSGOBJECT(u->master), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
+ usec = 0;
+
+ *((pa_usec_t*) data) = usec + pa_bytes_to_usec(u->memchunk.length, &u->sink->sample_spec);
+ return 0;
+ }
+ }
+
+ return pa_sink_process_msg(o, code, data, offset, chunk);
+}
+
+/* Called from main context */
+static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
+ struct userdata *u;
+
+ pa_sink_assert_ref(s);
+ pa_assert_se(u = s->userdata);
+
+ if (PA_SINK_LINKED(state) && u->sink_input && PA_SINK_INPUT_LINKED(pa_sink_input_get_state(u->sink_input)))
+ pa_sink_input_cork(u->sink_input, state == PA_SINK_SUSPENDED);
+
+ return 0;
+}
+
+/* Called from I/O thread context */
+static int sink_input_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u = PA_SINK_INPUT(o)->userdata;
+
+ switch (code) {
+ case PA_SINK_INPUT_MESSAGE_GET_LATENCY:
+ *((pa_usec_t*) data) = pa_bytes_to_usec(u->memchunk.length, &u->sink_input->sample_spec);
+
+        /* Fall through; the default handler will add the extra
+         * latency introduced by the resampler */
+ break;
+ }
+
+ return pa_sink_input_process_msg(o, code, data, offset, chunk);
+}
+
+/* Called from I/O thread context */
+static int sink_input_peek_cb(pa_sink_input *i, size_t length, pa_memchunk *chunk) {
+ struct userdata *u;
+
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(u = i->userdata);
+
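+    /* Render a chunk from our virtual sink and pass it on unchanged; the
+     * actual relabelling happens when the core mixes this stream into the
+     * master using the channel map declared for the sink input */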
+ if (!u->memchunk.memblock)
+ pa_sink_render(u->sink, length, &u->memchunk);
+
+ pa_assert(u->memchunk.memblock);
+ *chunk = u->memchunk;
+ pa_memblock_ref(chunk->memblock);
+ return 0;
+}
+
+/* Called from I/O thread context */
+static void sink_input_drop_cb(pa_sink_input *i, size_t length) {
+ struct userdata *u;
+
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(u = i->userdata);
+ pa_assert(length > 0);
+
+ if (u->memchunk.memblock) {
+
+ if (length < u->memchunk.length) {
+ u->memchunk.index += length;
+ u->memchunk.length -= length;
+ return;
+ }
+
+ pa_memblock_unref(u->memchunk.memblock);
+ length -= u->memchunk.length;
+ pa_memchunk_reset(&u->memchunk);
+ }
+
+ if (length > 0)
+ pa_sink_skip(u->sink, length);
+}
+
+/* Called from I/O thread context */
+static void sink_input_detach_cb(pa_sink_input *i) {
+ struct userdata *u;
+
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(u = i->userdata);
+
+ pa_sink_detach_within_thread(u->sink);
+}
+
+/* Called from I/O thread context */
+static void sink_input_attach_cb(pa_sink_input *i) {
+ struct userdata *u;
+
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(u = i->userdata);
+
+ pa_sink_set_asyncmsgq(u->sink, i->sink->asyncmsgq);
+ pa_sink_set_rtpoll(u->sink, i->sink->rtpoll);
+
+ pa_sink_attach_within_thread(u->sink);
+}
+
+/* Called from main context */
+static void sink_input_kill_cb(pa_sink_input *i) {
+ struct userdata *u;
+
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(u = i->userdata);
+
+ pa_sink_input_unlink(u->sink_input);
+ pa_sink_input_unref(u->sink_input);
+ u->sink_input = NULL;
+
+ pa_sink_unlink(u->sink);
+ pa_sink_unref(u->sink);
+ u->sink = NULL;
+
+ pa_module_unload_request(u->module);
+}
+
+int pa__init(pa_module*m) {
+ struct userdata *u;
+ pa_sample_spec ss;
+ pa_channel_map sink_map, stream_map;
+ pa_modargs *ma;
+ char *t;
+ pa_sink *master;
+ pa_sink_input_new_data data;
+ char *default_sink_name = NULL;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments.");
+ goto fail;
+ }
+
+ if (!(master = pa_namereg_get(m->core, pa_modargs_get_value(ma, "master", NULL), PA_NAMEREG_SINK, 1))) {
+ pa_log("Master sink not found");
+ goto fail;
+ }
+
+ ss = master->sample_spec;
+ sink_map = master->channel_map;
+ if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &sink_map, PA_CHANNEL_MAP_DEFAULT) < 0) {
+ pa_log("Invalid sample format specification or channel map");
+ goto fail;
+ }
+
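+    /* The master-side channel map defaults to the sink's own map; since
+     * remapping merely relabels channel positions, both maps need the same
+     * number of channels */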
+ stream_map = sink_map;
+ if (pa_modargs_get_channel_map(ma, "master_channel_map", &stream_map) < 0) {
+        pa_log("Invalid master channel map");
+ goto fail;
+ }
+
+ if (stream_map.channels != ss.channels) {
+ pa_log("Number of channels doesn't match");
+ goto fail;
+ }
+
+ u = pa_xnew0(struct userdata, 1);
+ u->core = m->core;
+ u->module = m;
+ m->userdata = u;
+ u->master = master;
+ pa_memchunk_reset(&u->memchunk);
+
+ default_sink_name = pa_sprintf_malloc("%s.remapped", master->name);
+
+ /* Create sink */
+ if (!(u->sink = pa_sink_new(m->core, __FILE__, pa_modargs_get_value(ma, "sink_name", default_sink_name), 0, &ss, &sink_map))) {
+ pa_log("Failed to create sink.");
+ goto fail;
+ }
+
+ u->sink->parent.process_msg = sink_process_msg;
+ u->sink->set_state = sink_set_state;
+ u->sink->userdata = u;
+ u->sink->flags = PA_SINK_LATENCY;
+
+ pa_sink_set_module(u->sink, m);
+ pa_sink_set_description(u->sink, t = pa_sprintf_malloc("Remapped %s", master->description));
+ pa_xfree(t);
+ pa_sink_set_asyncmsgq(u->sink, master->asyncmsgq);
+ pa_sink_set_rtpoll(u->sink, master->rtpoll);
+
+ /* Create sink input */
+ pa_sink_input_new_data_init(&data);
+ data.sink = u->master;
+ data.driver = __FILE__;
+ data.name = "Remapped Stream";
+ pa_sink_input_new_data_set_sample_spec(&data, &ss);
+ pa_sink_input_new_data_set_channel_map(&data, &stream_map);
+ data.module = m;
+
+ if (!(u->sink_input = pa_sink_input_new(m->core, &data, PA_SINK_INPUT_DONT_MOVE)))
+ goto fail;
+
+ u->sink_input->parent.process_msg = sink_input_process_msg;
+ u->sink_input->peek = sink_input_peek_cb;
+ u->sink_input->drop = sink_input_drop_cb;
+ u->sink_input->kill = sink_input_kill_cb;
+ u->sink_input->attach = sink_input_attach_cb;
+ u->sink_input->detach = sink_input_detach_cb;
+ u->sink_input->userdata = u;
+
+ pa_sink_put(u->sink);
+ pa_sink_input_put(u->sink_input);
+
+ pa_modargs_free(ma);
+ pa_xfree(default_sink_name);
+
+ return 0;
+
+fail:
+ if (ma)
+ pa_modargs_free(ma);
+
+ pa__done(m);
+
+ pa_xfree(default_sink_name);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->sink_input) {
+ pa_sink_input_unlink(u->sink_input);
+ pa_sink_input_unref(u->sink_input);
+ }
+
+ if (u->sink) {
+ pa_sink_unlink(u->sink);
+ pa_sink_unref(u->sink);
+ }
+
+ if (u->memchunk.memblock)
+ pa_memblock_unref(u->memchunk.memblock);
+
+ pa_xfree(u);
+}
diff --git a/src/modules/module-rescue-streams.c b/src/modules/module-rescue-streams.c
new file mode 100644
index 00000000..12957c9d
--- /dev/null
+++ b/src/modules/module-rescue-streams.c
@@ -0,0 +1,164 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/core.h>
+#include <pulsecore/sink-input.h>
+#include <pulsecore/source-output.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+#include <pulsecore/namereg.h>
+
+#include "module-rescue-streams-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("When a sink/source is removed, try to move its streams to the default sink/source");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(TRUE);
+
+static const char* const valid_modargs[] = {
+ NULL,
+};
+
+struct userdata {
+ pa_hook_slot *sink_slot, *source_slot;
+};
+
+static pa_hook_result_t sink_hook_callback(pa_core *c, pa_sink *sink, void* userdata) {
+ pa_sink_input *i;
+ pa_sink *target;
+
+ pa_assert(c);
+ pa_assert(sink);
+
+ if (!pa_idxset_size(sink->inputs)) {
+ pa_log_debug("No sink inputs to move away.");
+ return PA_HOOK_OK;
+ }
+
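+    /* Prefer the default sink; if it is missing or is the very sink going away, fall back to any other sink */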
+ if (!(target = pa_namereg_get(c, NULL, PA_NAMEREG_SINK, 0)) || target == sink) {
+ uint32_t idx;
+
+ for (target = pa_idxset_first(c->sinks, &idx); target; target = pa_idxset_next(c->sinks, &idx))
+ if (target != sink)
+ break;
+
+ if (!target) {
+ pa_log_info("No evacuation sink found.");
+ return PA_HOOK_OK;
+ }
+ }
+
+ while ((i = pa_idxset_first(sink->inputs, NULL))) {
+ if (pa_sink_input_move_to(i, target, 1) < 0) {
+ pa_log_warn("Failed to move sink input %u \"%s\" to %s.", i->index, i->name, target->name);
+ return PA_HOOK_OK;
+ }
+
+        pa_log_info("Successfully moved sink input %u \"%s\" to %s.", i->index, i->name, target->name);
+ }
+
+
+ return PA_HOOK_OK;
+}
+
+static pa_hook_result_t source_hook_callback(pa_core *c, pa_source *source, void* userdata) {
+ pa_source_output *o;
+ pa_source *target;
+
+ pa_assert(c);
+ pa_assert(source);
+
+ if (!pa_idxset_size(source->outputs)) {
+ pa_log_debug("No source outputs to move away.");
+ return PA_HOOK_OK;
+ }
+
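+    /* Prefer the default source; otherwise pick another source of the same
+     * kind, so that streams are only moved between monitor sources or
+     * between real sources */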
+ if (!(target = pa_namereg_get(c, NULL, PA_NAMEREG_SOURCE, 0)) || target == source) {
+ uint32_t idx;
+
+ for (target = pa_idxset_first(c->sources, &idx); target; target = pa_idxset_next(c->sources, &idx))
+ if (target != source && !target->monitor_of == !source->monitor_of)
+ break;
+
+ if (!target) {
+ pa_log_info("No evacuation source found.");
+ return PA_HOOK_OK;
+ }
+ }
+
+ pa_assert(target != source);
+
+ while ((o = pa_idxset_first(source->outputs, NULL))) {
+ if (pa_source_output_move_to(o, target) < 0) {
+ pa_log_warn("Failed to move source output %u \"%s\" to %s.", o->index, o->name, target->name);
+ return PA_HOOK_OK;
+ }
+
+        pa_log_info("Successfully moved source output %u \"%s\" to %s.", o->index, o->name, target->name);
+ }
+
+
+ return PA_HOOK_OK;
+}
+
+int pa__init(pa_module*m) {
+ pa_modargs *ma = NULL;
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments");
+ return -1;
+ }
+
+ m->userdata = u = pa_xnew(struct userdata, 1);
+ u->sink_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_UNLINK], (pa_hook_cb_t) sink_hook_callback, NULL);
+ u->source_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK], (pa_hook_cb_t) source_hook_callback, NULL);
+
+ pa_modargs_free(ma);
+ return 0;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (!m->userdata)
+ return;
+
+ u = m->userdata;
+ if (u->sink_slot)
+ pa_hook_slot_free(u->sink_slot);
+ if (u->source_slot)
+ pa_hook_slot_free(u->source_slot);
+
+ pa_xfree(u);
+}
diff --git a/src/modules/module-sine.c b/src/modules/module-sine.c
new file mode 100644
index 00000000..41d9a51c
--- /dev/null
+++ b/src/modules/module-sine.c
@@ -0,0 +1,206 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <math.h>
+
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/sink-input.h>
+#include <pulsecore/module.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/namereg.h>
+#include <pulsecore/log.h>
+#include <pulsecore/core-util.h>
+
+#include "module-sine-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("Sine wave generator");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_USAGE("sink=<sink to connect to> frequency=<frequency in Hz>");
+
+struct userdata {
+ pa_core *core;
+ pa_module *module;
+ pa_sink_input *sink_input;
+ pa_memblock *memblock;
+ size_t peek_index;
+};
+
+static const char* const valid_modargs[] = {
+ "sink",
+ "frequency",
+ NULL,
+};
+
+static int sink_input_peek_cb(pa_sink_input *i, size_t length, pa_memchunk *chunk) {
+ struct userdata *u;
+
+ pa_assert(i);
+ u = i->userdata;
+ pa_assert(u);
+ pa_assert(chunk);
+
+ chunk->memblock = pa_memblock_ref(u->memblock);
+ chunk->index = u->peek_index;
+ chunk->length = pa_memblock_get_length(u->memblock) - u->peek_index;
+
+ return 0;
+}
+
+static void sink_input_drop_cb(pa_sink_input *i, size_t length) {
+ struct userdata *u;
+ size_t l;
+
+ pa_assert(i);
+ u = i->userdata;
+ pa_assert(u);
+ pa_assert(length > 0);
+
+ u->peek_index += length;
+
+ l = pa_memblock_get_length(u->memblock);
+
+ while (u->peek_index >= l)
+ u->peek_index -= l;
+}
+
+static void sink_input_kill_cb(pa_sink_input *i) {
+ struct userdata *u;
+
+ pa_assert(i);
+ u = i->userdata;
+ pa_assert(u);
+
+ pa_sink_input_unlink(u->sink_input);
+ pa_sink_input_unref(u->sink_input);
+ u->sink_input = NULL;
+
+ pa_module_unload_request(u->module);
+}
+
+static void calc_sine(float *f, size_t l, float freq) {
+ size_t i;
+
+ l /= sizeof(float);
+
+ for (i = 0; i < l; i++)
+ f[i] = (float) sin((double) i/l*M_PI*2*freq)/2;
+}
+
+int pa__init(pa_module*m) {
+ pa_modargs *ma = NULL;
+ struct userdata *u;
+ pa_sink *sink;
+ pa_sample_spec ss;
+ uint32_t frequency;
+ char t[256];
+ void *p;
+ pa_sink_input_new_data data;
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments");
+ goto fail;
+ }
+
+ m->userdata = u = pa_xnew0(struct userdata, 1);
+ u->core = m->core;
+ u->module = m;
+ u->sink_input = NULL;
+ u->memblock = NULL;
+ u->peek_index = 0;
+
+ if (!(sink = pa_namereg_get(m->core, pa_modargs_get_value(ma, "sink", NULL), PA_NAMEREG_SINK, 1))) {
+ pa_log("No such sink.");
+ goto fail;
+ }
+
+ ss.format = PA_SAMPLE_FLOAT32;
+ ss.rate = sink->sample_spec.rate;
+ ss.channels = 1;
+
+ frequency = 440;
+ if (pa_modargs_get_value_u32(ma, "frequency", &frequency) < 0 || frequency < 1 || frequency > ss.rate/2) {
+ pa_log("Invalid frequency specification");
+ goto fail;
+ }
+
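+    /* Allocate exactly one second of audio and fill it with the sine wave;
+     * a whole number of periods fits into the block, so the peek/drop
+     * callbacks can wrap around it seamlessly */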
+ u->memblock = pa_memblock_new(m->core->mempool, pa_bytes_per_second(&ss));
+ p = pa_memblock_acquire(u->memblock);
+ calc_sine(p, pa_memblock_get_length(u->memblock), frequency);
+ pa_memblock_release(u->memblock);
+
+ pa_snprintf(t, sizeof(t), "Sine Generator at %u Hz", frequency);
+
+ pa_sink_input_new_data_init(&data);
+ data.sink = sink;
+ data.driver = __FILE__;
+ data.name = t;
+ pa_sink_input_new_data_set_sample_spec(&data, &ss);
+ data.module = m;
+
+ if (!(u->sink_input = pa_sink_input_new(m->core, &data, 0)))
+ goto fail;
+
+ u->sink_input->peek = sink_input_peek_cb;
+ u->sink_input->drop = sink_input_drop_cb;
+ u->sink_input->kill = sink_input_kill_cb;
+ u->sink_input->userdata = u;
+
+ pa_sink_input_put(u->sink_input);
+
+ pa_modargs_free(ma);
+ return 0;
+
+fail:
+ if (ma)
+ pa_modargs_free(ma);
+
+ pa__done(m);
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->sink_input) {
+ pa_sink_input_unlink(u->sink_input);
+ pa_sink_input_unref(u->sink_input);
+ }
+
+ if (u->memblock)
+ pa_memblock_unref(u->memblock);
+
+ pa_xfree(u);
+}
diff --git a/src/modules/module-solaris.c b/src/modules/module-solaris.c
new file mode 100644
index 00000000..4a5c88e4
--- /dev/null
+++ b/src/modules/module-solaris.c
@@ -0,0 +1,766 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+ Copyright 2006-2007 Pierre Ossman <ossman@cendio.se> for Cendio AB
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <limits.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <signal.h>
+#include <stropts.h>
+#include <sys/conf.h>
+#include <sys/audio.h>
+
+#include <pulse/error.h>
+#include <pulse/mainloop-signal.h>
+#include <pulse/xmalloc.h>
+#include <pulse/timeval.h>
+
+#include <pulsecore/iochannel.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/source.h>
+#include <pulsecore/module.h>
+#include <pulsecore/sample-util.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+#include <pulsecore/core-error.h>
+#include <pulsecore/thread-mq.h>
+#include <pulsecore/rtpoll.h>
+#include <pulsecore/thread.h>
+
+#include "module-solaris-symdef.h"
+
+PA_MODULE_AUTHOR("Pierre Ossman");
+PA_MODULE_DESCRIPTION("Solaris Sink/Source");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_USAGE(
+    "sink_name=<name for the sink> "
+    "source_name=<name for the source> "
+    "device=<audio device> "
+    "record=<enable source?> "
+    "playback=<enable sink?> "
+    "format=<sample format> "
+    "channels=<number of channels> "
+    "rate=<sample rate> "
+    "buffer_size=<record buffer size> "
+    "channel_map=<channel map>");
+
+struct userdata {
+ pa_core *core;
+ pa_sink *sink;
+ pa_source *source;
+
+ pa_thread *thread;
+ pa_thread_mq thread_mq;
+ pa_rtpoll *rtpoll;
+
+ pa_signal_event *sig;
+
+ pa_memchunk memchunk;
+
+ unsigned int page_size;
+
+ uint32_t frame_size;
+ uint32_t buffer_size;
+ unsigned int written_bytes, read_bytes;
+
+ int fd;
+ pa_rtpoll_item *rtpoll_item;
+ pa_module *module;
+};
+
+static const char* const valid_modargs[] = {
+ "sink_name",
+ "source_name",
+ "device",
+ "record",
+ "playback",
+ "buffer_size",
+ "format",
+ "rate",
+ "channels",
+ "channel_map",
+ NULL
+};
+
+#define DEFAULT_SINK_NAME "solaris_output"
+#define DEFAULT_SOURCE_NAME "solaris_input"
+#define DEFAULT_DEVICE "/dev/audio"
+
+static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u = PA_SINK(o)->userdata;
+ int err;
+ audio_info_t info;
+
+ switch (code) {
+ case PA_SINK_MESSAGE_GET_LATENCY: {
+ pa_usec_t r = 0;
+
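+            /* Latency is what we have written but the device has not played
+             * yet (written_bytes minus the hardware sample counter), plus
+             * whatever is still buffered in our memchunk */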
+ if (u->fd >= 0) {
+
+ err = ioctl(u->fd, AUDIO_GETINFO, &info);
+ pa_assert(err >= 0);
+
+ r += pa_bytes_to_usec(u->written_bytes, &PA_SINK(o)->sample_spec);
+ r -= pa_bytes_to_usec(info.play.samples * u->frame_size, &PA_SINK(o)->sample_spec);
+
+ if (u->memchunk.memblock)
+ r += pa_bytes_to_usec(u->memchunk.length, &PA_SINK(o)->sample_spec);
+ }
+
+ *((pa_usec_t*) data) = r;
+
+ return 0;
+ }
+
+ case PA_SINK_MESSAGE_SET_VOLUME:
+ if (u->fd >= 0) {
+ AUDIO_INITINFO(&info);
+
+ info.play.gain = pa_cvolume_avg((pa_cvolume*)data) * AUDIO_MAX_GAIN / PA_VOLUME_NORM;
+                pa_assert(info.play.gain <= AUDIO_MAX_GAIN);
+
+ if (ioctl(u->fd, AUDIO_SETINFO, &info) < 0) {
+ if (errno == EINVAL)
+ pa_log("AUDIO_SETINFO: Unsupported volume.");
+ else
+ pa_log("AUDIO_SETINFO: %s", pa_cstrerror(errno));
+ } else {
+ return 0;
+ }
+ }
+ break;
+
+ case PA_SINK_MESSAGE_GET_VOLUME:
+ if (u->fd >= 0) {
+ err = ioctl(u->fd, AUDIO_GETINFO, &info);
+                pa_assert(err >= 0);
+
+ pa_cvolume_set((pa_cvolume*) data, ((pa_cvolume*) data)->channels,
+ info.play.gain * PA_VOLUME_NORM / AUDIO_MAX_GAIN);
+
+ return 0;
+ }
+ break;
+
+ case PA_SINK_MESSAGE_SET_MUTE:
+ if (u->fd >= 0) {
+ AUDIO_INITINFO(&info);
+
+ info.output_muted = !!PA_PTR_TO_UINT(data);
+
+ if (ioctl(u->fd, AUDIO_SETINFO, &info) < 0)
+ pa_log("AUDIO_SETINFO: %s", pa_cstrerror(errno));
+ else
+ return 0;
+ }
+ break;
+
+ case PA_SINK_MESSAGE_GET_MUTE:
+ if (u->fd >= 0) {
+ err = ioctl(u->fd, AUDIO_GETINFO, &info);
+ pa_assert(err >= 0);
+
+ *(int*)data = !!info.output_muted;
+
+ return 0;
+ }
+ break;
+ }
+
+ return pa_sink_process_msg(o, code, data, offset, chunk);
+}
+
+static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u = PA_SOURCE(o)->userdata;
+ int err;
+ audio_info_t info;
+
+ switch (code) {
+ case PA_SOURCE_MESSAGE_GET_LATENCY: {
+ pa_usec_t r = 0;
+
+ if (u->fd) {
+ err = ioctl(u->fd, AUDIO_GETINFO, &info);
+ pa_assert(err >= 0);
+
+ r += pa_bytes_to_usec(info.record.samples * u->frame_size, &PA_SOURCE(o)->sample_spec);
+ r -= pa_bytes_to_usec(u->read_bytes, &PA_SOURCE(o)->sample_spec);
+ }
+
+ *((pa_usec_t*) data) = r;
+
+ return 0;
+ }
+
+ case PA_SOURCE_MESSAGE_SET_VOLUME:
+ if (u->fd >= 0) {
+ AUDIO_INITINFO(&info);
+
+ info.record.gain = pa_cvolume_avg((pa_cvolume*) data) * AUDIO_MAX_GAIN / PA_VOLUME_NORM;
+                pa_assert(info.record.gain <= AUDIO_MAX_GAIN);
+
+ if (ioctl(u->fd, AUDIO_SETINFO, &info) < 0) {
+ if (errno == EINVAL)
+ pa_log("AUDIO_SETINFO: Unsupported volume.");
+ else
+ pa_log("AUDIO_SETINFO: %s", pa_cstrerror(errno));
+ } else {
+ return 0;
+ }
+ }
+ break;
+
+ case PA_SOURCE_MESSAGE_GET_VOLUME:
+ if (u->fd >= 0) {
+ err = ioctl(u->fd, AUDIO_GETINFO, &info);
+ pa_assert(err >= 0);
+
+ pa_cvolume_set((pa_cvolume*) data, ((pa_cvolume*) data)->channels,
+ info.record.gain * PA_VOLUME_NORM / AUDIO_MAX_GAIN);
+
+ return 0;
+ }
+ break;
+ }
+
+ return pa_source_process_msg(o, code, data, offset, chunk);
+}
+
+static void clear_underflow(struct userdata *u) {
+ audio_info_t info;
+
+ AUDIO_INITINFO(&info);
+
+ info.play.error = 0;
+
+ if (ioctl(u->fd, AUDIO_SETINFO, &info) < 0)
+ pa_log("AUDIO_SETINFO: %s", pa_cstrerror(errno));
+}
+
+static void clear_overflow(struct userdata *u) {
+ audio_info_t info;
+
+ AUDIO_INITINFO(&info);
+
+ info.record.error = 0;
+
+ if (ioctl(u->fd, AUDIO_SETINFO, &info) < 0)
+ pa_log("AUDIO_SETINFO: %s", pa_cstrerror(errno));
+}
+
+static void thread_func(void *userdata) {
+ struct userdata *u = userdata;
+ unsigned short revents = 0;
+ int ret;
+
+ pa_assert(u);
+
+ pa_log_debug("Thread starting up");
+
+ if (u->core->high_priority)
+ pa_make_realtime();
+
+ pa_thread_mq_install(&u->thread_mq);
+ pa_rtpoll_install(u->rtpoll);
+
+ for (;;) {
+ /* Render some data and write it to the dsp */
+
+ if (u->sink && PA_SINK_OPENED(u->sink->thread_info.state)) {
+ audio_info_t info;
+ int err;
+ size_t len;
+
+ err = ioctl(u->fd, AUDIO_GETINFO, &info);
+ pa_assert(err >= 0);
+
+ /*
+ * Since we cannot modify the size of the output buffer we fake it
+ * by not filling it more than u->buffer_size.
+ */
+ len = u->buffer_size;
+ len -= u->written_bytes - (info.play.samples * u->frame_size);
+
+ /* The sample counter can sometimes go backwards :( */
+ if (len > u->buffer_size)
+ len = 0;
+
+ if (info.play.error) {
+ pa_log_debug("Solaris buffer underflow!");
+ clear_underflow(u);
+ }
+
+ len -= len % u->frame_size;
+
+ while (len) {
+ void *p;
+ ssize_t r;
+
+ if (!u->memchunk.length)
+ pa_sink_render(u->sink, len, &u->memchunk);
+
+ pa_assert(u->memchunk.length);
+
+ p = pa_memblock_acquire(u->memchunk.memblock);
+ r = pa_write(u->fd, (uint8_t*) p + u->memchunk.index, u->memchunk.length, NULL);
+ pa_memblock_release(u->memchunk.memblock);
+
+ if (r < 0) {
+ if (errno == EINTR)
+ continue;
+ else if (errno != EAGAIN) {
+                        pa_log("Failed to write data to DSP: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+ } else {
+ pa_assert(r % u->frame_size == 0);
+
+ u->memchunk.index += r;
+ u->memchunk.length -= r;
+
+ if (u->memchunk.length <= 0) {
+ pa_memblock_unref(u->memchunk.memblock);
+ pa_memchunk_reset(&u->memchunk);
+ }
+
+ len -= r;
+ u->written_bytes += r;
+ }
+ }
+ }
+
+ /* Try to read some data and pass it on to the source driver */
+
+ if (u->source && PA_SOURCE_OPENED(u->source->thread_info.state) && ((revents & POLLIN))) {
+ pa_memchunk memchunk;
+ int err;
+ size_t l;
+ void *p;
+ ssize_t r;
+ audio_info_t info;
+
+ err = ioctl(u->fd, AUDIO_GETINFO, &info);
+ pa_assert(err >= 0);
+
+ if (info.record.error) {
+ pa_log_debug("Solaris buffer overflow!");
+ clear_overflow(u);
+ }
+
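+            /* I_NREAD stores the number of bytes available for reading in l */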
+ err = ioctl(u->fd, I_NREAD, &l);
+ pa_assert(err >= 0);
+
+ if (l > 0) {
+ /* This is to make sure it fits in the memory pool. Also, a page
+ should be the most efficient transfer size. */
+ if (l > u->page_size)
+ l = u->page_size;
+
+ memchunk.memblock = pa_memblock_new(u->core->mempool, l);
+ pa_assert(memchunk.memblock);
+
+ p = pa_memblock_acquire(memchunk.memblock);
+ r = pa_read(u->fd, p, l, NULL);
+ pa_memblock_release(memchunk.memblock);
+
+ if (r < 0) {
+ pa_memblock_unref(memchunk.memblock);
+ if (errno != EAGAIN) {
+ pa_log("Failed to read data from DSP: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+ } else {
+ memchunk.index = 0;
+ memchunk.length = r;
+
+ pa_source_post(u->source, &memchunk);
+ pa_memblock_unref(memchunk.memblock);
+
+ u->read_bytes += r;
+
+ revents &= ~POLLIN;
+ }
+ }
+ }
+
+ if (u->fd >= 0) {
+ struct pollfd *pollfd;
+
+ pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
+ pollfd->events =
+ ((u->source && PA_SOURCE_OPENED(u->source->thread_info.state)) ? POLLIN : 0);
+ }
+
+ /* Hmm, nothing to do. Let's sleep */
+ if ((ret = pa_rtpoll_run(u->rtpoll, 1)) < 0)
+ goto fail;
+
+ if (ret == 0)
+ goto finish;
+
+ if (u->fd >= 0) {
+ struct pollfd *pollfd;
+
+ pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
+
+ if (pollfd->revents & ~(POLLOUT|POLLIN)) {
+ pa_log("DSP shutdown.");
+ goto fail;
+ }
+
+ revents = pollfd->revents;
+ } else
+ revents = 0;
+ }
+
+fail:
+ /* We have to continue processing messages until we receive the
+ * SHUTDOWN message */
+ pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
+ pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
+
+finish:
+ pa_log_debug("Thread shutting down");
+}
+
+static void sig_callback(pa_mainloop_api *api, pa_signal_event*e, int sig, void *userdata) {
+ struct userdata *u = userdata;
+
+    pa_assert(u);
+
+ if (u->sink) {
+ pa_sink_get_volume(u->sink);
+ pa_sink_get_mute(u->sink);
+ }
+
+ if (u->source)
+ pa_source_get_volume(u->source);
+}
+
+static int pa_solaris_auto_format(int fd, int mode, pa_sample_spec *ss) {
+ audio_info_t info;
+
+ AUDIO_INITINFO(&info);
+
+ if (mode != O_RDONLY) {
+ info.play.sample_rate = ss->rate;
+ info.play.channels = ss->channels;
+ switch (ss->format) {
+ case PA_SAMPLE_U8:
+ info.play.precision = 8;
+ info.play.encoding = AUDIO_ENCODING_LINEAR;
+ break;
+ case PA_SAMPLE_ALAW:
+ info.play.precision = 8;
+ info.play.encoding = AUDIO_ENCODING_ALAW;
+ break;
+ case PA_SAMPLE_ULAW:
+ info.play.precision = 8;
+ info.play.encoding = AUDIO_ENCODING_ULAW;
+ break;
+ case PA_SAMPLE_S16NE:
+ info.play.precision = 16;
+ info.play.encoding = AUDIO_ENCODING_LINEAR;
+ break;
+ default:
+ return -1;
+ }
+ }
+
+ if (mode != O_WRONLY) {
+ info.record.sample_rate = ss->rate;
+ info.record.channels = ss->channels;
+ switch (ss->format) {
+ case PA_SAMPLE_U8:
+ info.record.precision = 8;
+ info.record.encoding = AUDIO_ENCODING_LINEAR;
+ break;
+ case PA_SAMPLE_ALAW:
+ info.record.precision = 8;
+ info.record.encoding = AUDIO_ENCODING_ALAW;
+ break;
+ case PA_SAMPLE_ULAW:
+ info.record.precision = 8;
+ info.record.encoding = AUDIO_ENCODING_ULAW;
+ break;
+ case PA_SAMPLE_S16NE:
+ info.record.precision = 16;
+ info.record.encoding = AUDIO_ENCODING_LINEAR;
+ break;
+ default:
+ return -1;
+ }
+ }
+
+ if (ioctl(fd, AUDIO_SETINFO, &info) < 0) {
+ if (errno == EINVAL)
+ pa_log("AUDIO_SETINFO: Unsupported sample format.");
+ else
+ pa_log("AUDIO_SETINFO: %s", pa_cstrerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+static int pa_solaris_set_buffer(int fd, int buffer_size) {
+ audio_info_t info;
+
+ AUDIO_INITINFO(&info);
+
+ info.play.buffer_size = buffer_size;
+ info.record.buffer_size = buffer_size;
+
+ if (ioctl(fd, AUDIO_SETINFO, &info) < 0) {
+ if (errno == EINVAL)
+ pa_log("AUDIO_SETINFO: Unsupported buffer size.");
+ else
+ pa_log("AUDIO_SETINFO: %s", pa_cstrerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+int pa__init(pa_module *m) {
+ struct userdata *u = NULL;
+ const char *p;
+ int fd = -1;
+ int buffer_size;
+ int mode;
+ int record = 1, playback = 1;
+ pa_sample_spec ss;
+ pa_channel_map map;
+ pa_modargs *ma = NULL;
+ char *t;
+ struct pollfd *pollfd;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("failed to parse module arguments.");
+ goto fail;
+ }
+
+ if (pa_modargs_get_value_boolean(ma, "record", &record) < 0 || pa_modargs_get_value_boolean(ma, "playback", &playback) < 0) {
+        pa_log("record= and playback= expect boolean arguments.");
+ goto fail;
+ }
+
+ if (!playback && !record) {
+ pa_log("neither playback nor record enabled for device.");
+ goto fail;
+ }
+
+ mode = (playback&&record) ? O_RDWR : (playback ? O_WRONLY : (record ? O_RDONLY : 0));
+
+ buffer_size = 16384;
+ if (pa_modargs_get_value_s32(ma, "buffer_size", &buffer_size) < 0) {
+ pa_log("failed to parse buffer size argument");
+ goto fail;
+ }
+
+ ss = m->core->default_sample_spec;
+ if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_DEFAULT) < 0) {
+ pa_log("failed to parse sample specification");
+ goto fail;
+ }
+
+ if ((fd = open(p = pa_modargs_get_value(ma, "device", DEFAULT_DEVICE), mode | O_NONBLOCK)) < 0)
+ goto fail;
+
+ pa_log_info("device opened in %s mode.", mode == O_WRONLY ? "O_WRONLY" : (mode == O_RDONLY ? "O_RDONLY" : "O_RDWR"));
+
+ if (pa_solaris_auto_format(fd, mode, &ss) < 0)
+ goto fail;
+
+ if (pa_solaris_set_buffer(fd, buffer_size) < 0)
+ goto fail;
+
+ u = pa_xmalloc(sizeof(struct userdata));
+ u->core = m->core;
+
+ u->fd = fd;
+
+ pa_memchunk_reset(&u->memchunk);
+
+ /* We use this to get a reasonable chunk size */
+ u->page_size = PA_PAGE_SIZE;
+
+ u->frame_size = pa_frame_size(&ss);
+ u->buffer_size = buffer_size;
+
+ u->written_bytes = 0;
+ u->read_bytes = 0;
+
+ u->module = m;
+ m->userdata = u;
+
+ pa_thread_mq_init(&u->thread_mq, m->core->mainloop);
+
+ u->rtpoll = pa_rtpoll_new();
+ pa_rtpoll_item_new_asyncmsgq(u->rtpoll, PA_RTPOLL_EARLY, u->thread_mq.inq);
+
+ pa_rtpoll_set_timer_periodic(u->rtpoll, pa_bytes_to_usec(u->buffer_size / 10, &ss));
+
+ u->rtpoll_item = pa_rtpoll_item_new(u->rtpoll, PA_RTPOLL_NEVER, 1);
+ pollfd = pa_rtpoll_item_get_pollfd(u->rtpoll_item, NULL);
+ pollfd->fd = fd;
+ pollfd->events = 0;
+ pollfd->revents = 0;
+
+ if (mode != O_WRONLY) {
+ u->source = pa_source_new(m->core, __FILE__, pa_modargs_get_value(ma, "source_name", DEFAULT_SOURCE_NAME), 0, &ss, &map);
+ pa_assert(u->source);
+
+ u->source->userdata = u;
+ u->source->parent.process_msg = source_process_msg;
+
+ pa_source_set_module(u->source, m);
+ pa_source_set_description(u->source, t = pa_sprintf_malloc("Solaris PCM on '%s'", p));
+ pa_xfree(t);
+ pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
+ pa_source_set_rtpoll(u->source, u->rtpoll);
+
+ u->source->flags = PA_SOURCE_HARDWARE|PA_SOURCE_LATENCY|PA_SOURCE_HW_VOLUME_CTRL;
+ u->source->refresh_volume = 1;
+ } else
+ u->source = NULL;
+
+ if (mode != O_RDONLY) {
+ u->sink = pa_sink_new(m->core, __FILE__, pa_modargs_get_value(ma, "sink_name", DEFAULT_SINK_NAME), 0, &ss, &map);
+ pa_assert(u->sink);
+
+ u->sink->userdata = u;
+ u->sink->parent.process_msg = sink_process_msg;
+
+ pa_sink_set_module(u->sink, m);
+ pa_sink_set_description(u->sink, t = pa_sprintf_malloc("Solaris PCM on '%s'", p));
+ pa_xfree(t);
+ pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
+ pa_sink_set_rtpoll(u->sink, u->rtpoll);
+
+ u->sink->flags = PA_SINK_HARDWARE|PA_SINK_LATENCY|PA_SINK_HW_VOLUME_CTRL;
+ u->sink->refresh_volume = 1;
+ u->sink->refresh_mute = 1;
+ } else
+ u->sink = NULL;
+
+ pa_assert(u->source || u->sink);
+
+ u->sig = pa_signal_new(SIGPOLL, sig_callback, u);
+ pa_assert(u->sig);
+ ioctl(u->fd, I_SETSIG, S_MSG);
+
+ if (!(u->thread = pa_thread_new(thread_func, u))) {
+ pa_log("Failed to create thread.");
+ goto fail;
+ }
+
+ /* Read mixer settings */
+ if (u->source)
+ pa_asyncmsgq_send(u->thread_mq.inq, PA_MSGOBJECT(u->source), PA_SOURCE_MESSAGE_GET_VOLUME, &u->source->volume, 0, NULL);
+ if (u->sink) {
+ pa_asyncmsgq_send(u->thread_mq.inq, PA_MSGOBJECT(u->sink), PA_SINK_MESSAGE_GET_VOLUME, &u->sink->volume, 0, NULL);
+ pa_asyncmsgq_send(u->thread_mq.inq, PA_MSGOBJECT(u->sink), PA_SINK_MESSAGE_GET_MUTE, &u->sink->muted, 0, NULL);
+ }
+
+ if (u->sink)
+ pa_sink_put(u->sink);
+ if (u->source)
+ pa_source_put(u->source);
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+ if (u)
+ pa__done(m);
+ else if (fd >= 0)
+ close(fd);
+
+ if (ma)
+ pa_modargs_free(ma);
+
+ return -1;
+}
+
+void pa__done(pa_module *m) {
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ ioctl(u->fd, I_SETSIG, 0);
+ pa_signal_free(u->sig);
+
+ if (u->sink)
+ pa_sink_unlink(u->sink);
+
+ if (u->source)
+ pa_source_unlink(u->source);
+
+ if (u->thread) {
+ pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
+ pa_thread_free(u->thread);
+ }
+
+ pa_thread_mq_done(&u->thread_mq);
+
+ if (u->sink)
+ pa_sink_unref(u->sink);
+
+ if (u->source)
+ pa_source_unref(u->source);
+
+ if (u->memchunk.memblock)
+ pa_memblock_unref(u->memchunk.memblock);
+
+ if (u->rtpoll_item)
+ pa_rtpoll_item_free(u->rtpoll_item);
+
+ if (u->rtpoll)
+ pa_rtpoll_free(u->rtpoll);
+
+ if (u->fd >= 0)
+ close(u->fd);
+
+ pa_xfree(u);
+}
diff --git a/src/modules/module-suspend-on-idle.c b/src/modules/module-suspend-on-idle.c
new file mode 100644
index 00000000..4c260d76
--- /dev/null
+++ b/src/modules/module-suspend-on-idle.c
@@ -0,0 +1,446 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <pulse/xmalloc.h>
+#include <pulse/timeval.h>
+
+#include <pulsecore/core.h>
+#include <pulsecore/sink-input.h>
+#include <pulsecore/source-output.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+#include <pulsecore/namereg.h>
+
+#include "module-suspend-on-idle-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("When a sink/source is idle for too long, suspend it");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(TRUE);
+
+static const char* const valid_modargs[] = {
+ "timeout",
+ NULL,
+};
+
+struct userdata {
+ pa_core *core;
+ pa_usec_t timeout;
+ pa_hashmap *device_infos;
+ pa_hook_slot
+ *sink_new_slot,
+ *source_new_slot,
+ *sink_unlink_slot,
+ *source_unlink_slot,
+ *sink_state_changed_slot,
+ *source_state_changed_slot;
+
+ pa_hook_slot
+ *sink_input_new_slot,
+ *source_output_new_slot,
+ *sink_input_unlink_slot,
+ *source_output_unlink_slot,
+ *sink_input_move_slot,
+ *source_output_move_slot,
+ *sink_input_state_changed_slot,
+ *source_output_state_changed_slot;
+};
+
+struct device_info {
+ struct userdata *userdata;
+ pa_sink *sink;
+ pa_source *source;
+ struct timeval last_use;
+ pa_time_event *time_event;
+};
+
+static void timeout_cb(pa_mainloop_api*a, pa_time_event* e, const struct timeval *tv, void *userdata) {
+ struct device_info *d = userdata;
+
+ pa_assert(d);
+
+ d->userdata->core->mainloop->time_restart(d->time_event, NULL);
+
+ if (d->sink && pa_sink_used_by(d->sink) <= 0 && pa_sink_get_state(d->sink) != PA_SINK_SUSPENDED) {
+ pa_log_info("Sink %s idle for too long, suspending ...", d->sink->name);
+ pa_sink_suspend(d->sink, TRUE);
+ }
+
+ if (d->source && pa_source_used_by(d->source) <= 0 && pa_source_get_state(d->source) != PA_SOURCE_SUSPENDED) {
+ pa_log_info("Source %s idle for too long, suspending ...", d->source->name);
+ pa_source_suspend(d->source, TRUE);
+ }
+}
+
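+/* (Re)arm the idle timer: remember the time of last use and schedule the
+ * suspend check 'timeout' seconds from now */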
+static void restart(struct device_info *d) {
+ struct timeval tv;
+ pa_assert(d);
+
+ pa_gettimeofday(&tv);
+ d->last_use = tv;
+ pa_timeval_add(&tv, d->userdata->timeout*1000000);
+ d->userdata->core->mainloop->time_restart(d->time_event, &tv);
+
+ if (d->sink)
+ pa_log_debug("Sink %s becomes idle.", d->sink->name);
+ if (d->source)
+ pa_log_debug("Source %s becomes idle.", d->source->name);
+}
+
+static void resume(struct device_info *d) {
+ pa_assert(d);
+
+ d->userdata->core->mainloop->time_restart(d->time_event, NULL);
+
+ if (d->sink) {
+ pa_sink_suspend(d->sink, FALSE);
+
+ pa_log_debug("Sink %s becomes busy.", d->sink->name);
+ }
+
+ if (d->source) {
+ pa_source_suspend(d->source, FALSE);
+
+ pa_log_debug("Source %s becomes busy.", d->source->name);
+ }
+}
+
+static pa_hook_result_t sink_input_fixate_hook_cb(pa_core *c, pa_sink_input_new_data *data, struct userdata *u) {
+ struct device_info *d;
+
+ pa_assert(c);
+ pa_assert(data);
+ pa_assert(u);
+
+ if ((d = pa_hashmap_get(u->device_infos, data->sink)))
+ resume(d);
+
+ return PA_HOOK_OK;
+}
+
+static pa_hook_result_t source_output_fixate_hook_cb(pa_core *c, pa_source_output_new_data *data, struct userdata *u) {
+ struct device_info *d;
+
+ pa_assert(c);
+ pa_assert(data);
+ pa_assert(u);
+
+ if ((d = pa_hashmap_get(u->device_infos, data->source)))
+ resume(d);
+
+ return PA_HOOK_OK;
+}
+
+static pa_hook_result_t sink_input_unlink_hook_cb(pa_core *c, pa_sink_input *s, struct userdata *u) {
+ pa_assert(c);
+ pa_sink_input_assert_ref(s);
+ pa_assert(u);
+
+ if (pa_sink_used_by(s->sink) <= 0) {
+ struct device_info *d;
+ if ((d = pa_hashmap_get(u->device_infos, s->sink)))
+ restart(d);
+ }
+
+ return PA_HOOK_OK;
+}
+
+static pa_hook_result_t source_output_unlink_hook_cb(pa_core *c, pa_source_output *s, struct userdata *u) {
+ pa_assert(c);
+ pa_source_output_assert_ref(s);
+ pa_assert(u);
+
+ if (pa_source_used_by(s->source) <= 0) {
+ struct device_info *d;
+ if ((d = pa_hashmap_get(u->device_infos, s->source)))
+ restart(d);
+ }
+
+ return PA_HOOK_OK;
+}
+
+static pa_hook_result_t sink_input_move_hook_cb(pa_core *c, pa_sink_input_move_hook_data *data, struct userdata *u) {
+ struct device_info *d;
+
+ pa_assert(c);
+ pa_assert(data);
+ pa_assert(u);
+
+ if ((d = pa_hashmap_get(u->device_infos, data->destination)))
+ resume(d);
+
+ if (pa_sink_used_by(data->sink_input->sink) <= 1)
+ if ((d = pa_hashmap_get(u->device_infos, data->sink_input->sink)))
+ restart(d);
+
+ return PA_HOOK_OK;
+}
+
+static pa_hook_result_t source_output_move_hook_cb(pa_core *c, pa_source_output_move_hook_data *data, struct userdata *u) {
+ struct device_info *d;
+
+ pa_assert(c);
+ pa_assert(data);
+ pa_assert(u);
+
+ if ((d = pa_hashmap_get(u->device_infos, data->destination)))
+ resume(d);
+
+ if (pa_source_used_by(data->source_output->source) <= 1)
+ if ((d = pa_hashmap_get(u->device_infos, data->source_output->source)))
+ restart(d);
+
+ return PA_HOOK_OK;
+}
+
+static pa_hook_result_t sink_input_state_changed_hook_cb(pa_core *c, pa_sink_input *s, struct userdata *u) {
+ struct device_info *d;
+ pa_sink_input_state_t state;
+ pa_assert(c);
+ pa_sink_input_assert_ref(s);
+ pa_assert(u);
+
+ state = pa_sink_input_get_state(s);
+ if (state == PA_SINK_INPUT_RUNNING || state == PA_SINK_INPUT_DRAINED)
+ if ((d = pa_hashmap_get(u->device_infos, s->sink)))
+ resume(d);
+
+ return PA_HOOK_OK;
+}
+
+static pa_hook_result_t source_output_state_changed_hook_cb(pa_core *c, pa_source_output *s, struct userdata *u) {
+ struct device_info *d;
+ pa_source_output_state_t state;
+ pa_assert(c);
+ pa_source_output_assert_ref(s);
+ pa_assert(u);
+
+ state = pa_source_output_get_state(s);
+ if (state == PA_SOURCE_OUTPUT_RUNNING)
+ if ((d = pa_hashmap_get(u->device_infos, s->source)))
+ resume(d);
+
+ return PA_HOOK_OK;
+}
+
+static pa_hook_result_t device_new_hook_cb(pa_core *c, pa_object *o, struct userdata *u) {
+ struct device_info *d;
+ pa_source *source;
+ pa_sink *sink;
+
+ pa_assert(c);
+ pa_object_assert_ref(o);
+ pa_assert(u);
+
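+ /* Called for every newly created sink or source (and, from pa__init(), for the ones that already exist): set up per-device bookkeeping and start the idle timer if the device is currently unused. */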
+ source = pa_source_isinstance(o) ? PA_SOURCE(o) : NULL;
+ sink = pa_sink_isinstance(o) ? PA_SINK(o) : NULL;
+
+ pa_assert(source || sink);
+
+ d = pa_xnew(struct device_info, 1);
+ d->userdata = u;
+ d->source = source ? pa_source_ref(source) : NULL;
+ d->sink = sink ? pa_sink_ref(sink) : NULL;
+ d->time_event = c->mainloop->time_new(c->mainloop, NULL, timeout_cb, d);
+ pa_hashmap_put(u->device_infos, o, d);
+
+ if ((d->sink && pa_sink_used_by(d->sink) <= 0) ||
+ (d->source && pa_source_used_by(d->source) <= 0))
+ restart(d);
+
+ return PA_HOOK_OK;
+}
+
+static void device_info_free(struct device_info *d) {
+ pa_assert(d);
+
+ if (d->source)
+ pa_source_unref(d->source);
+ if (d->sink)
+ pa_sink_unref(d->sink);
+
+ d->userdata->core->mainloop->time_free(d->time_event);
+
+ pa_xfree(d);
+}
+
+static pa_hook_result_t device_unlink_hook_cb(pa_core *c, pa_object *o, struct userdata *u) {
+ struct device_info *d;
+
+ pa_assert(c);
+ pa_object_assert_ref(o);
+ pa_assert(u);
+
+ if ((d = pa_hashmap_remove(u->device_infos, o)))
+ device_info_free(d);
+
+ return PA_HOOK_OK;
+}
+
+static pa_hook_result_t device_state_changed_hook_cb(pa_core *c, pa_object *o, struct userdata *u) {
+ struct device_info *d;
+
+ pa_assert(c);
+ pa_object_assert_ref(o);
+ pa_assert(u);
+
+ if (!(d = pa_hashmap_get(u->device_infos, o)))
+ return PA_HOOK_OK;
+
+ if (pa_sink_isinstance(o)) {
+ pa_sink *s = PA_SINK(o);
+ pa_sink_state_t state = pa_sink_get_state(s);
+
+ if (pa_sink_used_by(s) <= 0) {
+
+ if (PA_SINK_OPENED(state))
+ restart(d);
+
+ }
+
+ } else if (pa_source_isinstance(o)) {
+ pa_source *s = PA_SOURCE(o);
+ pa_source_state_t state = pa_source_get_state(s);
+
+ if (pa_source_used_by(s) <= 0) {
+
+ if (PA_SOURCE_OPENED(state))
+ restart(d);
+ }
+ }
+
+ return PA_HOOK_OK;
+}
+
+int pa__init(pa_module*m) {
+ pa_modargs *ma = NULL;
+ struct userdata *u;
+ uint32_t timeout = 1;
+ uint32_t idx;
+ pa_sink *sink;
+ pa_source *source;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments.");
+ goto fail;
+ }
+
+ if (pa_modargs_get_value_u32(ma, "timeout", &timeout) < 0) {
+ pa_log("Failed to parse timeout value.");
+ goto fail;
+ }
+
+ m->userdata = u = pa_xnew(struct userdata, 1);
+ u->core = m->core;
+ u->timeout = timeout;
+ u->device_infos = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
+
+ for (sink = pa_idxset_first(m->core->sinks, &idx); sink; sink = pa_idxset_next(m->core->sinks, &idx))
+ device_new_hook_cb(m->core, PA_OBJECT(sink), u);
+
+ for (source = pa_idxset_first(m->core->sources, &idx); source; source = pa_idxset_next(m->core->sources, &idx))
+ device_new_hook_cb(m->core, PA_OBJECT(source), u);
+
+ u->sink_new_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_NEW_POST], (pa_hook_cb_t) device_new_hook_cb, u);
+ u->source_new_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SOURCE_NEW_POST], (pa_hook_cb_t) device_new_hook_cb, u);
+ u->sink_unlink_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], (pa_hook_cb_t) device_unlink_hook_cb, u);
+ u->source_unlink_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK_POST], (pa_hook_cb_t) device_unlink_hook_cb, u);
+ u->sink_state_changed_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], (pa_hook_cb_t) device_state_changed_hook_cb, u);
+ u->source_state_changed_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SOURCE_STATE_CHANGED], (pa_hook_cb_t) device_state_changed_hook_cb, u);
+
+ u->sink_input_new_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_INPUT_FIXATE], (pa_hook_cb_t) sink_input_fixate_hook_cb, u);
+ u->source_output_new_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SOURCE_OUTPUT_FIXATE], (pa_hook_cb_t) source_output_fixate_hook_cb, u);
+ u->sink_input_unlink_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_INPUT_UNLINK_POST], (pa_hook_cb_t) sink_input_unlink_hook_cb, u);
+ u->source_output_unlink_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SOURCE_OUTPUT_UNLINK_POST], (pa_hook_cb_t) source_output_unlink_hook_cb, u);
+ u->sink_input_move_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_INPUT_MOVE], (pa_hook_cb_t) sink_input_move_hook_cb, u);
+ u->source_output_move_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SOURCE_OUTPUT_MOVE], (pa_hook_cb_t) source_output_move_hook_cb, u);
+ u->sink_input_state_changed_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_INPUT_STATE_CHANGED], (pa_hook_cb_t) sink_input_state_changed_hook_cb, u);
+ u->source_output_state_changed_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SOURCE_OUTPUT_STATE_CHANGED], (pa_hook_cb_t) source_output_state_changed_hook_cb, u);
+
+ pa_modargs_free(ma);
+ return 0;
+
+fail:
+
+ if (ma)
+ pa_modargs_free(ma);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+ struct device_info *d;
+
+ pa_assert(m);
+
+ if (!m->userdata)
+ return;
+
+ u = m->userdata;
+
+ if (u->sink_new_slot)
+ pa_hook_slot_free(u->sink_new_slot);
+ if (u->sink_unlink_slot)
+ pa_hook_slot_free(u->sink_unlink_slot);
+ if (u->sink_state_changed_slot)
+ pa_hook_slot_free(u->sink_state_changed_slot);
+
+ if (u->source_new_slot)
+ pa_hook_slot_free(u->source_new_slot);
+ if (u->source_unlink_slot)
+ pa_hook_slot_free(u->source_unlink_slot);
+ if (u->source_state_changed_slot)
+ pa_hook_slot_free(u->source_state_changed_slot);
+
+ if (u->sink_input_new_slot)
+ pa_hook_slot_free(u->sink_input_new_slot);
+ if (u->sink_input_unlink_slot)
+ pa_hook_slot_free(u->sink_input_unlink_slot);
+ if (u->sink_input_move_slot)
+ pa_hook_slot_free(u->sink_input_move_slot);
+ if (u->sink_input_state_changed_slot)
+ pa_hook_slot_free(u->sink_input_state_changed_slot);
+
+ if (u->source_output_new_slot)
+ pa_hook_slot_free(u->source_output_new_slot);
+ if (u->source_output_unlink_slot)
+ pa_hook_slot_free(u->source_output_unlink_slot);
+ if (u->source_output_move_slot)
+ pa_hook_slot_free(u->source_output_move_slot);
+ if (u->source_output_state_changed_slot)
+ pa_hook_slot_free(u->source_output_state_changed_slot);
+
+ while ((d = pa_hashmap_steal_first(u->device_infos)))
+ device_info_free(d);
+
+ pa_hashmap_free(u->device_infos, NULL, NULL);
+
+ pa_xfree(u);
+}
diff --git a/src/modules/module-tunnel.c b/src/modules/module-tunnel.c
new file mode 100644
index 00000000..a53e3932
--- /dev/null
+++ b/src/modules/module-tunnel.c
@@ -0,0 +1,1509 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+ Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <pulse/timeval.h>
+#include <pulse/util.h>
+#include <pulse/version.h>
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/module.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+#include <pulsecore/core-subscribe.h>
+#include <pulsecore/sink-input.h>
+#include <pulsecore/pdispatch.h>
+#include <pulsecore/pstream.h>
+#include <pulsecore/pstream-util.h>
+#include <pulsecore/authkey.h>
+#include <pulsecore/socket-client.h>
+#include <pulsecore/socket-util.h>
+#include <pulsecore/authkey-prop.h>
+#include <pulsecore/time-smoother.h>
+#include <pulsecore/thread.h>
+#include <pulsecore/thread-mq.h>
+#include <pulsecore/rtclock.h>
+#include <pulsecore/core-error.h>
+
+#ifdef TUNNEL_SINK
+#include "module-tunnel-sink-symdef.h"
+PA_MODULE_DESCRIPTION("Tunnel module for sinks");
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_USAGE(
+ "server=<address> "
+ "sink=<remote sink name> "
+ "cookie=<filename> "
+ "format=<sample format> "
+ "channels=<number of channels> "
+ "rate=<sample rate> "
+ "sink_name=<name for the local sink> "
+ "channel_map=<channel map>");
+#else
+#include "module-tunnel-source-symdef.h"
+PA_MODULE_DESCRIPTION("Tunnel module for sources");
+PA_MODULE_USAGE(
+ "server=<address> "
+ "source=<remote source name> "
+ "cookie=<filename> "
+ "format=<sample format> "
+ "channels=<number of channels> "
+ "rate=<sample rate> "
+ "source_name=<name for the local source> "
+ "channel_map=<channel map>");
+#endif
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+
+#define DEFAULT_TLENGTH_MSEC 100
+#define DEFAULT_MINREQ_MSEC 10
+#define DEFAULT_MAXLENGTH_MSEC ((DEFAULT_TLENGTH_MSEC*3)/2)
+#define DEFAULT_FRAGSIZE_MSEC 10
+
+#define DEFAULT_TIMEOUT 5
+
+#define LATENCY_INTERVAL 10
+
+static const char* const valid_modargs[] = {
+ "server",
+ "cookie",
+ "format",
+ "channels",
+ "rate",
+#ifdef TUNNEL_SINK
+ "sink_name",
+ "sink",
+#else
+ "source_name",
+ "source",
+#endif
+ "channel_map",
+ NULL,
+};
+
+enum {
+ SOURCE_MESSAGE_POST = PA_SOURCE_MESSAGE_MAX
+};
+
+enum {
+ SINK_MESSAGE_REQUEST = PA_SINK_MESSAGE_MAX,
+ SINK_MESSAGE_POST
+};
+
+#ifdef TUNNEL_SINK
+static void command_request(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata);
+#endif
+static void command_subscribe_event(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata);
+static void command_stream_killed(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata);
+static void command_overflow(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata);
+static void command_underflow(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata);
+static void command_suspend(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata);
+static void command_moved(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata);
+
+static const pa_pdispatch_cb_t command_table[PA_COMMAND_MAX] = {
+#ifdef TUNNEL_SINK
+ [PA_COMMAND_REQUEST] = command_request,
+#endif
+ [PA_COMMAND_SUBSCRIBE_EVENT] = command_subscribe_event,
+ [PA_COMMAND_OVERFLOW] = command_overflow,
+ [PA_COMMAND_UNDERFLOW] = command_underflow,
+ [PA_COMMAND_PLAYBACK_STREAM_KILLED] = command_stream_killed,
+ [PA_COMMAND_RECORD_STREAM_KILLED] = command_stream_killed,
+ [PA_COMMAND_PLAYBACK_STREAM_SUSPENDED] = command_suspend,
+ [PA_COMMAND_RECORD_STREAM_SUSPENDED] = command_suspend,
+ [PA_COMMAND_PLAYBACK_STREAM_MOVED] = command_moved,
+ [PA_COMMAND_RECORD_STREAM_MOVED] = command_moved,
+};
+
+struct userdata {
+ pa_core *core;
+ pa_module *module;
+
+ pa_thread_mq thread_mq;
+ pa_rtpoll *rtpoll;
+ pa_thread *thread;
+
+ pa_socket_client *client;
+ pa_pstream *pstream;
+ pa_pdispatch *pdispatch;
+
+ char *server_name;
+#ifdef TUNNEL_SINK
+ char *sink_name;
+ pa_sink *sink;
+ uint32_t requested_bytes;
+#else
+ char *source_name;
+ pa_source *source;
+#endif
+
+ uint8_t auth_cookie[PA_NATIVE_COOKIE_LENGTH];
+
+ uint32_t version;
+ uint32_t ctag;
+ uint32_t device_index;
+ uint32_t channel;
+
+ int64_t counter, counter_delta;
+
+ pa_time_event *time_event;
+
+ pa_bool_t auth_cookie_in_property;
+
+ pa_smoother *smoother;
+
+ char *device_description;
+ char *server_fqdn;
+ char *user_name;
+
+ uint32_t maxlength;
+#ifdef TUNNEL_SINK
+ uint32_t tlength;
+ uint32_t minreq;
+ uint32_t prebuf;
+#else
+ uint32_t fragsize;
+#endif
+};
+
+static void command_stream_killed(pa_pdispatch *pd, PA_GCC_UNUSED uint32_t command, PA_GCC_UNUSED uint32_t tag, pa_tagstruct *t, void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(pd);
+ pa_assert(t);
+ pa_assert(u);
+ pa_assert(u->pdispatch == pd);
+
+ pa_log_warn("Stream killed");
+ pa_module_unload_request(u->module);
+}
+
+static void command_overflow(pa_pdispatch *pd, PA_GCC_UNUSED uint32_t command, PA_GCC_UNUSED uint32_t tag, pa_tagstruct *t, void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(pd);
+ pa_assert(t);
+ pa_assert(u);
+ pa_assert(u->pdispatch == pd);
+
+ pa_log_warn("Server signalled buffer overrun.");
+}
+
+static void command_underflow(pa_pdispatch *pd, PA_GCC_UNUSED uint32_t command, PA_GCC_UNUSED uint32_t tag, pa_tagstruct *t, void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(pd);
+ pa_assert(t);
+ pa_assert(u);
+ pa_assert(u->pdispatch == pd);
+
+ pa_log_warn("Server signalled buffer underrun.");
+}
+
+static void command_suspend(pa_pdispatch *pd, PA_GCC_UNUSED uint32_t command, PA_GCC_UNUSED uint32_t tag, pa_tagstruct *t, void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(pd);
+ pa_assert(t);
+ pa_assert(u);
+ pa_assert(u->pdispatch == pd);
+
+ pa_log_debug("Server reports a stream suspension.");
+}
+
+static void command_moved(pa_pdispatch *pd, PA_GCC_UNUSED uint32_t command, PA_GCC_UNUSED uint32_t tag, pa_tagstruct *t, void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(pd);
+ pa_assert(t);
+ pa_assert(u);
+ pa_assert(u->pdispatch == pd);
+
+ pa_log_debug("Server reports a stream move.");
+}
+
+static void stream_cork(struct userdata *u, pa_bool_t cork) {
+ pa_tagstruct *t;
+ pa_assert(u);
+
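+ /* Pause or resume the local time smoother and, if we already have a connection, ask the server to cork/uncork the remote stream. */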
+ if (cork)
+ pa_smoother_pause(u->smoother, pa_rtclock_usec());
+ else
+ pa_smoother_resume(u->smoother, pa_rtclock_usec());
+
+ if (!u->pstream)
+ return;
+
+ t = pa_tagstruct_new(NULL, 0);
+#ifdef TUNNEL_SINK
+ pa_tagstruct_putu32(t, PA_COMMAND_CORK_PLAYBACK_STREAM);
+#else
+ pa_tagstruct_putu32(t, PA_COMMAND_CORK_RECORD_STREAM);
+#endif
+ pa_tagstruct_putu32(t, u->ctag++);
+ pa_tagstruct_putu32(t, u->channel);
+ pa_tagstruct_put_boolean(t, !!cork);
+ pa_pstream_send_tagstruct(u->pstream, t);
+}
+
+#ifdef TUNNEL_SINK
+
+static void send_data(struct userdata *u) {
+ pa_assert(u);
+
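+ /* Render as much audio as the server has asked for and hand it to the main thread, which forwards it over the pstream (see SINK_MESSAGE_POST below). */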
+ while (u->requested_bytes > 0) {
+ pa_memchunk memchunk;
+ pa_sink_render(u->sink, u->requested_bytes, &memchunk);
+ pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_POST, NULL, 0, &memchunk, NULL);
+ pa_memblock_unref(memchunk.memblock);
+ u->requested_bytes -= memchunk.length;
+ }
+}
+
+/* This function is called from IO context -- except when it is not. */
+static int sink_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u = PA_SINK(o)->userdata;
+
+ switch (code) {
+
+ case PA_SINK_MESSAGE_SET_STATE: {
+ int r;
+
+ /* First, change the state, because otherwise pa_sink_render() would fail */
+ if ((r = pa_sink_process_msg(o, code, data, offset, chunk)) >= 0)
+ if (PA_SINK_OPENED((pa_sink_state_t) PA_PTR_TO_UINT(data)))
+ send_data(u);
+
+ return r;
+ }
+
+ case SINK_MESSAGE_REQUEST:
+
+ pa_assert(offset > 0);
+ u->requested_bytes += (size_t) offset;
+
+ if (PA_SINK_OPENED(u->sink->thread_info.state))
+ send_data(u);
+
+ return 0;
+
+ case SINK_MESSAGE_POST:
+
+ /* OK, this might be a bit confusing. This message is
+ * delivered to us from the main context -- NOT from the
+ * IO thread context where the rest of the messages are
+ * dispatched. Yeah, ugly, but I am a lazy bastard. */
+
+ pa_pstream_send_memblock(u->pstream, u->channel, 0, PA_SEEK_RELATIVE, chunk);
+ u->counter += chunk->length;
+ u->counter_delta += chunk->length;
+ return 0;
+ }
+
+ return pa_sink_process_msg(o, code, data, offset, chunk);
+}
+
+static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
+ struct userdata *u;
+ pa_sink_assert_ref(s);
+ u = s->userdata;
+
+ switch ((pa_sink_state_t) state) {
+
+ case PA_SINK_SUSPENDED:
+ pa_assert(PA_SINK_OPENED(s->state));
+ stream_cork(u, TRUE);
+ break;
+
+ case PA_SINK_IDLE:
+ case PA_SINK_RUNNING:
+ if (s->state == PA_SINK_SUSPENDED)
+ stream_cork(u, FALSE);
+ break;
+
+ case PA_SINK_UNLINKED:
+ case PA_SINK_INIT:
+ ;
+ }
+
+ return 0;
+}
+
+#else
+
+static int source_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u = PA_SOURCE(o)->userdata;
+
+ switch (code) {
+ case SOURCE_MESSAGE_POST:
+
+ if (PA_SOURCE_OPENED(u->source->thread_info.state))
+ pa_source_post(u->source, chunk);
+ return 0;
+ }
+
+ return pa_source_process_msg(o, code, data, offset, chunk);
+}
+
+static int source_set_state(pa_source *s, pa_source_state_t state) {
+ struct userdata *u;
+ pa_source_assert_ref(s);
+ u = s->userdata;
+
+ switch ((pa_source_state_t) state) {
+
+ case PA_SOURCE_SUSPENDED:
+ pa_assert(PA_SOURCE_OPENED(s->state));
+ stream_cork(u, TRUE);
+ break;
+
+ case PA_SOURCE_IDLE:
+ case PA_SOURCE_RUNNING:
+ if (s->state == PA_SOURCE_SUSPENDED)
+ stream_cork(u, FALSE);
+ break;
+
+ case PA_SOURCE_UNLINKED:
+ case PA_SOURCE_INIT:
+ ;
+ }
+
+ return 0;
+}
+
+#endif
+
+static void thread_func(void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(u);
+
+ pa_log_debug("Thread starting up");
+
+ pa_thread_mq_install(&u->thread_mq);
+ pa_rtpoll_install(u->rtpoll);
+
+ for (;;) {
+ int ret;
+
+ if ((ret = pa_rtpoll_run(u->rtpoll, TRUE)) < 0)
+ goto fail;
+
+ if (ret == 0)
+ goto finish;
+ }
+
+fail:
+ /* If this was not a regular exit from the loop we have to continue
+ * processing messages until we receive PA_MESSAGE_SHUTDOWN */
+ pa_asyncmsgq_post(u->thread_mq.outq, PA_MSGOBJECT(u->core), PA_CORE_MESSAGE_UNLOAD_MODULE, u->module, 0, NULL, NULL);
+ pa_asyncmsgq_wait_for(u->thread_mq.inq, PA_MESSAGE_SHUTDOWN);
+
+finish:
+ pa_log_debug("Thread shutting down");
+}
+
+#ifdef TUNNEL_SINK
+static void command_request(pa_pdispatch *pd, uint32_t command, PA_GCC_UNUSED uint32_t tag, pa_tagstruct *t, void *userdata) {
+ struct userdata *u = userdata;
+ uint32_t bytes, channel;
+
+ pa_assert(pd);
+ pa_assert(command == PA_COMMAND_REQUEST);
+ pa_assert(t);
+ pa_assert(u);
+ pa_assert(u->pdispatch == pd);
+
+ if (pa_tagstruct_getu32(t, &channel) < 0 ||
+ pa_tagstruct_getu32(t, &bytes) < 0) {
+ pa_log("Invalid protocol reply");
+ goto fail;
+ }
+
+ if (channel != u->channel) {
+ pa_log("Received data for invalid channel");
+ goto fail;
+ }
+
+ pa_asyncmsgq_send(u->sink->asyncmsgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_REQUEST, NULL, bytes, NULL);
+ return;
+
+fail:
+ pa_module_unload_request(u->module);
+}
+
+#endif
+
+static void stream_get_latency_callback(pa_pdispatch *pd, uint32_t command, PA_GCC_UNUSED uint32_t tag, pa_tagstruct *t, void *userdata) {
+ struct userdata *u = userdata;
+ pa_usec_t sink_usec, source_usec, transport_usec, host_usec, k;
+ int playing;
+ int64_t write_index, read_index;
+ struct timeval local, remote, now;
+
+ pa_assert(pd);
+ pa_assert(u);
+
+ if (command != PA_COMMAND_REPLY) {
+ if (command == PA_COMMAND_ERROR)
+ pa_log("Failed to get latency.");
+ else
+ pa_log("Protocol error 1.");
+ goto fail;
+ }
+
+ if (pa_tagstruct_get_usec(t, &sink_usec) < 0 ||
+ pa_tagstruct_get_usec(t, &source_usec) < 0 ||
+ pa_tagstruct_get_boolean(t, &playing) < 0 ||
+ pa_tagstruct_get_timeval(t, &local) < 0 ||
+ pa_tagstruct_get_timeval(t, &remote) < 0 ||
+ pa_tagstruct_gets64(t, &write_index) < 0 ||
+ pa_tagstruct_gets64(t, &read_index) < 0) {
+ pa_log("Invalid reply. (latency)");
+ goto fail;
+ }
+
+ pa_gettimeofday(&now);
+
+ if (pa_timeval_cmp(&local, &remote) < 0 && pa_timeval_cmp(&remote, &now)) {
+ /* local and remote seem to have synchronized clocks */
+#ifdef TUNNEL_SINK
+ transport_usec = pa_timeval_diff(&remote, &local);
+#else
+ transport_usec = pa_timeval_diff(&now, &remote);
+#endif
+ } else
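+ /* No reliably synchronized clocks: assume the transport delay is half of the measured round-trip time. */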
+ transport_usec = pa_timeval_diff(&now, &local)/2;
+
+#ifdef TUNNEL_SINK
+ host_usec = sink_usec + transport_usec;
+#else
+ host_usec = source_usec + transport_usec;
+ if (host_usec > sink_usec)
+ host_usec -= sink_usec;
+ else
+ host_usec = 0;
+#endif
+
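+ /* Convert the byte counter at the time of the request into a stream time and correct it by the host and transport latency; the result feeds the smoother that sink_get_latency()/source_get_latency() interpolate from. */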
+#ifdef TUNNEL_SINK
+ k = pa_bytes_to_usec(u->counter - u->counter_delta, &u->sink->sample_spec);
+
+ if (k > host_usec)
+ k -= host_usec;
+ else
+ k = 0;
+#else
+ k = pa_bytes_to_usec(u->counter - u->counter_delta, &u->source->sample_spec);
+ k += host_usec;
+#endif
+
+ pa_smoother_put(u->smoother, pa_rtclock_usec(), k);
+
+ return;
+
+fail:
+ pa_module_unload_request(u->module);
+}
+
+static void request_latency(struct userdata *u) {
+ pa_tagstruct *t;
+ struct timeval now;
+ uint32_t tag;
+ pa_assert(u);
+
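+ /* Ask the server for the current stream latency; the reply is handled in stream_get_latency_callback(). Resetting counter_delta below lets the reply handler reconstruct the value of the byte counter at the time this request was sent. */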
+ t = pa_tagstruct_new(NULL, 0);
+#ifdef TUNNEL_SINK
+ pa_tagstruct_putu32(t, PA_COMMAND_GET_PLAYBACK_LATENCY);
+#else
+ pa_tagstruct_putu32(t, PA_COMMAND_GET_RECORD_LATENCY);
+#endif
+ pa_tagstruct_putu32(t, tag = u->ctag++);
+ pa_tagstruct_putu32(t, u->channel);
+
+ pa_gettimeofday(&now);
+ pa_tagstruct_put_timeval(t, &now);
+
+ pa_pstream_send_tagstruct(u->pstream, t);
+ pa_pdispatch_register_reply(u->pdispatch, tag, DEFAULT_TIMEOUT, stream_get_latency_callback, u, NULL);
+
+ u->counter_delta = 0;
+}
+
+static void timeout_callback(pa_mainloop_api *m, pa_time_event*e, PA_GCC_UNUSED const struct timeval *tv, void *userdata) {
+ struct userdata *u = userdata;
+ struct timeval ntv;
+
+ pa_assert(m);
+ pa_assert(e);
+ pa_assert(u);
+
+ request_latency(u);
+
+ pa_gettimeofday(&ntv);
+ ntv.tv_sec += LATENCY_INTERVAL;
+ m->time_restart(e, &ntv);
+}
+
+#ifdef TUNNEL_SINK
+static pa_usec_t sink_get_latency(pa_sink *s) {
+ pa_usec_t t, c;
+ struct userdata *u = s->userdata;
+
+ pa_sink_assert_ref(s);
+
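+ /* c is the amount of audio rendered so far, t the playback time interpolated by the smoother; the difference is what is still buffered somewhere between us and the remote sink, i.e. our latency. */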
+ c = pa_bytes_to_usec(u->counter, &s->sample_spec);
+ t = pa_smoother_get(u->smoother, pa_rtclock_usec());
+
+ return c > t ? c - t : 0;
+}
+#else
+static pa_usec_t source_get_latency(pa_source *s) {
+ pa_usec_t t, c;
+ struct userdata *u = s->userdata;
+
+ pa_source_assert_ref(s);
+
+ c = pa_bytes_to_usec(u->counter, &s->sample_spec);
+ t = pa_smoother_get(u->smoother, pa_rtclock_usec());
+
+ return t > c ? t - c : 0;
+}
+#endif
+
+static void update_description(struct userdata *u) {
+ char *d;
+ char un[128], hn[128];
+ pa_tagstruct *t;
+
+ pa_assert(u);
+
+ if (!u->server_fqdn || !u->user_name || !u->device_description)
+ return;
+
+ d = pa_sprintf_malloc("%s on %s@%s", u->device_description, u->user_name, u->server_fqdn);
+
+#ifdef TUNNEL_SINK
+ pa_sink_set_description(u->sink, d);
+#else
+ pa_source_set_description(u->source, d);
+#endif
+
+ pa_xfree(d);
+
+ d = pa_sprintf_malloc("%s for %s@%s", u->device_description,
+ pa_get_user_name(un, sizeof(un)),
+ pa_get_host_name(hn, sizeof(hn)));
+
+ t = pa_tagstruct_new(NULL, 0);
+#ifdef TUNNEL_SINK
+ pa_tagstruct_putu32(t, PA_COMMAND_SET_PLAYBACK_STREAM_NAME);
+#else
+ pa_tagstruct_putu32(t, PA_COMMAND_SET_RECORD_STREAM_NAME);
+#endif
+ pa_tagstruct_putu32(t, u->ctag++);
+ pa_tagstruct_putu32(t, u->channel);
+ pa_tagstruct_puts(t, d);
+ pa_pstream_send_tagstruct(u->pstream, t);
+
+ pa_xfree(d);
+}
+
+static void server_info_cb(pa_pdispatch *pd, uint32_t command, PA_GCC_UNUSED uint32_t tag, pa_tagstruct *t, void *userdata) {
+ struct userdata *u = userdata;
+ pa_sample_spec ss;
+ const char *server_name, *server_version, *user_name, *host_name, *default_sink_name, *default_source_name;
+ uint32_t cookie;
+
+ pa_assert(pd);
+ pa_assert(u);
+
+ if (command != PA_COMMAND_REPLY) {
+ if (command == PA_COMMAND_ERROR)
+ pa_log("Failed to get info.");
+ else
+ pa_log("Protocol error 6.");
+ goto fail;
+ }
+
+ if (pa_tagstruct_gets(t, &server_name) < 0 ||
+ pa_tagstruct_gets(t, &server_version) < 0 ||
+ pa_tagstruct_gets(t, &user_name) < 0 ||
+ pa_tagstruct_gets(t, &host_name) < 0 ||
+ pa_tagstruct_get_sample_spec(t, &ss) < 0 ||
+ pa_tagstruct_gets(t, &default_sink_name) < 0 ||
+ pa_tagstruct_gets(t, &default_source_name) < 0 ||
+ pa_tagstruct_getu32(t, &cookie) < 0) {
+ pa_log("Invalid reply. (get_server_info)");
+ goto fail;
+ }
+
+ pa_xfree(u->server_fqdn);
+ u->server_fqdn = pa_xstrdup(host_name);
+
+ pa_xfree(u->user_name);
+ u->user_name = pa_xstrdup(user_name);
+
+ update_description(u);
+
+ return;
+
+fail:
+ pa_module_unload_request(u->module);
+}
+
+#ifdef TUNNEL_SINK
+
+static void sink_info_cb(pa_pdispatch *pd, uint32_t command, PA_GCC_UNUSED uint32_t tag, pa_tagstruct *t, void *userdata) {
+ struct userdata *u = userdata;
+ uint32_t idx, owner_module, monitor_source, flags;
+ const char *name, *description, *monitor_source_name, *driver;
+ pa_sample_spec ss;
+ pa_channel_map cm;
+ pa_cvolume volume;
+ int mute;
+ pa_usec_t latency;
+
+ pa_assert(pd);
+ pa_assert(u);
+
+ if (command != PA_COMMAND_REPLY) {
+ if (command == PA_COMMAND_ERROR)
+ pa_log("Failed to get info.");
+ else
+ pa_log("Protocol error 5.");
+ goto fail;
+ }
+
+ if (pa_tagstruct_getu32(t, &idx) < 0 ||
+ pa_tagstruct_gets(t, &name) < 0 ||
+ pa_tagstruct_gets(t, &description) < 0 ||
+ pa_tagstruct_get_sample_spec(t, &ss) < 0 ||
+ pa_tagstruct_get_channel_map(t, &cm) < 0 ||
+ pa_tagstruct_getu32(t, &owner_module) < 0 ||
+ pa_tagstruct_get_cvolume(t, &volume) < 0 ||
+ pa_tagstruct_get_boolean(t, &mute) < 0 ||
+ pa_tagstruct_getu32(t, &monitor_source) < 0 ||
+ pa_tagstruct_gets(t, &monitor_source_name) < 0 ||
+ pa_tagstruct_get_usec(t, &latency) < 0 ||
+ pa_tagstruct_gets(t, &driver) < 0 ||
+ pa_tagstruct_getu32(t, &flags) < 0) {
+ pa_log("Invalid reply. (get_sink_info)");
+ goto fail;
+ }
+
+ if (strcmp(name, u->sink_name))
+ return;
+
+ pa_xfree(u->device_description);
+ u->device_description = pa_xstrdup(description);
+
+ update_description(u);
+
+ return;
+
+fail:
+ pa_module_unload_request(u->module);
+}
+
+static void sink_input_info_cb(pa_pdispatch *pd, uint32_t command, PA_GCC_UNUSED uint32_t tag, pa_tagstruct *t, void *userdata) {
+ struct userdata *u = userdata;
+ uint32_t idx, owner_module, client, sink;
+ pa_usec_t buffer_usec, sink_usec;
+ const char *name, *driver, *resample_method;
+ int mute;
+ pa_sample_spec sample_spec;
+ pa_channel_map channel_map;
+ pa_cvolume volume;
+
+ pa_assert(pd);
+ pa_assert(u);
+
+ if (command != PA_COMMAND_REPLY) {
+ if (command == PA_COMMAND_ERROR)
+ pa_log("Failed to get info.");
+ else
+ pa_log("Protocol error 2.");
+ goto fail;
+ }
+
+ if (pa_tagstruct_getu32(t, &idx) < 0 ||
+ pa_tagstruct_gets(t, &name) < 0 ||
+ pa_tagstruct_getu32(t, &owner_module) < 0 ||
+ pa_tagstruct_getu32(t, &client) < 0 ||
+ pa_tagstruct_getu32(t, &sink) < 0 ||
+ pa_tagstruct_get_sample_spec(t, &sample_spec) < 0 ||
+ pa_tagstruct_get_channel_map(t, &channel_map) < 0 ||
+ pa_tagstruct_get_cvolume(t, &volume) < 0 ||
+ pa_tagstruct_get_usec(t, &buffer_usec) < 0 ||
+ pa_tagstruct_get_usec(t, &sink_usec) < 0 ||
+ pa_tagstruct_gets(t, &resample_method) < 0 ||
+ pa_tagstruct_gets(t, &driver) < 0 ||
+ (u->version >= 11 && pa_tagstruct_get_boolean(t, &mute) < 0)) {
+ pa_log("Invalid reply. (get_info)");
+ goto fail;
+ }
+
+ if (idx != u->device_index)
+ return;
+
+ pa_assert(u->sink);
+
+ if ((u->version < 11 || !!mute == !!u->sink->muted) &&
+ pa_cvolume_equal(&volume, &u->sink->volume))
+ return;
+
+ memcpy(&u->sink->volume, &volume, sizeof(pa_cvolume));
+
+ if (u->version >= 11)
+ u->sink->muted = !!mute;
+
+ pa_subscription_post(u->sink->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, u->sink->index);
+ return;
+
+fail:
+ pa_module_unload_request(u->module);
+}
+
+#else
+
+static void source_info_cb(pa_pdispatch *pd, uint32_t command, PA_GCC_UNUSED uint32_t tag, pa_tagstruct *t, void *userdata) {
+ struct userdata *u = userdata;
+ uint32_t idx, owner_module, monitor_of_sink, flags;
+ const char *name, *description, *monitor_of_sink_name, *driver;
+ pa_sample_spec ss;
+ pa_channel_map cm;
+ pa_cvolume volume;
+ int mute;
+ pa_usec_t latency;
+
+ pa_assert(pd);
+ pa_assert(u);
+
+ if (command != PA_COMMAND_REPLY) {
+ if (command == PA_COMMAND_ERROR)
+ pa_log("Failed to get info.");
+ else
+ pa_log("Protocol error 5.");
+ goto fail;
+ }
+
+ if (pa_tagstruct_getu32(t, &idx) < 0 ||
+ pa_tagstruct_gets(t, &name) < 0 ||
+ pa_tagstruct_gets(t, &description) < 0 ||
+ pa_tagstruct_get_sample_spec(t, &ss) < 0 ||
+ pa_tagstruct_get_channel_map(t, &cm) < 0 ||
+ pa_tagstruct_getu32(t, &owner_module) < 0 ||
+ pa_tagstruct_get_cvolume(t, &volume) < 0 ||
+ pa_tagstruct_get_boolean(t, &mute) < 0 ||
+ pa_tagstruct_getu32(t, &monitor_of_sink) < 0 ||
+ pa_tagstruct_gets(t, &monitor_of_sink_name) < 0 ||
+ pa_tagstruct_get_usec(t, &latency) < 0 ||
+ pa_tagstruct_gets(t, &driver) < 0 ||
+ pa_tagstruct_getu32(t, &flags) < 0) {
+ pa_log("Invalid reply. (get_source_info)");
+ goto fail;
+ }
+
+ if (strcmp(name, u->source_name))
+ return;
+
+ pa_xfree(u->device_description);
+ u->device_description = pa_xstrdup(description);
+
+ update_description(u);
+
+ return;
+
+fail:
+ pa_module_unload_request(u->module);
+}
+
+#endif
+
+static void request_info(struct userdata *u) {
+ pa_tagstruct *t;
+ uint32_t tag;
+ pa_assert(u);
+
+ t = pa_tagstruct_new(NULL, 0);
+ pa_tagstruct_putu32(t, PA_COMMAND_GET_SERVER_INFO);
+ pa_tagstruct_putu32(t, tag = u->ctag++);
+ pa_pstream_send_tagstruct(u->pstream, t);
+ pa_pdispatch_register_reply(u->pdispatch, tag, DEFAULT_TIMEOUT, server_info_cb, u, NULL);
+
+#ifdef TUNNEL_SINK
+ t = pa_tagstruct_new(NULL, 0);
+ pa_tagstruct_putu32(t, PA_COMMAND_GET_SINK_INPUT_INFO);
+ pa_tagstruct_putu32(t, tag = u->ctag++);
+ pa_tagstruct_putu32(t, u->device_index);
+ pa_pstream_send_tagstruct(u->pstream, t);
+ pa_pdispatch_register_reply(u->pdispatch, tag, DEFAULT_TIMEOUT, sink_input_info_cb, u, NULL);
+
+ t = pa_tagstruct_new(NULL, 0);
+ pa_tagstruct_putu32(t, PA_COMMAND_GET_SINK_INFO);
+ pa_tagstruct_putu32(t, tag = u->ctag++);
+ pa_tagstruct_putu32(t, PA_INVALID_INDEX);
+ pa_tagstruct_puts(t, u->sink_name);
+ pa_pstream_send_tagstruct(u->pstream, t);
+ pa_pdispatch_register_reply(u->pdispatch, tag, DEFAULT_TIMEOUT, sink_info_cb, u, NULL);
+#else
+ t = pa_tagstruct_new(NULL, 0);
+ pa_tagstruct_putu32(t, PA_COMMAND_GET_SOURCE_INFO);
+ pa_tagstruct_putu32(t, tag = u->ctag++);
+ pa_tagstruct_putu32(t, PA_INVALID_INDEX);
+ pa_tagstruct_puts(t, u->source_name);
+ pa_pstream_send_tagstruct(u->pstream, t);
+ pa_pdispatch_register_reply(u->pdispatch, tag, DEFAULT_TIMEOUT, source_info_cb, u, NULL);
+#endif
+}
+
+static void command_subscribe_event(pa_pdispatch *pd, PA_GCC_UNUSED uint32_t command, PA_GCC_UNUSED uint32_t tag, pa_tagstruct *t, void *userdata) {
+ struct userdata *u = userdata;
+ pa_subscription_event_type_t e;
+ uint32_t idx;
+
+ pa_assert(pd);
+ pa_assert(t);
+ pa_assert(u);
+ pa_assert(command == PA_COMMAND_SUBSCRIBE_EVENT);
+
+ if (pa_tagstruct_getu32(t, &e) < 0 ||
+ pa_tagstruct_getu32(t, &idx) < 0) {
+ pa_log("Invalid protocol reply");
+ pa_module_unload_request(u->module);
+ return;
+ }
+
+ if (e != (PA_SUBSCRIPTION_EVENT_SERVER|PA_SUBSCRIPTION_EVENT_CHANGE) &&
+#ifdef TUNNEL_SINK
+ e != (PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE) &&
+ e != (PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE)
+#else
+ e != (PA_SUBSCRIPTION_EVENT_SOURCE|PA_SUBSCRIPTION_EVENT_CHANGE)
+#endif
+ )
+ return;
+
+ request_info(u);
+}
+
+static void start_subscribe(struct userdata *u) {
+ pa_tagstruct *t;
+ uint32_t tag;
+ pa_assert(u);
+
+ t = pa_tagstruct_new(NULL, 0);
+ pa_tagstruct_putu32(t, PA_COMMAND_SUBSCRIBE);
+ pa_tagstruct_putu32(t, tag = u->ctag++);
+ pa_tagstruct_putu32(t, PA_SUBSCRIPTION_MASK_SERVER|
+#ifdef TUNNEL_SINK
+ PA_SUBSCRIPTION_MASK_SINK_INPUT|PA_SUBSCRIPTION_MASK_SINK
+#else
+ PA_SUBSCRIPTION_MASK_SOURCE
+#endif
+ );
+
+ pa_pstream_send_tagstruct(u->pstream, t);
+}
+
+static void create_stream_callback(pa_pdispatch *pd, uint32_t command, PA_GCC_UNUSED uint32_t tag, pa_tagstruct *t, void *userdata) {
+ struct userdata *u = userdata;
+ struct timeval ntv;
+#ifdef TUNNEL_SINK
+ uint32_t bytes;
+#endif
+
+ pa_assert(pd);
+ pa_assert(u);
+ pa_assert(u->pdispatch == pd);
+
+ if (command != PA_COMMAND_REPLY) {
+ if (command == PA_COMMAND_ERROR)
+ pa_log("Failed to create stream.");
+ else
+ pa_log("Protocol error 3.");
+ goto fail;
+ }
+
+ if (pa_tagstruct_getu32(t, &u->channel) < 0 ||
+ pa_tagstruct_getu32(t, &u->device_index) < 0
+#ifdef TUNNEL_SINK
+ || pa_tagstruct_getu32(t, &bytes) < 0
+#endif
+ )
+ goto parse_error;
+
+ if (u->version >= 9) {
+#ifdef TUNNEL_SINK
+ uint32_t maxlength, tlength, prebuf, minreq;
+
+ if (pa_tagstruct_getu32(t, &maxlength) < 0 ||
+ pa_tagstruct_getu32(t, &tlength) < 0 ||
+ pa_tagstruct_getu32(t, &prebuf) < 0 ||
+ pa_tagstruct_getu32(t, &minreq) < 0)
+ goto parse_error;
+#else
+ uint32_t maxlength, fragsize;
+
+ if (pa_tagstruct_getu32(t, &maxlength) < 0 ||
+ pa_tagstruct_getu32(t, &fragsize) < 0)
+ goto parse_error;
+#endif
+ }
+
+ start_subscribe(u);
+ request_info(u);
+
+ pa_assert(!u->time_event);
+ pa_gettimeofday(&ntv);
+ ntv.tv_sec += LATENCY_INTERVAL;
+ u->time_event = u->core->mainloop->time_new(u->core->mainloop, &ntv, timeout_callback, u);
+
+ request_latency(u);
+
+ pa_log_debug("Stream created.");
+
+#ifdef TUNNEL_SINK
+ pa_asyncmsgq_post(u->sink->asyncmsgq, PA_MSGOBJECT(u->sink), SINK_MESSAGE_REQUEST, NULL, bytes, NULL, NULL);
+#endif
+
+ return;
+
+parse_error:
+ pa_log("Invalid reply. (Create stream)");
+
+fail:
+ pa_module_unload_request(u->module);
+}
+
+static void setup_complete_callback(pa_pdispatch *pd, uint32_t command, uint32_t tag, pa_tagstruct *t, void *userdata) {
+ struct userdata *u = userdata;
+ pa_tagstruct *reply;
+ char name[256], un[128], hn[128];
+#ifdef TUNNEL_SINK
+ pa_cvolume volume;
+#endif
+
+ pa_assert(pd);
+ pa_assert(u);
+ pa_assert(u->pdispatch == pd);
+
+ if (command != PA_COMMAND_REPLY ||
+ pa_tagstruct_getu32(t, &u->version) < 0) {
+ if (command == PA_COMMAND_ERROR)
+ pa_log("Failed to authenticate");
+ else
+ pa_log("Protocol error 4.");
+
+ goto fail;
+ }
+
+ /* Minimum supported protocol version */
+ if (u->version < 8) {
+ pa_log("Incompatible protocol version");
+ goto fail;
+ }
+
+#ifdef TUNNEL_SINK
+ pa_snprintf(name, sizeof(name), "%s for %s@%s",
+ u->sink_name,
+ pa_get_user_name(un, sizeof(un)),
+ pa_get_host_name(hn, sizeof(hn)));
+#else
+ pa_snprintf(name, sizeof(name), "%s for %s@%s",
+ u->source_name,
+ pa_get_user_name(un, sizeof(un)),
+ pa_get_host_name(hn, sizeof(hn)));
+#endif
+
+ reply = pa_tagstruct_new(NULL, 0);
+ pa_tagstruct_putu32(reply, PA_COMMAND_SET_CLIENT_NAME);
+ pa_tagstruct_putu32(reply, tag = u->ctag++);
+ pa_tagstruct_puts(reply, "PulseAudio");
+ pa_pstream_send_tagstruct(u->pstream, reply);
+ /* We ignore the server's reply here */
+
+ reply = pa_tagstruct_new(NULL, 0);
+
+#ifdef TUNNEL_SINK
+ pa_tagstruct_putu32(reply, PA_COMMAND_CREATE_PLAYBACK_STREAM);
+ pa_tagstruct_putu32(reply, tag = u->ctag++);
+ pa_tagstruct_puts(reply, name);
+ pa_tagstruct_put_sample_spec(reply, &u->sink->sample_spec);
+ pa_tagstruct_put_channel_map(reply, &u->sink->channel_map);
+ pa_tagstruct_putu32(reply, PA_INVALID_INDEX);
+ pa_tagstruct_puts(reply, u->sink_name);
+ pa_tagstruct_putu32(reply, u->maxlength);
+ pa_tagstruct_put_boolean(reply, !PA_SINK_OPENED(pa_sink_get_state(u->sink)));
+ pa_tagstruct_putu32(reply, u->tlength);
+ pa_tagstruct_putu32(reply, u->prebuf);
+ pa_tagstruct_putu32(reply, u->minreq);
+ pa_tagstruct_putu32(reply, 0);
+ pa_cvolume_reset(&volume, u->sink->sample_spec.channels);
+ pa_tagstruct_put_cvolume(reply, &volume);
+#else
+ pa_tagstruct_putu32(reply, PA_COMMAND_CREATE_RECORD_STREAM);
+ pa_tagstruct_putu32(reply, tag = u->ctag++);
+ pa_tagstruct_puts(reply, name);
+ pa_tagstruct_put_sample_spec(reply, &u->source->sample_spec);
+ pa_tagstruct_put_channel_map(reply, &u->source->channel_map);
+ pa_tagstruct_putu32(reply, PA_INVALID_INDEX);
+ pa_tagstruct_puts(reply, u->source_name);
+ pa_tagstruct_putu32(reply, u->maxlength);
+ pa_tagstruct_put_boolean(reply, !PA_SOURCE_OPENED(pa_source_get_state(u->source)));
+ pa_tagstruct_putu32(reply, u->fragsize);
+#endif
+
+ /* New flags added in 0.9.8 */
+ if (u->version >= 12) {
+ /* TODO: set these to useful values */
+ pa_tagstruct_put_boolean(reply, FALSE); /*no_remap*/
+ pa_tagstruct_put_boolean(reply, FALSE); /*no_remix*/
+ pa_tagstruct_put_boolean(reply, FALSE); /*fix_format*/
+ pa_tagstruct_put_boolean(reply, FALSE); /*fix_rate*/
+ pa_tagstruct_put_boolean(reply, FALSE); /*fix_channels*/
+ pa_tagstruct_put_boolean(reply, FALSE); /*no_move*/
+ pa_tagstruct_put_boolean(reply, FALSE); /*variable_rate*/
+ }
+
+ pa_pstream_send_tagstruct(u->pstream, reply);
+ pa_pdispatch_register_reply(u->pdispatch, tag, DEFAULT_TIMEOUT, create_stream_callback, u, NULL);
+
+ pa_log_debug("Connection authenticated, creating stream ...");
+
+ return;
+
+fail:
+ pa_module_unload_request(u->module);
+}
+
+static void pstream_die_callback(pa_pstream *p, void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(p);
+ pa_assert(u);
+
+ pa_log_warn("Stream died.");
+ pa_module_unload_request(u->module);
+}
+
+static void pstream_packet_callback(pa_pstream *p, pa_packet *packet, const pa_creds *creds, void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(p);
+ pa_assert(packet);
+ pa_assert(u);
+
+ if (pa_pdispatch_run(u->pdispatch, packet, creds, u) < 0) {
+ pa_log("Invalid packet");
+ pa_module_unload_request(u->module);
+ return;
+ }
+}
+
+#ifndef TUNNEL_SINK
+static void pstream_memblock_callback(pa_pstream *p, uint32_t channel, int64_t offset, pa_seek_mode_t seek, const pa_memchunk *chunk, void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(p);
+ pa_assert(chunk);
+ pa_assert(u);
+
+ if (channel != u->channel) {
+ pa_log("Received memory block on bad channel.");
+ pa_module_unload_request(u->module);
+ return;
+ }
+
+ pa_asyncmsgq_send(u->source->asyncmsgq, PA_MSGOBJECT(u->source), SOURCE_MESSAGE_POST, PA_UINT_TO_PTR(seek), offset, chunk);
+
+ u->counter += chunk->length;
+ u->counter_delta += chunk->length;
+}
+
+#endif
+
+static void on_connection(pa_socket_client *sc, pa_iochannel *io, void *userdata) {
+ struct userdata *u = userdata;
+ pa_tagstruct *t;
+ uint32_t tag;
+
+ pa_assert(sc);
+ pa_assert(u);
+ pa_assert(u->client == sc);
+
+ pa_socket_client_unref(u->client);
+ u->client = NULL;
+
+ if (!io) {
+ pa_log("Connection failed: %s", pa_cstrerror(errno));
+ pa_module_unload_request(u->module);
+ return;
+ }
+
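+ /* The connection is up: wrap it in a pstream/pdispatch pair and start the native protocol handshake by sending an AUTH command carrying our cookie (and, where supported, our credentials). */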
+ u->pstream = pa_pstream_new(u->core->mainloop, io, u->core->mempool);
+ u->pdispatch = pa_pdispatch_new(u->core->mainloop, command_table, PA_COMMAND_MAX);
+
+ pa_pstream_set_die_callback(u->pstream, pstream_die_callback, u);
+ pa_pstream_set_recieve_packet_callback(u->pstream, pstream_packet_callback, u);
+#ifndef TUNNEL_SINK
+ pa_pstream_set_recieve_memblock_callback(u->pstream, pstream_memblock_callback, u);
+#endif
+
+ t = pa_tagstruct_new(NULL, 0);
+ pa_tagstruct_putu32(t, PA_COMMAND_AUTH);
+ pa_tagstruct_putu32(t, tag = u->ctag++);
+ pa_tagstruct_putu32(t, PA_PROTOCOL_VERSION);
+ pa_tagstruct_put_arbitrary(t, u->auth_cookie, sizeof(u->auth_cookie));
+
+#ifdef HAVE_CREDS
+{
+ pa_creds ucred;
+
+ if (pa_iochannel_creds_supported(io))
+ pa_iochannel_creds_enable(io);
+
+ ucred.uid = getuid();
+ ucred.gid = getgid();
+
+ pa_pstream_send_tagstruct_with_creds(u->pstream, t, &ucred);
+}
+#else
+ pa_pstream_send_tagstruct(u->pstream, t);
+#endif
+
+ pa_pdispatch_register_reply(u->pdispatch, tag, DEFAULT_TIMEOUT, setup_complete_callback, u, NULL);
+
+ pa_log_debug("Connection established, authenticating ...");
+}
+
+#ifdef TUNNEL_SINK
+
+static int sink_get_volume(pa_sink *sink) {
+ return 0;
+}
+
+static int sink_set_volume(pa_sink *sink) {
+ struct userdata *u;
+ pa_tagstruct *t;
+ uint32_t tag;
+
+ pa_assert(sink);
+ u = sink->userdata;
+ pa_assert(u);
+
+ t = pa_tagstruct_new(NULL, 0);
+ pa_tagstruct_putu32(t, PA_COMMAND_SET_SINK_INPUT_VOLUME);
+ pa_tagstruct_putu32(t, tag = u->ctag++);
+ pa_tagstruct_putu32(t, u->device_index);
+ pa_tagstruct_put_cvolume(t, &sink->volume);
+ pa_pstream_send_tagstruct(u->pstream, t);
+
+ return 0;
+}
+
+static int sink_get_mute(pa_sink *sink) {
+ return 0;
+}
+
+static int sink_set_mute(pa_sink *sink) {
+ struct userdata *u;
+ pa_tagstruct *t;
+ uint32_t tag;
+
+ pa_assert(sink);
+ u = sink->userdata;
+ pa_assert(u);
+
+ if (u->version < 11)
+ return -1;
+
+ t = pa_tagstruct_new(NULL, 0);
+ pa_tagstruct_putu32(t, PA_COMMAND_SET_SINK_INPUT_MUTE);
+ pa_tagstruct_putu32(t, tag = u->ctag++);
+ pa_tagstruct_putu32(t, u->device_index);
+ pa_tagstruct_put_boolean(t, !!sink->muted);
+ pa_pstream_send_tagstruct(u->pstream, t);
+
+ return 0;
+}
+
+#endif
+
+static int load_key(struct userdata *u, const char*fn) {
+ pa_assert(u);
+
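+ /* Prefer an auth cookie that another module has already published as a core property; otherwise load it from the given file (or the default cookie file) and publish it ourselves. */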
+ u->auth_cookie_in_property = FALSE;
+
+ if (!fn && pa_authkey_prop_get(u->core, PA_NATIVE_COOKIE_PROPERTY_NAME, u->auth_cookie, sizeof(u->auth_cookie)) >= 0) {
+ pa_log_debug("Using already loaded auth cookie.");
+ pa_authkey_prop_ref(u->core, PA_NATIVE_COOKIE_PROPERTY_NAME);
+ u->auth_cookie_in_property = TRUE;
+ return 0;
+ }
+
+ if (!fn)
+ fn = PA_NATIVE_COOKIE_FILE;
+
+ if (pa_authkey_load_auto(fn, u->auth_cookie, sizeof(u->auth_cookie)) < 0)
+ return -1;
+
+ pa_log_debug("Loading cookie from disk.");
+
+ if (pa_authkey_prop_put(u->core, PA_NATIVE_COOKIE_PROPERTY_NAME, u->auth_cookie, sizeof(u->auth_cookie)) >= 0)
+ u->auth_cookie_in_property = TRUE;
+
+ return 0;
+}
+
+int pa__init(pa_module*m) {
+ pa_modargs *ma = NULL;
+ struct userdata *u = NULL;
+ pa_sample_spec ss;
+ pa_channel_map map;
+ char *t, *dn = NULL;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("failed to parse module arguments");
+ goto fail;
+ }
+
+ u = pa_xnew0(struct userdata, 1);
+ m->userdata = u;
+ u->module = m;
+ u->core = m->core;
+ u->client = NULL;
+ u->pdispatch = NULL;
+ u->pstream = NULL;
+ u->server_name = NULL;
+#ifdef TUNNEL_SINK
+ u->sink_name = pa_xstrdup(pa_modargs_get_value(ma, "sink", NULL));
+ u->sink = NULL;
+ u->requested_bytes = 0;
+#else
+ u->source_name = pa_xstrdup(pa_modargs_get_value(ma, "source", NULL));
+ u->source = NULL;
+#endif
+ u->smoother = pa_smoother_new(PA_USEC_PER_SEC, PA_USEC_PER_SEC*2, TRUE);
+ u->ctag = 1;
+ u->device_index = u->channel = PA_INVALID_INDEX;
+ u->auth_cookie_in_property = FALSE;
+ u->time_event = NULL;
+
+ pa_thread_mq_init(&u->thread_mq, m->core->mainloop);
+ u->rtpoll = pa_rtpoll_new();
+ pa_rtpoll_item_new_asyncmsgq(u->rtpoll, PA_RTPOLL_EARLY, u->thread_mq.inq);
+
+ if (load_key(u, pa_modargs_get_value(ma, "cookie", NULL)) < 0)
+ goto fail;
+
+ if (!(u->server_name = pa_xstrdup(pa_modargs_get_value(ma, "server", NULL)))) {
+ pa_log("no server specified.");
+ goto fail;
+ }
+
+ ss = m->core->default_sample_spec;
+ if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_DEFAULT) < 0) {
+ pa_log("invalid sample format specification");
+ goto fail;
+ }
+
+ if (!(u->client = pa_socket_client_new_string(m->core->mainloop, u->server_name, PA_NATIVE_DEFAULT_PORT))) {
+ pa_log("failed to connect to server '%s'", u->server_name);
+ goto fail;
+ }
+
+ pa_socket_client_set_callback(u->client, on_connection, u);
+
+#ifdef TUNNEL_SINK
+
+ if (!(dn = pa_xstrdup(pa_modargs_get_value(ma, "sink_name", NULL))))
+ dn = pa_sprintf_malloc("tunnel.%s", u->server_name);
+
+ if (!(u->sink = pa_sink_new(m->core, __FILE__, dn, 1, &ss, &map))) {
+ pa_log("Failed to create sink.");
+ goto fail;
+ }
+
+ u->sink->parent.process_msg = sink_process_msg;
+ u->sink->userdata = u;
+ u->sink->set_state = sink_set_state;
+ u->sink->get_latency = sink_get_latency;
+ u->sink->get_volume = sink_get_volume;
+ u->sink->get_mute = sink_get_mute;
+ u->sink->set_volume = sink_set_volume;
+ u->sink->set_mute = sink_set_mute;
+ u->sink->flags = PA_SINK_NETWORK|PA_SINK_LATENCY|PA_SINK_HW_VOLUME_CTRL;
+
+ pa_sink_set_module(u->sink, m);
+ pa_sink_set_asyncmsgq(u->sink, u->thread_mq.inq);
+ pa_sink_set_rtpoll(u->sink, u->rtpoll);
+ pa_sink_set_description(u->sink, t = pa_sprintf_malloc("%s%s%s", u->sink_name ? u->sink_name : "", u->sink_name ? " on " : "", u->server_name));
+ pa_xfree(t);
+
+#else
+
+ if (!(dn = pa_xstrdup(pa_modargs_get_value(ma, "source_name", NULL))))
+ dn = pa_sprintf_malloc("tunnel.%s", u->server_name);
+
+ if (!(u->source = pa_source_new(m->core, __FILE__, dn, 1, &ss, &map))) {
+ pa_log("Failed to create source.");
+ goto fail;
+ }
+
+ u->source->parent.process_msg = source_process_msg;
+ u->source->userdata = u;
+ u->source->set_state = source_set_state;
+ u->source->get_latency = source_get_latency;
+ u->source->flags = PA_SOURCE_NETWORK|PA_SOURCE_LATENCY;
+
+ pa_source_set_module(u->source, m);
+ pa_source_set_asyncmsgq(u->source, u->thread_mq.inq);
+ pa_source_set_rtpoll(u->source, u->rtpoll);
+ pa_source_set_description(u->source, t = pa_sprintf_malloc("%s%s%s", u->source_name ? u->source_name : "", u->source_name ? " on " : "", u->server_name));
+ pa_xfree(t);
+#endif
+
+ pa_xfree(dn);
+
+ u->time_event = NULL;
+
+ u->maxlength = pa_usec_to_bytes(PA_USEC_PER_MSEC * DEFAULT_MAXLENGTH_MSEC, &ss);
+#ifdef TUNNEL_SINK
+ u->tlength = pa_usec_to_bytes(PA_USEC_PER_MSEC * DEFAULT_TLENGTH_MSEC, &ss);
+ u->minreq = pa_usec_to_bytes(PA_USEC_PER_MSEC * DEFAULT_MINREQ_MSEC, &ss);
+ u->prebuf = u->tlength;
+#else
+ u->fragsize = pa_usec_to_bytes(PA_USEC_PER_MSEC * DEFAULT_FRAGSIZE_MSEC, &ss);
+#endif
+
+ u->counter = u->counter_delta = 0;
+ pa_smoother_set_time_offset(u->smoother, pa_rtclock_usec());
+
+ if (!(u->thread = pa_thread_new(thread_func, u))) {
+ pa_log("Failed to create thread.");
+ goto fail;
+ }
+
+#ifdef TUNNEL_SINK
+ pa_sink_put(u->sink);
+#else
+ pa_source_put(u->source);
+#endif
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+ pa__done(m);
+
+ if (ma)
+ pa_modargs_free(ma);
+
+ pa_xfree(dn);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata* u;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+#ifdef TUNNEL_SINK
+ if (u->sink)
+ pa_sink_unlink(u->sink);
+#else
+ if (u->source)
+ pa_source_unlink(u->source);
+#endif
+
+ if (u->thread) {
+ pa_asyncmsgq_send(u->thread_mq.inq, NULL, PA_MESSAGE_SHUTDOWN, NULL, 0, NULL);
+ pa_thread_free(u->thread);
+ }
+
+ pa_thread_mq_done(&u->thread_mq);
+
+#ifdef TUNNEL_SINK
+ if (u->sink)
+ pa_sink_unref(u->sink);
+#else
+ if (u->source)
+ pa_source_unref(u->source);
+#endif
+
+ if (u->rtpoll)
+ pa_rtpoll_free(u->rtpoll);
+
+ if (u->pstream) {
+ pa_pstream_unlink(u->pstream);
+ pa_pstream_unref(u->pstream);
+ }
+
+ if (u->pdispatch)
+ pa_pdispatch_unref(u->pdispatch);
+
+ if (u->client)
+ pa_socket_client_unref(u->client);
+
+ if (u->auth_cookie_in_property)
+ pa_authkey_prop_unref(m->core, PA_NATIVE_COOKIE_PROPERTY_NAME);
+
+ if (u->smoother)
+ pa_smoother_free(u->smoother);
+
+ if (u->time_event)
+ u->core->mainloop->time_free(u->time_event);
+
+#ifdef TUNNEL_SINK
+ pa_xfree(u->sink_name);
+#else
+ pa_xfree(u->source_name);
+#endif
+ pa_xfree(u->server_name);
+
+ pa_xfree(u->device_description);
+ pa_xfree(u->server_fqdn);
+ pa_xfree(u->user_name);
+
+ pa_xfree(u);
+}
diff --git a/src/modules/module-volume-restore.c b/src/modules/module-volume-restore.c
new file mode 100644
index 00000000..192a2a78
--- /dev/null
+++ b/src/modules/module-volume-restore.c
@@ -0,0 +1,580 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+
+#include <pulse/xmalloc.h>
+#include <pulse/volume.h>
+#include <pulse/timeval.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/module.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/log.h>
+#include <pulsecore/core-subscribe.h>
+#include <pulsecore/sink-input.h>
+#include <pulsecore/source-output.h>
+#include <pulsecore/namereg.h>
+
+#include "module-volume-restore-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("Automatically restore the volume and the devices of streams");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(TRUE);
+PA_MODULE_USAGE(
+ "table=<filename> "
+ "restore_device=<Restore the device for each stream?> "
+ "restore_volume=<Restore the volume for each stream?>"
+);
+
+#define WHITESPACE "\n\r \t"
+#define DEFAULT_VOLUME_TABLE_FILE "volume-restore.table"
+#define SAVE_INTERVAL 10
+
+static const char* const valid_modargs[] = {
+ "table",
+ "restore_device",
+ "restore_volume",
+ NULL,
+};
+
+struct rule {
+ char* name;
+ pa_bool_t volume_is_set;
+ pa_cvolume volume;
+ char *sink, *source;
+};
+
+struct userdata {
+ pa_core *core;
+ pa_hashmap *hashmap;
+ pa_subscription *subscription;
+ pa_hook_slot
+ *sink_input_new_hook_slot,
+ *sink_input_fixate_hook_slot,
+ *source_output_new_hook_slot;
+ pa_bool_t modified;
+ char *table_file;
+ pa_time_event *save_time_event;
+};
+
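+/* Parse a volume specification of the form "<channels> <v1> <v2> ...": the channel count followed by one integer volume per channel. Returns NULL on parse failure. */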
+static pa_cvolume* parse_volume(const char *s, pa_cvolume *v) {
+ char *p;
+ long k;
+ unsigned i;
+
+ pa_assert(s);
+ pa_assert(v);
+
+ if (!isdigit(*s))
+ return NULL;
+
+ k = strtol(s, &p, 0);
+ if (k <= 0 || k > PA_CHANNELS_MAX)
+ return NULL;
+
+ v->channels = (unsigned) k;
+
+ for (i = 0; i < v->channels; i++) {
+ p += strspn(p, WHITESPACE);
+
+ if (!isdigit(*p))
+ return NULL;
+
+ k = strtol(p, &p, 0);
+
+ if (k < PA_VOLUME_MUTED)
+ return NULL;
+
+ v->values[i] = (pa_volume_t) k;
+ }
+
+ if (*p != 0)
+ return NULL;
+
+ return v;
+}
+
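+/* Load the rule table from disk. Each rule occupies four consecutive lines: client name, volume specification (may be empty), sink name (may be empty) and source name (may be empty). */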
+static int load_rules(struct userdata *u) {
+ FILE *f;
+ int n = 0;
+ int ret = -1;
+ char buf_name[256], buf_volume[256], buf_sink[256], buf_source[256];
+ char *ln = buf_name;
+
+ f = u->table_file ?
+ fopen(u->table_file, "r") :
+ pa_open_config_file(NULL, DEFAULT_VOLUME_TABLE_FILE, NULL, &u->table_file, "r");
+
+ if (!f) {
+ if (errno == ENOENT) {
+ pa_log_info("starting with empty ruleset.");
+ ret = 0;
+ } else
+ pa_log("failed to open file '%s': %s", u->table_file, pa_cstrerror(errno));
+
+ goto finish;
+ }
+
+ pa_lock_fd(fileno(f), 1);
+
+ while (!feof(f)) {
+ struct rule *rule;
+ pa_cvolume v;
+ pa_bool_t v_is_set;
+
+ if (!fgets(ln, sizeof(buf_name), f))
+ break;
+
+ n++;
+
+ pa_strip_nl(ln);
+
+ if (ln[0] == '#')
+ continue;
+
+ if (ln == buf_name) {
+ ln = buf_volume;
+ continue;
+ }
+
+ if (ln == buf_volume) {
+ ln = buf_sink;
+ continue;
+ }
+
+ if (ln == buf_sink) {
+ ln = buf_source;
+ continue;
+ }
+
+ pa_assert(ln == buf_source);
+
+ if (buf_volume[0]) {
+ if (!parse_volume(buf_volume, &v)) {
+ pa_log("parse failure in %s:%u, stopping parsing", u->table_file, n);
+ goto finish;
+ }
+
+ v_is_set = TRUE;
+ } else
+ v_is_set = FALSE;
+
+ ln = buf_name;
+
+ if (pa_hashmap_get(u->hashmap, buf_name)) {
+ pa_log("double entry in %s:%u, ignoring", u->table_file, n);
+ continue;
+ }
+
+ rule = pa_xnew(struct rule, 1);
+ rule->name = pa_xstrdup(buf_name);
+ if ((rule->volume_is_set = v_is_set))
+ rule->volume = v;
+ rule->sink = buf_sink[0] ? pa_xstrdup(buf_sink) : NULL;
+ rule->source = buf_source[0] ? pa_xstrdup(buf_source) : NULL;
+
+ pa_hashmap_put(u->hashmap, rule->name, rule);
+ }
+
+ if (ln != buf_name) {
+ pa_log("invalid number of lines in %s.", u->table_file);
+ goto finish;
+ }
+
+ ret = 0;
+
+finish:
+ if (f) {
+ pa_lock_fd(fileno(f), 0);
+ fclose(f);
+ }
+
+ return ret;
+}
+
+static int save_rules(struct userdata *u) {
+ FILE *f;
+ int ret = -1;
+ void *state = NULL;
+ struct rule *rule;
+
+ if (!u->modified)
+ return 0;
+
+ pa_log_info("Saving rules...");
+
+ f = u->table_file ?
+ fopen(u->table_file, "w") :
+ pa_open_config_file(NULL, DEFAULT_VOLUME_TABLE_FILE, NULL, &u->table_file, "w");
+
+ if (!f) {
+ pa_log("Failed to open file '%s': %s", u->table_file, pa_cstrerror(errno));
+ goto finish;
+ }
+
+ pa_lock_fd(fileno(f), 1);
+
+ while ((rule = pa_hashmap_iterate(u->hashmap, &state, NULL))) {
+ unsigned i;
+
+ fprintf(f, "%s\n", rule->name);
+
+ if (rule->volume_is_set) {
+ fprintf(f, "%u", rule->volume.channels);
+
+ for (i = 0; i < rule->volume.channels; i++)
+ fprintf(f, " %u", rule->volume.values[i]);
+ }
+
+ fprintf(f, "\n%s\n%s\n",
+ rule->sink ? rule->sink : "",
+ rule->source ? rule->source : "");
+ }
+
+ ret = 0;
+ u->modified = FALSE;
+ pa_log_debug("Successfully saved rules...");
+
+finish:
+ if (f) {
+ pa_lock_fd(fileno(f), 0);
+ fclose(f);
+ }
+
+ return ret;
+}
+
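+/* Build the hash table key for a client as "<driver>$<name>", truncating at characters that would break the line-based table file. */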
+static char* client_name(pa_client *c) {
+ char *t, *e;
+
+ if (!c->name || !c->driver)
+ return NULL;
+
+ t = pa_sprintf_malloc("%s$%s", c->driver, c->name);
+ t[strcspn(t, "\n\r#")] = 0;
+
+ if (!*t) {
+ pa_xfree(t);
+ return NULL;
+ }
+
+ if ((e = strrchr(t, '('))) {
+ char *k = e + 1 + strspn(e + 1, "0123456789-");
+
+ /* Dirty trick: truncate all trailing parens with numbers in
+ * between, since they are usually used to identify multiple
+ * sessions of the same application, which is something we
+ * explicitly don't want. Besides other stuff this makes xmms
+ * with esound work properly for us. */
+
+ if (*k == ')' && *(k+1) == 0)
+ *e = 0;
+ }
+
+ return t;
+}
+
+static void save_time_callback(pa_mainloop_api*a, pa_time_event* e, const struct timeval *tv, void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(a);
+ pa_assert(e);
+ pa_assert(tv);
+ pa_assert(u);
+
+ pa_assert(e == u->save_time_event);
+ u->core->mainloop->time_free(u->save_time_event);
+ u->save_time_event = NULL;
+
+ save_rules(u);
+}
+
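+/* Whenever a sink input or source output is created or changes, remember its current volume and device in the rule table and schedule a delayed save to disk. */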
+static void subscribe_callback(pa_core *c, pa_subscription_event_type_t t, uint32_t idx, void *userdata) {
+ struct userdata *u = userdata;
+ pa_sink_input *si = NULL;
+ pa_source_output *so = NULL;
+ struct rule *r;
+ char *name;
+
+ pa_assert(c);
+ pa_assert(u);
+
+ if (t != (PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_NEW) &&
+ t != (PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE) &&
+ t != (PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_NEW) &&
+ t != (PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT|PA_SUBSCRIPTION_EVENT_CHANGE))
+ return;
+
+ if ((t & PA_SUBSCRIPTION_EVENT_FACILITY_MASK) == PA_SUBSCRIPTION_EVENT_SINK_INPUT) {
+ if (!(si = pa_idxset_get_by_index(c->sink_inputs, idx)))
+ return;
+
+ if (!si->client || !(name = client_name(si->client)))
+ return;
+ } else {
+ pa_assert((t & PA_SUBSCRIPTION_EVENT_FACILITY_MASK) == PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT);
+
+ if (!(so = pa_idxset_get_by_index(c->source_outputs, idx)))
+ return;
+
+ if (!so->client || !(name = client_name(so->client)))
+ return;
+ }
+
+ if ((r = pa_hashmap_get(u->hashmap, name))) {
+ pa_xfree(name);
+
+ if (si) {
+
+ if (!r->volume_is_set || !pa_cvolume_equal(pa_sink_input_get_volume(si), &r->volume)) {
+ pa_log_info("Saving volume for <%s>", r->name);
+ r->volume = *pa_sink_input_get_volume(si);
+ r->volume_is_set = TRUE;
+ u->modified = TRUE;
+ }
+
+ if (!r->sink || strcmp(si->sink->name, r->sink) != 0) {
+ pa_log_info("Saving sink for <%s>", r->name);
+ pa_xfree(r->sink);
+ r->sink = pa_xstrdup(si->sink->name);
+ u->modified = TRUE;
+ }
+ } else {
+ pa_assert(so);
+
+ if (!r->source || strcmp(so->source->name, r->source) != 0) {
+ pa_log_info("Saving source for <%s>", r->name);
+ pa_xfree(r->source);
+ r->source = pa_xstrdup(so->source->name);
+ u->modified = TRUE;
+ }
+ }
+
+ } else {
+ pa_log_info("Creating new entry for <%s>", name);
+
+ r = pa_xnew(struct rule, 1);
+ r->name = name;
+
+ if (si) {
+ r->volume = *pa_sink_input_get_volume(si);
+ r->volume_is_set = TRUE;
+ r->sink = pa_xstrdup(si->sink->name);
+ r->source = NULL;
+ } else {
+ pa_assert(so);
+ r->volume_is_set = FALSE;
+ r->sink = NULL;
+ r->source = pa_xstrdup(so->source->name);
+ }
+
+ pa_hashmap_put(u->hashmap, r->name, r);
+ u->modified = TRUE;
+ }
+
+ if (u->modified && !u->save_time_event) {
+ struct timeval tv;
+ pa_gettimeofday(&tv);
+ tv.tv_sec += SAVE_INTERVAL;
+ u->save_time_event = u->core->mainloop->time_new(u->core->mainloop, &tv, save_time_callback, u);
+ }
+}
+
+static pa_hook_result_t sink_input_new_hook_callback(pa_core *c, pa_sink_input_new_data *data, struct userdata *u) {
+ struct rule *r;
+ char *name;
+
+ pa_assert(data);
+
+ /* In the NEW hook we only adjust the device. Adjusting the volume
+ * is left for the FIXATE hook */
+
+ if (!data->client || !(name = client_name(data->client)))
+ return PA_HOOK_OK;
+
+ if ((r = pa_hashmap_get(u->hashmap, name))) {
+ if (!data->sink && r->sink) {
+ if ((data->sink = pa_namereg_get(c, r->sink, PA_NAMEREG_SINK, 1)))
+ pa_log_info("Restoring sink for <%s>", r->name);
+ }
+ }
+
+ pa_xfree(name);
+
+ return PA_HOOK_OK;
+}
+
+static pa_hook_result_t sink_input_fixate_hook_callback(pa_core *c, pa_sink_input_new_data *data, struct userdata *u) {
+ struct rule *r;
+ char *name;
+
+ pa_assert(data);
+
+    /* In the FIXATE hook we only adjust the volume. Adjusting the device
+ * is left for the NEW hook */
+
+ if (!data->client || !(name = client_name(data->client)))
+ return PA_HOOK_OK;
+
+ if ((r = pa_hashmap_get(u->hashmap, name))) {
+
+ if (r->volume_is_set && data->sample_spec.channels == r->volume.channels) {
+ pa_log_info("Restoring volume for <%s>", r->name);
+ pa_sink_input_new_data_set_volume(data, &r->volume);
+ }
+ }
+
+ pa_xfree(name);
+
+ return PA_HOOK_OK;
+}
+
+static pa_hook_result_t source_output_new_hook_callback(pa_core *c, pa_source_output_new_data *data, struct userdata *u) {
+ struct rule *r;
+ char *name;
+
+ pa_assert(data);
+
+ if (!data->client || !(name = client_name(data->client)))
+ return PA_HOOK_OK;
+
+ if ((r = pa_hashmap_get(u->hashmap, name))) {
+ if (!data->source && r->source) {
+ if ((data->source = pa_namereg_get(c, r->source, PA_NAMEREG_SOURCE, 1)))
+ pa_log_info("Restoring source for <%s>", r->name);
+ }
+ }
+
+ return PA_HOOK_OK;
+}
+
+int pa__init(pa_module*m) {
+ pa_modargs *ma = NULL;
+ struct userdata *u;
+ pa_bool_t restore_device = TRUE, restore_volume = TRUE;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments");
+ goto fail;
+ }
+
+ u = pa_xnew(struct userdata, 1);
+ u->core = m->core;
+ u->hashmap = pa_hashmap_new(pa_idxset_string_hash_func, pa_idxset_string_compare_func);
+ u->table_file = pa_xstrdup(pa_modargs_get_value(ma, "table", NULL));
+ u->modified = FALSE;
+ u->subscription = NULL;
+ u->sink_input_new_hook_slot = u->sink_input_fixate_hook_slot = u->source_output_new_hook_slot = NULL;
+ u->save_time_event = NULL;
+
+ m->userdata = u;
+
+ if (pa_modargs_get_value_boolean(ma, "restore_device", &restore_device) < 0 ||
+ pa_modargs_get_value_boolean(ma, "restore_volume", &restore_volume) < 0) {
+ pa_log("restore_volume= and restore_device= expect boolean arguments");
+ goto fail;
+ }
+
+ if (!(restore_device || restore_volume)) {
+        pa_log("Both restoring the volume and restoring the device are disabled. There's no point in using this module at all then, failing.");
+ goto fail;
+ }
+
+ if (load_rules(u) < 0)
+ goto fail;
+
+ u->subscription = pa_subscription_new(m->core, PA_SUBSCRIPTION_MASK_SINK_INPUT|PA_SUBSCRIPTION_MASK_SOURCE_OUTPUT, subscribe_callback, u);
+
+ if (restore_device) {
+ u->sink_input_new_hook_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_INPUT_NEW], (pa_hook_cb_t) sink_input_new_hook_callback, u);
+ u->source_output_new_hook_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SOURCE_OUTPUT_NEW], (pa_hook_cb_t) source_output_new_hook_callback, u);
+ }
+
+ if (restore_volume)
+ u->sink_input_fixate_hook_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_INPUT_FIXATE], (pa_hook_cb_t) sink_input_fixate_hook_callback, u);
+
+ pa_modargs_free(ma);
+ return 0;
+
+fail:
+ pa__done(m);
+ if (ma)
+ pa_modargs_free(ma);
+
+ return -1;
+}
+
+static void free_func(void *p, void *userdata) {
+ struct rule *r = p;
+ pa_assert(r);
+
+ pa_xfree(r->name);
+ pa_xfree(r->sink);
+ pa_xfree(r->source);
+ pa_xfree(r);
+}
+
+void pa__done(pa_module*m) {
+ struct userdata* u;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->subscription)
+ pa_subscription_free(u->subscription);
+
+ if (u->sink_input_new_hook_slot)
+ pa_hook_slot_free(u->sink_input_new_hook_slot);
+ if (u->sink_input_fixate_hook_slot)
+ pa_hook_slot_free(u->sink_input_fixate_hook_slot);
+ if (u->source_output_new_hook_slot)
+ pa_hook_slot_free(u->source_output_new_hook_slot);
+
+ if (u->hashmap) {
+ save_rules(u);
+ pa_hashmap_free(u->hashmap, free_func, NULL);
+ }
+
+ if (u->save_time_event)
+ u->core->mainloop->time_free(u->save_time_event);
+
+ pa_xfree(u->table_file);
+ pa_xfree(u);
+}
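For reference, the table file written by save_rules() above stores one four-line record per client: the client name, an optional volume line (the channel count followed by the raw per-channel values), then the sink name and the source name, either of which may be left empty. A purely hypothetical record for a playback client, with made-up names and values, could look like this (the fourth, empty line would be the unset source entry):

    mplayer$MPlayer
    2 45875 45875
    alsa_output.0

The matching load_rules() is expected to read records back in the same four-line order.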
diff --git a/src/modules/module-waveout.c b/src/modules/module-waveout.c
new file mode 100644
index 00000000..f8bae02f
--- /dev/null
+++ b/src/modules/module-waveout.c
@@ -0,0 +1,649 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+ Copyright 2006-2007 Pierre Ossman <ossman@cendio.se> for Cendio AB
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <windows.h>
+#include <mmsystem.h>
+
+#include <pulse/mainloop-api.h>
+
+#include <pulse/xmalloc.h>
+#include <pulse/timeval.h>
+
+#include <pulsecore/sink.h>
+#include <pulsecore/source.h>
+#include <pulsecore/module.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/sample-util.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/log.h>
+
+#include "module-waveout-symdef.h"
+
+PA_MODULE_AUTHOR("Pierre Ossman")
+PA_MODULE_DESCRIPTION("Windows waveOut Sink/Source")
+PA_MODULE_VERSION(PACKAGE_VERSION)
+PA_MODULE_USAGE(
+ "sink_name=<name for the sink> "
+ "source_name=<name for the source> "
+ "device=<device number> "
+ "record=<enable source?> "
+ "playback=<enable sink?> "
+ "format=<sample format> "
+ "channels=<number of channels> "
+ "rate=<sample rate> "
+ "fragments=<number of fragments> "
+ "fragment_size=<fragment size> "
+ "channel_map=<channel map>")
+
+#define DEFAULT_SINK_NAME "wave_output"
+#define DEFAULT_SOURCE_NAME "wave_input"
+
+#define WAVEOUT_MAX_VOLUME 0xFFFF
+
+struct userdata {
+ pa_sink *sink;
+ pa_source *source;
+ pa_core *core;
+ pa_time_event *event;
+ pa_defer_event *defer;
+ pa_usec_t poll_timeout;
+
+ uint32_t fragments, fragment_size;
+
+ uint32_t free_ofrags, free_ifrags;
+
+ DWORD written_bytes;
+ int sink_underflow;
+
+ int cur_ohdr, cur_ihdr;
+ WAVEHDR *ohdrs, *ihdrs;
+
+ HWAVEOUT hwo;
+ HWAVEIN hwi;
+ pa_module *module;
+
+ CRITICAL_SECTION crit;
+};
+
+static const char* const valid_modargs[] = {
+ "sink_name",
+ "source_name",
+ "device",
+ "record",
+ "playback",
+ "fragments",
+ "fragment_size",
+ "format",
+ "rate",
+ "channels",
+ "channel_map",
+ NULL
+};
+
+static void update_usage(struct userdata *u) {
+ pa_module_set_used(u->module,
+ (u->sink ? pa_sink_used_by(u->sink) : 0) +
+ (u->source ? pa_source_used_by(u->source) : 0));
+}
+
+static void do_write(struct userdata *u)
+{
+ uint32_t free_frags;
+ pa_memchunk memchunk;
+ WAVEHDR *hdr;
+ MMRESULT res;
+
+ if (!u->sink)
+ return;
+
+ EnterCriticalSection(&u->crit);
+ free_frags = u->free_ofrags;
+ LeaveCriticalSection(&u->crit);
+
+ if (!u->sink_underflow && (free_frags == u->fragments))
+ pa_log_debug("WaveOut underflow!");
+
+ while (free_frags) {
+ hdr = &u->ohdrs[u->cur_ohdr];
+ if (hdr->dwFlags & WHDR_PREPARED)
+ waveOutUnprepareHeader(u->hwo, hdr, sizeof(WAVEHDR));
+
+ hdr->dwBufferLength = 0;
+ while (hdr->dwBufferLength < u->fragment_size) {
+ size_t len;
+
+ len = u->fragment_size - hdr->dwBufferLength;
+
+ if (pa_sink_render(u->sink, len, &memchunk) < 0)
+ break;
+
+ assert(memchunk.memblock);
+ assert(memchunk.memblock->data);
+ assert(memchunk.length);
+
+ if (memchunk.length < len)
+ len = memchunk.length;
+
+ memcpy(hdr->lpData + hdr->dwBufferLength,
+ (char*)memchunk.memblock->data + memchunk.index, len);
+
+ hdr->dwBufferLength += len;
+
+ pa_memblock_unref(memchunk.memblock);
+ memchunk.memblock = NULL;
+ }
+
+ /* Insufficient data in sink buffer? */
+ if (hdr->dwBufferLength == 0) {
+ u->sink_underflow = 1;
+ break;
+ }
+
+ u->sink_underflow = 0;
+
+ res = waveOutPrepareHeader(u->hwo, hdr, sizeof(WAVEHDR));
+ if (res != MMSYSERR_NOERROR) {
+ pa_log_error(__FILE__ ": ERROR: Unable to prepare waveOut block: %d",
+ res);
+ }
+ res = waveOutWrite(u->hwo, hdr, sizeof(WAVEHDR));
+ if (res != MMSYSERR_NOERROR) {
+ pa_log_error(__FILE__ ": ERROR: Unable to write waveOut block: %d",
+ res);
+ }
+
+ u->written_bytes += hdr->dwBufferLength;
+
+ EnterCriticalSection(&u->crit);
+ u->free_ofrags--;
+ LeaveCriticalSection(&u->crit);
+
+ free_frags--;
+ u->cur_ohdr++;
+ u->cur_ohdr %= u->fragments;
+ }
+}
+
+static void do_read(struct userdata *u)
+{
+ uint32_t free_frags;
+ pa_memchunk memchunk;
+ WAVEHDR *hdr;
+ MMRESULT res;
+
+ if (!u->source)
+ return;
+
+ EnterCriticalSection(&u->crit);
+
+ free_frags = u->free_ifrags;
+ u->free_ifrags = 0;
+
+ LeaveCriticalSection(&u->crit);
+
+ if (free_frags == u->fragments)
+ pa_log_debug("WaveIn overflow!");
+
+ while (free_frags) {
+ hdr = &u->ihdrs[u->cur_ihdr];
+ if (hdr->dwFlags & WHDR_PREPARED)
+ waveInUnprepareHeader(u->hwi, hdr, sizeof(WAVEHDR));
+
+ if (hdr->dwBytesRecorded) {
+ memchunk.memblock = pa_memblock_new(u->core->mempool, hdr->dwBytesRecorded);
+ assert(memchunk.memblock);
+
+ memcpy((char*)memchunk.memblock->data, hdr->lpData, hdr->dwBytesRecorded);
+
+ memchunk.length = memchunk.memblock->length = hdr->dwBytesRecorded;
+ memchunk.index = 0;
+
+ pa_source_post(u->source, &memchunk);
+ pa_memblock_unref(memchunk.memblock);
+ }
+
+ res = waveInPrepareHeader(u->hwi, hdr, sizeof(WAVEHDR));
+ if (res != MMSYSERR_NOERROR) {
+ pa_log_error(__FILE__ ": ERROR: Unable to prepare waveIn block: %d",
+ res);
+ }
+ res = waveInAddBuffer(u->hwi, hdr, sizeof(WAVEHDR));
+ if (res != MMSYSERR_NOERROR) {
+ pa_log_error(__FILE__ ": ERROR: Unable to add waveIn block: %d",
+ res);
+ }
+
+ free_frags--;
+ u->cur_ihdr++;
+ u->cur_ihdr %= u->fragments;
+ }
+}
+
+static void poll_cb(pa_mainloop_api*a, pa_time_event *e, const struct timeval *tv, void *userdata) {
+ struct userdata *u = userdata;
+ struct timeval ntv;
+
+ assert(u);
+
+ update_usage(u);
+
+ do_write(u);
+ do_read(u);
+
+ pa_gettimeofday(&ntv);
+ pa_timeval_add(&ntv, u->poll_timeout);
+
+ a->time_restart(e, &ntv);
+}
+
+static void defer_cb(pa_mainloop_api*a, pa_defer_event *e, void *userdata) {
+ struct userdata *u = userdata;
+
+ assert(u);
+
+ a->defer_enable(e, 0);
+
+ do_write(u);
+ do_read(u);
+}
+
+static void CALLBACK chunk_done_cb(HWAVEOUT hwo, UINT msg, DWORD_PTR inst, DWORD param1, DWORD param2) {
+ struct userdata *u = (struct userdata *)inst;
+
+ if (msg != WOM_DONE)
+ return;
+
+ EnterCriticalSection(&u->crit);
+
+ u->free_ofrags++;
+ assert(u->free_ofrags <= u->fragments);
+
+ LeaveCriticalSection(&u->crit);
+}
+
+static void CALLBACK chunk_ready_cb(HWAVEIN hwi, UINT msg, DWORD_PTR inst, DWORD param1, DWORD param2) {
+ struct userdata *u = (struct userdata *)inst;
+
+ if (msg != WIM_DATA)
+ return;
+
+ EnterCriticalSection(&u->crit);
+
+ u->free_ifrags++;
+ assert(u->free_ifrags <= u->fragments);
+
+ LeaveCriticalSection(&u->crit);
+}
+
+static pa_usec_t sink_get_latency_cb(pa_sink *s) {
+ struct userdata *u = s->userdata;
+ uint32_t free_frags;
+ MMTIME mmt;
+ assert(s && u && u->sink);
+
+ memset(&mmt, 0, sizeof(mmt));
+ mmt.wType = TIME_BYTES;
+ if (waveOutGetPosition(u->hwo, &mmt, sizeof(mmt)) == MMSYSERR_NOERROR)
+ return pa_bytes_to_usec(u->written_bytes - mmt.u.cb, &s->sample_spec);
+ else {
+ EnterCriticalSection(&u->crit);
+
+ free_frags = u->free_ofrags;
+
+ LeaveCriticalSection(&u->crit);
+
+ return pa_bytes_to_usec((u->fragments - free_frags) * u->fragment_size,
+ &s->sample_spec);
+ }
+}
+
+static pa_usec_t source_get_latency_cb(pa_source *s) {
+ pa_usec_t r = 0;
+ struct userdata *u = s->userdata;
+ uint32_t free_frags;
+    assert(s && u && u->source);
+
+ EnterCriticalSection(&u->crit);
+
+ free_frags = u->free_ifrags;
+
+ LeaveCriticalSection(&u->crit);
+
+ r += pa_bytes_to_usec((free_frags + 1) * u->fragment_size, &s->sample_spec);
+
+ return r;
+}
+
+static void notify_sink_cb(pa_sink *s) {
+ struct userdata *u = s->userdata;
+ assert(u);
+
+ u->core->mainloop->defer_enable(u->defer, 1);
+}
+
+static void notify_source_cb(pa_source *s) {
+ struct userdata *u = s->userdata;
+ assert(u);
+
+ u->core->mainloop->defer_enable(u->defer, 1);
+}
+
+static int sink_get_hw_volume_cb(pa_sink *s) {
+ struct userdata *u = s->userdata;
+ DWORD vol;
+ pa_volume_t left, right;
+
+ if (waveOutGetVolume(u->hwo, &vol) != MMSYSERR_NOERROR)
+ return -1;
+
+ left = (vol & 0xFFFF) * PA_VOLUME_NORM / WAVEOUT_MAX_VOLUME;
+ right = ((vol >> 16) & 0xFFFF) * PA_VOLUME_NORM / WAVEOUT_MAX_VOLUME;
+
+ /* Windows supports > 2 channels, except for volume control */
+ if (s->hw_volume.channels > 2)
+ pa_cvolume_set(&s->hw_volume, s->hw_volume.channels, (left + right)/2);
+
+ s->hw_volume.values[0] = left;
+ if (s->hw_volume.channels > 1)
+ s->hw_volume.values[1] = right;
+
+ return 0;
+}
+
+static int sink_set_hw_volume_cb(pa_sink *s) {
+ struct userdata *u = s->userdata;
+ DWORD vol;
+
+ vol = s->hw_volume.values[0] * WAVEOUT_MAX_VOLUME / PA_VOLUME_NORM;
+ if (s->hw_volume.channels > 1)
+        vol |= (s->hw_volume.values[1] * WAVEOUT_MAX_VOLUME / PA_VOLUME_NORM) << 16;
+
+ if (waveOutSetVolume(u->hwo, vol) != MMSYSERR_NOERROR)
+ return -1;
+
+ return 0;
+}
+
+static int ss_to_waveformat(pa_sample_spec *ss, LPWAVEFORMATEX wf) {
+ wf->wFormatTag = WAVE_FORMAT_PCM;
+
+ if (ss->channels > 2) {
+        pa_log_error("ERROR: More than two channels are not supported.");
+ return -1;
+ }
+
+ wf->nChannels = ss->channels;
+
+ switch (ss->rate) {
+ case 8000:
+ case 11025:
+    case 22050:
+ case 44100:
+ break;
+ default:
+ pa_log_error("ERROR: Unsupported sample rate.");
+ return -1;
+ }
+
+ wf->nSamplesPerSec = ss->rate;
+
+ if (ss->format == PA_SAMPLE_U8)
+ wf->wBitsPerSample = 8;
+ else if (ss->format == PA_SAMPLE_S16NE)
+ wf->wBitsPerSample = 16;
+ else {
+ pa_log_error("ERROR: Unsupported sample format.");
+ return -1;
+ }
+
+ wf->nBlockAlign = wf->nChannels * wf->wBitsPerSample/8;
+ wf->nAvgBytesPerSec = wf->nSamplesPerSec * wf->nBlockAlign;
+
+ wf->cbSize = 0;
+
+ return 0;
+}
+
+int pa__init(pa_core *c, pa_module*m) {
+ struct userdata *u = NULL;
+ HWAVEOUT hwo = INVALID_HANDLE_VALUE;
+ HWAVEIN hwi = INVALID_HANDLE_VALUE;
+ WAVEFORMATEX wf;
+ int nfrags, frag_size;
+ int record = 1, playback = 1;
+ unsigned int device;
+ pa_sample_spec ss;
+ pa_channel_map map;
+ pa_modargs *ma = NULL;
+ unsigned int i;
+ struct timeval tv;
+
+ assert(c && m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("failed to parse module arguments.");
+ goto fail;
+ }
+
+ if (pa_modargs_get_value_boolean(ma, "record", &record) < 0 || pa_modargs_get_value_boolean(ma, "playback", &playback) < 0) {
+ pa_log("record= and playback= expect boolean argument.");
+ goto fail;
+ }
+
+ if (!playback && !record) {
+ pa_log("neither playback nor record enabled for device.");
+ goto fail;
+ }
+
+ device = WAVE_MAPPER;
+ if (pa_modargs_get_value_u32(ma, "device", &device) < 0) {
+ pa_log("failed to parse device argument");
+ goto fail;
+ }
+
+ nfrags = 5;
+ frag_size = 8192;
+ if (pa_modargs_get_value_s32(ma, "fragments", &nfrags) < 0 || pa_modargs_get_value_s32(ma, "fragment_size", &frag_size) < 0) {
+ pa_log("failed to parse fragments arguments");
+ goto fail;
+ }
+
+ ss = c->default_sample_spec;
+ if (pa_modargs_get_sample_spec_and_channel_map(ma, &ss, &map, PA_CHANNEL_MAP_WAVEEX) < 0) {
+ pa_log("failed to parse sample specification");
+ goto fail;
+ }
+
+ if (ss_to_waveformat(&ss, &wf) < 0)
+ goto fail;
+
+ u = pa_xmalloc(sizeof(struct userdata));
+
+ if (record) {
+ if (waveInOpen(&hwi, device, &wf, (DWORD_PTR)chunk_ready_cb, (DWORD_PTR)u, CALLBACK_FUNCTION) != MMSYSERR_NOERROR) {
+ pa_log("failed to open waveIn");
+ goto fail;
+ }
+ if (waveInStart(hwi) != MMSYSERR_NOERROR) {
+ pa_log("failed to start waveIn");
+ goto fail;
+ }
+ pa_log_debug("Opened waveIn subsystem.");
+ }
+
+ if (playback) {
+ if (waveOutOpen(&hwo, device, &wf, (DWORD_PTR)chunk_done_cb, (DWORD_PTR)u, CALLBACK_FUNCTION) != MMSYSERR_NOERROR) {
+ pa_log("failed to open waveOut");
+ goto fail;
+ }
+ pa_log_debug("Opened waveOut subsystem.");
+ }
+
+ InitializeCriticalSection(&u->crit);
+
+ if (hwi != INVALID_HANDLE_VALUE) {
+ u->source = pa_source_new(c, __FILE__, pa_modargs_get_value(ma, "source_name", DEFAULT_SOURCE_NAME), 0, &ss, &map);
+ assert(u->source);
+ u->source->userdata = u;
+ u->source->notify = notify_source_cb;
+ u->source->get_latency = source_get_latency_cb;
+ pa_source_set_owner(u->source, m);
+ pa_source_set_description(u->source, "Windows waveIn PCM");
+ u->source->is_hardware = 1;
+ } else
+ u->source = NULL;
+
+ if (hwo != INVALID_HANDLE_VALUE) {
+ u->sink = pa_sink_new(c, __FILE__, pa_modargs_get_value(ma, "sink_name", DEFAULT_SINK_NAME), 0, &ss, &map);
+ assert(u->sink);
+ u->sink->notify = notify_sink_cb;
+ u->sink->get_latency = sink_get_latency_cb;
+ u->sink->get_hw_volume = sink_get_hw_volume_cb;
+ u->sink->set_hw_volume = sink_set_hw_volume_cb;
+ u->sink->userdata = u;
+ pa_sink_set_owner(u->sink, m);
+ pa_sink_set_description(u->sink, "Windows waveOut PCM");
+ u->sink->is_hardware = 1;
+ } else
+ u->sink = NULL;
+
+ assert(u->source || u->sink);
+
+ u->core = c;
+ u->hwi = hwi;
+ u->hwo = hwo;
+
+ u->fragments = nfrags;
+ u->free_ifrags = u->fragments;
+ u->free_ofrags = u->fragments;
+ u->fragment_size = frag_size - (frag_size % pa_frame_size(&ss));
+
+ u->written_bytes = 0;
+ u->sink_underflow = 1;
+
+ u->poll_timeout = pa_bytes_to_usec(u->fragments * u->fragment_size / 10, &ss);
+
+ pa_gettimeofday(&tv);
+ pa_timeval_add(&tv, u->poll_timeout);
+
+ u->event = c->mainloop->time_new(c->mainloop, &tv, poll_cb, u);
+ assert(u->event);
+
+ u->defer = c->mainloop->defer_new(c->mainloop, defer_cb, u);
+ assert(u->defer);
+ c->mainloop->defer_enable(u->defer, 0);
+
+ u->cur_ihdr = 0;
+ u->cur_ohdr = 0;
+ u->ihdrs = pa_xmalloc0(sizeof(WAVEHDR) * u->fragments);
+ assert(u->ihdrs);
+ u->ohdrs = pa_xmalloc0(sizeof(WAVEHDR) * u->fragments);
+ assert(u->ohdrs);
+ for (i = 0;i < u->fragments;i++) {
+ u->ihdrs[i].dwBufferLength = u->fragment_size;
+ u->ohdrs[i].dwBufferLength = u->fragment_size;
+ u->ihdrs[i].lpData = pa_xmalloc(u->fragment_size);
+        assert(u->ihdrs[i].lpData);
+ u->ohdrs[i].lpData = pa_xmalloc(u->fragment_size);
+        assert(u->ohdrs[i].lpData);
+ }
+
+ u->module = m;
+ m->userdata = u;
+
+ pa_modargs_free(ma);
+
+ /* Read mixer settings */
+ if (u->sink)
+ sink_get_hw_volume_cb(u->sink);
+
+ return 0;
+
+fail:
+ if (hwi != INVALID_HANDLE_VALUE)
+ waveInClose(hwi);
+
+ if (hwo != INVALID_HANDLE_VALUE)
+ waveOutClose(hwo);
+
+ if (u)
+ pa_xfree(u);
+
+ if (ma)
+ pa_modargs_free(ma);
+
+ return -1;
+}
+
+void pa__done(pa_core *c, pa_module*m) {
+ struct userdata *u;
+ unsigned int i;
+
+ assert(c && m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->event)
+ c->mainloop->time_free(u->event);
+
+ if (u->defer)
+ c->mainloop->defer_free(u->defer);
+
+ if (u->sink) {
+ pa_sink_disconnect(u->sink);
+ pa_sink_unref(u->sink);
+ }
+
+ if (u->source) {
+ pa_source_disconnect(u->source);
+ pa_source_unref(u->source);
+ }
+
+ if (u->hwi != INVALID_HANDLE_VALUE) {
+ waveInReset(u->hwi);
+ waveInClose(u->hwi);
+ }
+
+ if (u->hwo != INVALID_HANDLE_VALUE) {
+ waveOutReset(u->hwo);
+ waveOutClose(u->hwo);
+ }
+
+ for (i = 0;i < u->fragments;i++) {
+ pa_xfree(u->ihdrs[i].lpData);
+ pa_xfree(u->ohdrs[i].lpData);
+ }
+
+ pa_xfree(u->ihdrs);
+ pa_xfree(u->ohdrs);
+
+ DeleteCriticalSection(&u->crit);
+
+ pa_xfree(u);
+}
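To restate the latency accounting in sink_get_latency_cb() above: when waveOutGetPosition() succeeds, the latency is the difference between the running written-byte counter and the byte position the driver reports; when the query fails, the module falls back to counting the fragments that are still queued. The helper below is only a sketch of that arithmetic with hypothetical parameters, not part of the module itself:

    #include <stdint.h>

    /* Sketch of the two latency estimates used above, in bytes; the module
     * then converts the result to microseconds with pa_bytes_to_usec(). */
    static uint64_t latency_bytes(uint64_t written_bytes, uint64_t played_bytes,
                                  int position_known,
                                  uint32_t fragments, uint32_t free_frags,
                                  uint32_t fragment_size) {

        if (position_known)
            /* bytes handed to waveOut but not yet played */
            return written_bytes - played_bytes;

        /* otherwise count the fragments that are still queued */
        return (uint64_t) (fragments - free_frags) * fragment_size;
    }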
diff --git a/src/modules/module-x11-bell.c b/src/modules/module-x11-bell.c
new file mode 100644
index 00000000..87c6849d
--- /dev/null
+++ b/src/modules/module-x11-bell.c
@@ -0,0 +1,171 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <X11/Xlib.h>
+#include <X11/XKBlib.h>
+
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/iochannel.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/core-scache.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/namereg.h>
+#include <pulsecore/log.h>
+#include <pulsecore/x11wrap.h>
+
+#include "module-x11-bell-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("X11 Bell interceptor");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_USAGE("sink=<sink to connect to> sample=<sample name> display=<X11 display>");
+
+struct userdata {
+ pa_core *core;
+ int xkb_event_base;
+ char *sink_name;
+ char *scache_item;
+
+ pa_x11_wrapper *x11_wrapper;
+ pa_x11_client *x11_client;
+};
+
+static const char* const valid_modargs[] = {
+ "sink",
+ "sample",
+ "display",
+ NULL
+};
+
+static int x11_event_callback(pa_x11_wrapper *w, XEvent *e, void *userdata) {
+ XkbBellNotifyEvent *bne;
+ struct userdata *u = userdata;
+
+ pa_assert(w);
+ pa_assert(e);
+ pa_assert(u);
+ pa_assert(u->x11_wrapper == w);
+
+ if (((XkbEvent*) e)->any.xkb_type != XkbBellNotify)
+ return 0;
+
+ bne = (XkbBellNotifyEvent*) e;
+
+ if (pa_scache_play_item_by_name(u->core, u->scache_item, u->sink_name, (bne->percent*PA_VOLUME_NORM)/100, 1) < 0) {
+ pa_log_info("Ringing bell failed, reverting to X11 device bell.");
+ XkbForceDeviceBell(pa_x11_wrapper_get_display(w), bne->device, bne->bell_class, bne->bell_id, bne->percent);
+ }
+
+ return 1;
+}
+
+int pa__init(pa_module*m) {
+
+ struct userdata *u = NULL;
+ pa_modargs *ma = NULL;
+ int major, minor;
+ unsigned int auto_ctrls, auto_values;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments");
+ goto fail;
+ }
+
+ m->userdata = u = pa_xnew(struct userdata, 1);
+ u->core = m->core;
+ u->scache_item = pa_xstrdup(pa_modargs_get_value(ma, "sample", "x11-bell"));
+ u->sink_name = pa_xstrdup(pa_modargs_get_value(ma, "sink", NULL));
+ u->x11_client = NULL;
+
+ if (!(u->x11_wrapper = pa_x11_wrapper_get(m->core, pa_modargs_get_value(ma, "display", NULL))))
+ goto fail;
+
+ major = XkbMajorVersion;
+ minor = XkbMinorVersion;
+
+ if (!XkbLibraryVersion(&major, &minor)) {
+ pa_log("XkbLibraryVersion() failed");
+ goto fail;
+ }
+
+ major = XkbMajorVersion;
+ minor = XkbMinorVersion;
+
+ if (!XkbQueryExtension(pa_x11_wrapper_get_display(u->x11_wrapper), NULL, &u->xkb_event_base, NULL, &major, &minor)) {
+ pa_log("XkbQueryExtension() failed");
+ goto fail;
+ }
+
+ XkbSelectEvents(pa_x11_wrapper_get_display(u->x11_wrapper), XkbUseCoreKbd, XkbBellNotifyMask, XkbBellNotifyMask);
+ auto_ctrls = auto_values = XkbAudibleBellMask;
+ XkbSetAutoResetControls(pa_x11_wrapper_get_display(u->x11_wrapper), XkbAudibleBellMask, &auto_ctrls, &auto_values);
+ XkbChangeEnabledControls(pa_x11_wrapper_get_display(u->x11_wrapper), XkbUseCoreKbd, XkbAudibleBellMask, 0);
+
+ u->x11_client = pa_x11_client_new(u->x11_wrapper, x11_event_callback, u);
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+ if (ma)
+ pa_modargs_free(ma);
+
+ pa__done(m);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+
+ pa_assert(m);
+
+ if (!m->userdata)
+ return;
+
+ u = m->userdata;
+
+ pa_xfree(u->scache_item);
+ pa_xfree(u->sink_name);
+
+ if (u->x11_client)
+ pa_x11_client_free(u->x11_client);
+
+ if (u->x11_wrapper)
+ pa_x11_wrapper_unref(u->x11_wrapper);
+
+ pa_xfree(u);
+}
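The one piece of arithmetic worth spelling out in the bell handler above is the volume scaling: the XKB bell percentage (0-100) is mapped linearly onto PulseAudio's volume scale before pa_scache_play_item_by_name() is called. A minimal sketch, using a stand-in constant instead of the real PA_VOLUME_NORM:

    #include <stdint.h>

    #define EXAMPLE_VOLUME_NORM 0x10000U  /* stand-in for PA_VOLUME_NORM */

    /* percent in [0, 100] -> linear volume, as in x11_event_callback() above */
    static uint32_t bell_percent_to_volume(unsigned percent) {
        return ((uint32_t) percent * EXAMPLE_VOLUME_NORM) / 100;
    }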
diff --git a/src/modules/module-x11-publish.c b/src/modules/module-x11-publish.c
new file mode 100644
index 00000000..429c2a69
--- /dev/null
+++ b/src/modules/module-x11-publish.c
@@ -0,0 +1,198 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <X11/Xlib.h>
+#include <X11/Xatom.h>
+
+#include <pulse/util.h>
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/module.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/core-scache.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/namereg.h>
+#include <pulsecore/log.h>
+#include <pulsecore/x11wrap.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/native-common.h>
+#include <pulsecore/authkey-prop.h>
+#include <pulsecore/authkey.h>
+#include <pulsecore/x11prop.h>
+#include <pulsecore/strlist.h>
+#include <pulsecore/props.h>
+
+#include "module-x11-publish-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("X11 Credential Publisher");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_USAGE("display=<X11 display>");
+
+static const char* const valid_modargs[] = {
+ "display",
+ "sink",
+ "source",
+ "cookie",
+ NULL
+};
+
+struct userdata {
+ pa_core *core;
+ pa_x11_wrapper *x11_wrapper;
+ char *id;
+ uint8_t auth_cookie[PA_NATIVE_COOKIE_LENGTH];
+ int auth_cookie_in_property;
+};
+
+static int load_key(struct userdata *u, const char*fn) {
+ pa_assert(u);
+
+ u->auth_cookie_in_property = 0;
+
+ if (!fn && pa_authkey_prop_get(u->core, PA_NATIVE_COOKIE_PROPERTY_NAME, u->auth_cookie, sizeof(u->auth_cookie)) >= 0) {
+ pa_log_debug("using already loaded auth cookie.");
+ pa_authkey_prop_ref(u->core, PA_NATIVE_COOKIE_PROPERTY_NAME);
+ u->auth_cookie_in_property = 1;
+ return 0;
+ }
+
+ if (!fn)
+ fn = PA_NATIVE_COOKIE_FILE;
+
+ if (pa_authkey_load_auto(fn, u->auth_cookie, sizeof(u->auth_cookie)) < 0)
+ return -1;
+
+ pa_log_debug("Loading cookie from disk.");
+
+ if (pa_authkey_prop_put(u->core, PA_NATIVE_COOKIE_PROPERTY_NAME, u->auth_cookie, sizeof(u->auth_cookie)) >= 0)
+ u->auth_cookie_in_property = 1;
+
+ return 0;
+}
+
+int pa__init(pa_module*m) {
+ struct userdata *u;
+ pa_modargs *ma = NULL;
+ char hn[256], un[128];
+ char hx[PA_NATIVE_COOKIE_LENGTH*2+1];
+ const char *t;
+ char *s;
+ pa_strlist *l;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("failed to parse module arguments");
+ goto fail;
+ }
+
+ m->userdata = u = pa_xmalloc(sizeof(struct userdata));
+ u->core = m->core;
+ u->id = NULL;
+ u->auth_cookie_in_property = 0;
+
+ if (load_key(u, pa_modargs_get_value(ma, "cookie", NULL)) < 0)
+ goto fail;
+
+ if (!(u->x11_wrapper = pa_x11_wrapper_get(m->core, pa_modargs_get_value(ma, "display", NULL))))
+ goto fail;
+
+ if (!(l = pa_property_get(m->core, PA_NATIVE_SERVER_PROPERTY_NAME)))
+ goto fail;
+
+ l = pa_strlist_reverse(l);
+ s = pa_strlist_tostring(l);
+ l = pa_strlist_reverse(l);
+
+ pa_x11_set_prop(pa_x11_wrapper_get_display(u->x11_wrapper), "PULSE_SERVER", s);
+ pa_xfree(s);
+
+ if (!pa_get_fqdn(hn, sizeof(hn)) || !pa_get_user_name(un, sizeof(un)))
+ goto fail;
+
+ u->id = pa_sprintf_malloc("%s@%s/%u", un, hn, (unsigned) getpid());
+ pa_x11_set_prop(pa_x11_wrapper_get_display(u->x11_wrapper), "PULSE_ID", u->id);
+
+ if ((t = pa_modargs_get_value(ma, "source", NULL)))
+ pa_x11_set_prop(pa_x11_wrapper_get_display(u->x11_wrapper), "PULSE_SOURCE", t);
+
+ if ((t = pa_modargs_get_value(ma, "sink", NULL)))
+ pa_x11_set_prop(pa_x11_wrapper_get_display(u->x11_wrapper), "PULSE_SINK", t);
+
+ pa_x11_set_prop(pa_x11_wrapper_get_display(u->x11_wrapper), "PULSE_COOKIE", pa_hexstr(u->auth_cookie, sizeof(u->auth_cookie), hx, sizeof(hx)));
+
+ pa_modargs_free(ma);
+ return 0;
+
+fail:
+ if (ma)
+ pa_modargs_free(ma);
+
+ pa__done(m);
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata*u;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->x11_wrapper) {
+ char t[256];
+
+        /* Yes, there is a race condition here: another instance may have
+         * replaced these properties in the meantime, so only delete them
+         * if PULSE_ID still matches the one we set. */
+ if (!pa_x11_get_prop(pa_x11_wrapper_get_display(u->x11_wrapper), "PULSE_ID", t, sizeof(t)) || strcmp(t, u->id))
+ pa_log_warn("PulseAudio information vanished from X11!");
+ else {
+ pa_x11_del_prop(pa_x11_wrapper_get_display(u->x11_wrapper), "PULSE_ID");
+ pa_x11_del_prop(pa_x11_wrapper_get_display(u->x11_wrapper), "PULSE_SERVER");
+ pa_x11_del_prop(pa_x11_wrapper_get_display(u->x11_wrapper), "PULSE_SINK");
+ pa_x11_del_prop(pa_x11_wrapper_get_display(u->x11_wrapper), "PULSE_SOURCE");
+ pa_x11_del_prop(pa_x11_wrapper_get_display(u->x11_wrapper), "PULSE_COOKIE");
+ XSync(pa_x11_wrapper_get_display(u->x11_wrapper), False);
+ }
+ }
+
+ if (u->x11_wrapper)
+ pa_x11_wrapper_unref(u->x11_wrapper);
+
+ if (u->auth_cookie_in_property)
+ pa_authkey_prop_unref(m->core, PA_NATIVE_COOKIE_PROPERTY_NAME);
+
+ pa_xfree(u->id);
+ pa_xfree(u);
+}
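Summing up what the publishing code above leaves behind: a handful of string properties on the X11 display that clients can read back to locate and authenticate against the server. Roughly (the concrete values here are only illustrative):

    PULSE_SERVER  - the server address list taken from PA_NATIVE_SERVER_PROPERTY_NAME
    PULSE_ID      - "user@fqdn/pid", e.g. "user@host.example.com/1234"
    PULSE_SINK    - the sink name given as the sink= module argument, if any
    PULSE_SOURCE  - the source name given as the source= module argument, if any
    PULSE_COOKIE  - a hex dump of the shared authentication cookie

pa__done() removes these again only if PULSE_ID still matches the value this instance set, which is what the race-condition check above guards against.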
diff --git a/src/modules/module-x11-xsmp.c b/src/modules/module-x11-xsmp.c
new file mode 100644
index 00000000..e9efa096
--- /dev/null
+++ b/src/modules/module-x11-xsmp.c
@@ -0,0 +1,196 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <X11/Xlib.h>
+#include <X11/SM/SMlib.h>
+
+#include <pulse/xmalloc.h>
+#include <pulse/util.h>
+
+#include <pulsecore/iochannel.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/core-scache.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/namereg.h>
+#include <pulsecore/log.h>
+#include <pulsecore/core-util.h>
+
+#include "module-x11-xsmp-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("X11 session management");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(TRUE);
+
+static int ice_in_use = 0;
+
+static const char* const valid_modargs[] = {
+ NULL
+};
+
+static void die_cb(SmcConn connection, SmPointer client_data){
+ pa_core *c = PA_CORE(client_data);
+
+ pa_log_debug("Got die message from XSM. Exiting...");
+
+ pa_core_assert_ref(c);
+ c->mainloop->quit(c->mainloop, 0);
+}
+
+static void save_complete_cb(SmcConn connection, SmPointer client_data) {
+}
+
+static void shutdown_cancelled_cb(SmcConn connection, SmPointer client_data) {
+ SmcSaveYourselfDone(connection, True);
+}
+
+static void save_yourself_cb(SmcConn connection, SmPointer client_data, int save_type, Bool _shutdown, int interact_style, Bool fast) {
+ SmcSaveYourselfDone(connection, True);
+}
+
+static void ice_io_cb(pa_mainloop_api*a, pa_io_event *e, int fd, pa_io_event_flags_t flags, void *userdata) {
+ IceConn connection = userdata;
+
+ if (IceProcessMessages(connection, NULL, NULL) == IceProcessMessagesIOError) {
+ IceSetShutdownNegotiation(connection, False);
+ IceCloseConnection(connection);
+ }
+}
+
+static void new_ice_connection(IceConn connection, IcePointer client_data, Bool opening, IcePointer *watch_data) {
+ pa_core *c = client_data;
+
+ pa_assert(c);
+
+ if (opening)
+ *watch_data = c->mainloop->io_new(c->mainloop, IceConnectionNumber(connection), PA_IO_EVENT_INPUT, ice_io_cb, connection);
+ else
+ c->mainloop->io_free(*watch_data);
+}
+
+int pa__init(pa_module*m) {
+
+ pa_modargs *ma = NULL;
+ char t[256], *vendor, *client_id;
+ SmcCallbacks callbacks;
+ SmProp prop_program, prop_user;
+ SmProp *prop_list[2];
+ SmPropValue val_program, val_user;
+ SmcConn connection;
+
+ pa_assert(m);
+
+ if (ice_in_use) {
+        pa_log("module-x11-xsmp may not be loaded twice.");
+ return -1;
+ }
+
+ IceAddConnectionWatch(new_ice_connection, m->core);
+ ice_in_use = 1;
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments");
+ goto fail;
+ }
+
+ if (!getenv("SESSION_MANAGER")) {
+ pa_log("X11 session manager not running.");
+ goto fail;
+ }
+
+ memset(&callbacks, 0, sizeof(callbacks));
+ callbacks.die.callback = die_cb;
+ callbacks.die.client_data = m->core;
+ callbacks.save_yourself.callback = save_yourself_cb;
+ callbacks.save_yourself.client_data = m->core;
+ callbacks.save_complete.callback = save_complete_cb;
+ callbacks.save_complete.client_data = m->core;
+ callbacks.shutdown_cancelled.callback = shutdown_cancelled_cb;
+ callbacks.shutdown_cancelled.client_data = m->core;
+
+ if (!(m->userdata = connection = SmcOpenConnection(
+ NULL, m->core,
+ SmProtoMajor, SmProtoMinor,
+ SmcSaveYourselfProcMask | SmcDieProcMask | SmcSaveCompleteProcMask | SmcShutdownCancelledProcMask,
+ &callbacks, NULL, &client_id,
+ sizeof(t), t))) {
+
+ pa_log("Failed to open connection to session manager: %s", t);
+ goto fail;
+ }
+
+ prop_program.name = (char*) SmProgram;
+ prop_program.type = (char*) SmARRAY8;
+ val_program.value = (char*) PACKAGE_NAME;
+ val_program.length = strlen(val_program.value);
+ prop_program.num_vals = 1;
+ prop_program.vals = &val_program;
+ prop_list[0] = &prop_program;
+
+ prop_user.name = (char*) SmUserID;
+ prop_user.type = (char*) SmARRAY8;
+ pa_get_user_name(t, sizeof(t));
+ val_user.value = t;
+ val_user.length = strlen(val_user.value);
+ prop_user.num_vals = 1;
+ prop_user.vals = &val_user;
+ prop_list[1] = &prop_user;
+
+ SmcSetProperties(connection, PA_ELEMENTSOF(prop_list), prop_list);
+
+ pa_log_info("Connected to session manager '%s' as '%s'.", vendor = SmcVendor(connection), client_id);
+ free(vendor);
+ free(client_id);
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+ if (ma)
+ pa_modargs_free(ma);
+
+ pa__done(m);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ pa_assert(m);
+
+ if (m->userdata)
+ SmcCloseConnection(m->userdata, 0, NULL);
+
+ if (ice_in_use) {
+ IceRemoveConnectionWatch(new_ice_connection, m->core);
+ ice_in_use = 0;
+ }
+}
diff --git a/src/modules/module-zeroconf-discover.c b/src/modules/module-zeroconf-discover.c
new file mode 100644
index 00000000..4e76f448
--- /dev/null
+++ b/src/modules/module-zeroconf-discover.c
@@ -0,0 +1,443 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <avahi-client/client.h>
+#include <avahi-client/lookup.h>
+#include <avahi-common/alternative.h>
+#include <avahi-common/error.h>
+#include <avahi-common/domain.h>
+#include <avahi-common/malloc.h>
+
+#include <pulse/xmalloc.h>
+#include <pulse/util.h>
+
+#include <pulsecore/sink.h>
+#include <pulsecore/source.h>
+#include <pulsecore/native-common.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/log.h>
+#include <pulsecore/core-subscribe.h>
+#include <pulsecore/hashmap.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/namereg.h>
+#include <pulsecore/avahi-wrap.h>
+
+#include "module-zeroconf-discover-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("mDNS/DNS-SD Service Discovery");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(TRUE);
+
+#define SERVICE_TYPE_SINK "_pulse-sink._tcp"
+#define SERVICE_TYPE_SOURCE "_non-monitor._sub._pulse-source._tcp"
+
+static const char* const valid_modargs[] = {
+ NULL
+};
+
+struct tunnel {
+ AvahiIfIndex interface;
+ AvahiProtocol protocol;
+ char *name, *type, *domain;
+ uint32_t module_index;
+};
+
+struct userdata {
+ pa_core *core;
+ pa_module *module;
+ AvahiPoll *avahi_poll;
+ AvahiClient *client;
+ AvahiServiceBrowser *source_browser, *sink_browser;
+
+ pa_hashmap *tunnels;
+};
+
+static unsigned tunnel_hash(const void *p) {
+ const struct tunnel *t = p;
+
+ return
+ (unsigned) t->interface +
+ (unsigned) t->protocol +
+ pa_idxset_string_hash_func(t->name) +
+ pa_idxset_string_hash_func(t->type) +
+ pa_idxset_string_hash_func(t->domain);
+}
+
+static int tunnel_compare(const void *a, const void *b) {
+ const struct tunnel *ta = a, *tb = b;
+ int r;
+
+ if (ta->interface != tb->interface)
+ return 1;
+ if (ta->protocol != tb->protocol)
+ return 1;
+ if ((r = strcmp(ta->name, tb->name)))
+ return r;
+ if ((r = strcmp(ta->type, tb->type)))
+ return r;
+ if ((r = strcmp(ta->domain, tb->domain)))
+ return r;
+
+ return 0;
+}
+
+static struct tunnel *tunnel_new(
+ AvahiIfIndex interface, AvahiProtocol protocol,
+ const char *name, const char *type, const char *domain) {
+
+ struct tunnel *t;
+ t = pa_xnew(struct tunnel, 1);
+ t->interface = interface;
+ t->protocol = protocol;
+ t->name = pa_xstrdup(name);
+ t->type = pa_xstrdup(type);
+ t->domain = pa_xstrdup(domain);
+ t->module_index = PA_IDXSET_INVALID;
+ return t;
+}
+
+static void tunnel_free(struct tunnel *t) {
+ pa_assert(t);
+ pa_xfree(t->name);
+ pa_xfree(t->type);
+ pa_xfree(t->domain);
+ pa_xfree(t);
+}
+
+static void resolver_cb(
+ AvahiServiceResolver *r,
+ AvahiIfIndex interface, AvahiProtocol protocol,
+ AvahiResolverEvent event,
+ const char *name, const char *type, const char *domain,
+ const char *host_name, const AvahiAddress *a, uint16_t port,
+ AvahiStringList *txt,
+ AvahiLookupResultFlags flags,
+ void *userdata) {
+
+ struct userdata *u = userdata;
+ struct tunnel *tnl;
+
+ pa_assert(u);
+
+ tnl = tunnel_new(interface, protocol, name, type, domain);
+
+ if (event != AVAHI_RESOLVER_FOUND)
+ pa_log("Resolving of '%s' failed: %s", name, avahi_strerror(avahi_client_errno(u->client)));
+ else {
+ char *device = NULL, *dname, *module_name, *args;
+ const char *t;
+ char at[AVAHI_ADDRESS_STR_MAX], cmt[PA_CHANNEL_MAP_SNPRINT_MAX];
+ pa_sample_spec ss;
+ pa_channel_map cm;
+ AvahiStringList *l;
+ pa_bool_t channel_map_set = FALSE;
+ pa_module *m;
+
+ ss = u->core->default_sample_spec;
+ pa_assert_se(pa_channel_map_init_auto(&cm, ss.channels, PA_CHANNEL_MAP_AUX));
+ pa_channel_map_init_auto(&cm, ss.channels, PA_CHANNEL_MAP_DEFAULT);
+
+ for (l = txt; l; l = l->next) {
+ char *key, *value;
+ pa_assert_se(avahi_string_list_get_pair(l, &key, &value, NULL) == 0);
+
+ if (strcmp(key, "device") == 0) {
+ pa_xfree(device);
+ device = value;
+ value = NULL;
+ } else if (strcmp(key, "rate") == 0)
+ ss.rate = atoi(value);
+ else if (strcmp(key, "channels") == 0)
+ ss.channels = atoi(value);
+ else if (strcmp(key, "format") == 0)
+ ss.format = pa_parse_sample_format(value);
+ else if (strcmp(key, "channel_map") == 0) {
+ pa_channel_map_parse(&cm, value);
+ channel_map_set = TRUE;
+ }
+
+ avahi_free(key);
+ avahi_free(value);
+ }
+
+ if (!channel_map_set && cm.channels != ss.channels) {
+ pa_assert_se(pa_channel_map_init_auto(&cm, ss.channels, PA_CHANNEL_MAP_AUX));
+ pa_channel_map_init_auto(&cm, ss.channels, PA_CHANNEL_MAP_DEFAULT);
+ }
+
+ if (!pa_sample_spec_valid(&ss)) {
+ pa_log("Service '%s' contains an invalid sample specification.", name);
+ avahi_free(device);
+ goto finish;
+ }
+
+ if (!pa_channel_map_valid(&cm) || cm.channels != ss.channels) {
+ pa_log("Service '%s' contains an invalid channel map.", name);
+ avahi_free(device);
+ goto finish;
+ }
+
+ if (device)
+ dname = pa_sprintf_malloc("tunnel.%s.%s", host_name, device);
+ else
+ dname = pa_sprintf_malloc("tunnel.%s", host_name);
+
+ if (!pa_namereg_is_valid_name(dname)) {
+            pa_log("Device name '%s' constructed from the service credentials is not valid.", dname);
+ avahi_free(device);
+ pa_xfree(dname);
+ goto finish;
+ }
+
+ t = strstr(type, "sink") ? "sink" : "source";
+
+ module_name = pa_sprintf_malloc("module-tunnel-%s", t);
+ args = pa_sprintf_malloc("server=[%s]:%u "
+ "%s=%s "
+ "format=%s "
+ "channels=%u "
+ "rate=%u "
+ "%s_name=%s "
+ "channel_map=%s",
+ avahi_address_snprint(at, sizeof(at), a), port,
+ t, device,
+ pa_sample_format_to_string(ss.format),
+ ss.channels,
+ ss.rate,
+ t, dname,
+ pa_channel_map_snprint(cmt, sizeof(cmt), &cm));
+
+        pa_log_debug("Loading %s with arguments '%s'", module_name, args);
+
+ if ((m = pa_module_load(u->core, module_name, args))) {
+ tnl->module_index = m->index;
+ pa_hashmap_put(u->tunnels, tnl, tnl);
+ tnl = NULL;
+ }
+
+ pa_xfree(module_name);
+ pa_xfree(dname);
+ pa_xfree(args);
+ avahi_free(device);
+ }
+
+finish:
+
+ avahi_service_resolver_free(r);
+
+ if (tnl)
+ tunnel_free(tnl);
+}
+
+static void browser_cb(
+ AvahiServiceBrowser *b,
+ AvahiIfIndex interface, AvahiProtocol protocol,
+ AvahiBrowserEvent event,
+ const char *name, const char *type, const char *domain,
+ AvahiLookupResultFlags flags,
+ void *userdata) {
+
+ struct userdata *u = userdata;
+ struct tunnel *t;
+
+ pa_assert(u);
+
+ if (flags & AVAHI_LOOKUP_RESULT_LOCAL)
+ return;
+
+ t = tunnel_new(interface, protocol, name, type, domain);
+
+ if (event == AVAHI_BROWSER_NEW) {
+
+ if (!pa_hashmap_get(u->tunnels, t))
+ if (!(avahi_service_resolver_new(u->client, interface, protocol, name, type, domain, AVAHI_PROTO_UNSPEC, 0, resolver_cb, u)))
+ pa_log("avahi_service_resolver_new() failed: %s", avahi_strerror(avahi_client_errno(u->client)));
+
+        /* We ignore the returned resolver object here, since we don't
+         * need to attach any special data to it, and we can still destroy
+         * it from the callback */
+
+ } else if (event == AVAHI_BROWSER_REMOVE) {
+ struct tunnel *t2;
+
+ if ((t2 = pa_hashmap_get(u->tunnels, t))) {
+ pa_module_unload_by_index(u->core, t2->module_index);
+ pa_hashmap_remove(u->tunnels, t2);
+ tunnel_free(t2);
+ }
+ }
+
+ tunnel_free(t);
+}
+
+static void client_callback(AvahiClient *c, AvahiClientState state, void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(c);
+ pa_assert(u);
+
+ u->client = c;
+
+ switch (state) {
+ case AVAHI_CLIENT_S_REGISTERING:
+ case AVAHI_CLIENT_S_RUNNING:
+ case AVAHI_CLIENT_S_COLLISION:
+
+ if (!u->sink_browser) {
+
+ if (!(u->sink_browser = avahi_service_browser_new(
+ c,
+ AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC,
+ SERVICE_TYPE_SINK,
+ NULL,
+ 0,
+ browser_cb, u))) {
+
+ pa_log("avahi_service_browser_new() failed: %s", avahi_strerror(avahi_client_errno(c)));
+ pa_module_unload_request(u->module);
+ }
+ }
+
+ if (!u->source_browser) {
+
+ if (!(u->source_browser = avahi_service_browser_new(
+ c,
+ AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC,
+ SERVICE_TYPE_SOURCE,
+ NULL,
+ 0,
+ browser_cb, u))) {
+
+ pa_log("avahi_service_browser_new() failed: %s", avahi_strerror(avahi_client_errno(c)));
+ pa_module_unload_request(u->module);
+ }
+ }
+
+ break;
+
+ case AVAHI_CLIENT_FAILURE:
+ if (avahi_client_errno(c) == AVAHI_ERR_DISCONNECTED) {
+ int error;
+
+ pa_log_debug("Avahi daemon disconnected.");
+
+ if (!(u->client = avahi_client_new(u->avahi_poll, AVAHI_CLIENT_NO_FAIL, client_callback, u, &error))) {
+ pa_log("avahi_client_new() failed: %s", avahi_strerror(error));
+ pa_module_unload_request(u->module);
+ }
+ }
+
+ /* Fall through */
+
+ case AVAHI_CLIENT_CONNECTING:
+
+ if (u->sink_browser) {
+ avahi_service_browser_free(u->sink_browser);
+ u->sink_browser = NULL;
+ }
+
+ if (u->source_browser) {
+ avahi_service_browser_free(u->source_browser);
+ u->source_browser = NULL;
+ }
+
+ break;
+
+ default: ;
+ }
+}
+
+int pa__init(pa_module*m) {
+
+ struct userdata *u;
+ pa_modargs *ma = NULL;
+ int error;
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments.");
+ goto fail;
+ }
+
+ m->userdata = u = pa_xnew(struct userdata, 1);
+ u->core = m->core;
+ u->module = m;
+ u->sink_browser = u->source_browser = NULL;
+
+ u->tunnels = pa_hashmap_new(tunnel_hash, tunnel_compare);
+
+ u->avahi_poll = pa_avahi_poll_new(m->core->mainloop);
+
+ if (!(u->client = avahi_client_new(u->avahi_poll, AVAHI_CLIENT_NO_FAIL, client_callback, u, &error))) {
+        pa_log("avahi_client_new() failed: %s", avahi_strerror(error));
+ goto fail;
+ }
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+ pa__done(m);
+
+ if (ma)
+ pa_modargs_free(ma);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata*u;
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->client)
+ avahi_client_free(u->client);
+
+ if (u->avahi_poll)
+ pa_avahi_poll_free(u->avahi_poll);
+
+ if (u->tunnels) {
+ struct tunnel *t;
+
+ while ((t = pa_hashmap_steal_first(u->tunnels))) {
+ pa_module_unload_by_index(u->core, t->module_index);
+ tunnel_free(t);
+ }
+
+ pa_hashmap_free(u->tunnels, NULL, NULL);
+ }
+
+ pa_xfree(u);
+}
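To make the resolver callback above concrete: for a discovered sink it assembles a module-tunnel-sink argument string from the resolved address and port plus the TXT record fields (device, format, rate, channels, channel_map). With purely hypothetical values, the module and arguments it loads would look roughly like:

    module-tunnel-sink
        server=[192.168.1.10]:4713 sink=alsa_output.0 format=s16le
        channels=2 rate=44100 sink_name=tunnel.host.local.alsa_output.0
        channel_map=front-left,front-right

For a discovered source the same template is used with source= and source_name= instead, and the resulting module index is remembered in the tunnels hashmap so the tunnel can be unloaded again when the service disappears.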
diff --git a/src/modules/module-zeroconf-publish.c b/src/modules/module-zeroconf-publish.c
new file mode 100644
index 00000000..46969a24
--- /dev/null
+++ b/src/modules/module-zeroconf-publish.c
@@ -0,0 +1,650 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as
+ published by the Free Software Foundation; either version 2 of the
+ License, or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <avahi-client/client.h>
+#include <avahi-client/publish.h>
+#include <avahi-common/alternative.h>
+#include <avahi-common/error.h>
+#include <avahi-common/domain.h>
+
+#include <pulse/xmalloc.h>
+#include <pulse/util.h>
+
+#include <pulsecore/sink.h>
+#include <pulsecore/source.h>
+#include <pulsecore/native-common.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/log.h>
+#include <pulsecore/core-subscribe.h>
+#include <pulsecore/dynarray.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/avahi-wrap.h>
+#include <pulsecore/endianmacros.h>
+
+#include "module-zeroconf-publish-symdef.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("mDNS/DNS-SD Service Publisher");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(TRUE);
+PA_MODULE_USAGE("port=<IP port number>");
+
+#define SERVICE_TYPE_SINK "_pulse-sink._tcp"
+#define SERVICE_TYPE_SOURCE "_pulse-source._tcp"
+#define SERVICE_TYPE_SERVER "_pulse-server._tcp"
+#define SERVICE_SUBTYPE_SINK_HARDWARE "_hardware._sub."SERVICE_TYPE_SINK
+#define SERVICE_SUBTYPE_SINK_VIRTUAL "_virtual._sub."SERVICE_TYPE_SINK
+#define SERVICE_SUBTYPE_SOURCE_HARDWARE "_hardware._sub."SERVICE_TYPE_SOURCE
+#define SERVICE_SUBTYPE_SOURCE_VIRTUAL "_virtual._sub."SERVICE_TYPE_SOURCE
+#define SERVICE_SUBTYPE_SOURCE_MONITOR "_monitor._sub."SERVICE_TYPE_SOURCE
+#define SERVICE_SUBTYPE_SOURCE_NON_MONITOR "_non-monitor._sub."SERVICE_TYPE_SOURCE
+
+static const char* const valid_modargs[] = {
+ "port",
+ NULL
+};
+
+enum service_subtype {
+ SUBTYPE_HARDWARE,
+ SUBTYPE_VIRTUAL,
+ SUBTYPE_MONITOR
+};
+
+struct service {
+ struct userdata *userdata;
+ AvahiEntryGroup *entry_group;
+ char *service_name;
+ pa_object *device;
+ enum service_subtype subtype;
+};
+
+struct userdata {
+ pa_core *core;
+ pa_module *module;
+ AvahiPoll *avahi_poll;
+ AvahiClient *client;
+
+ pa_hashmap *services;
+ char *service_name;
+
+ AvahiEntryGroup *main_entry_group;
+
+ uint16_t port;
+
+ pa_hook_slot *sink_new_slot, *source_new_slot, *sink_unlink_slot, *source_unlink_slot, *sink_changed_slot, *source_changed_slot;
+};
+
+static void get_service_data(struct service *s, pa_sample_spec *ret_ss, pa_channel_map *ret_map, const char **ret_name, const char **ret_description, enum service_subtype *ret_subtype) {
+ pa_assert(s);
+ pa_assert(ret_ss);
+ pa_assert(ret_description);
+ pa_assert(ret_subtype);
+
+ if (pa_sink_isinstance(s->device)) {
+ pa_sink *sink = PA_SINK(s->device);
+
+ *ret_ss = sink->sample_spec;
+ *ret_map = sink->channel_map;
+ *ret_name = sink->name;
+ *ret_description = sink->description;
+ *ret_subtype = sink->flags & PA_SINK_HARDWARE ? SUBTYPE_HARDWARE : SUBTYPE_VIRTUAL;
+
+ } else if (pa_source_isinstance(s->device)) {
+ pa_source *source = PA_SOURCE(s->device);
+
+ *ret_ss = source->sample_spec;
+ *ret_map = source->channel_map;
+ *ret_name = source->name;
+ *ret_description = source->description;
+ *ret_subtype = source->monitor_of ? SUBTYPE_MONITOR : (source->flags & PA_SOURCE_HARDWARE ? SUBTYPE_HARDWARE : SUBTYPE_VIRTUAL);
+
+ } else
+ pa_assert_not_reached();
+}
+
+static AvahiStringList* txt_record_server_data(pa_core *c, AvahiStringList *l) {
+ char s[128];
+
+ pa_assert(c);
+
+ l = avahi_string_list_add_pair(l, "server-version", PACKAGE_NAME" "PACKAGE_VERSION);
+ l = avahi_string_list_add_pair(l, "user-name", pa_get_user_name(s, sizeof(s)));
+ l = avahi_string_list_add_pair(l, "fqdn", pa_get_fqdn(s, sizeof(s)));
+ l = avahi_string_list_add_printf(l, "cookie=0x%08x", c->cookie);
+
+ return l;
+}
+
+static int publish_service(struct service *s);
+
+static void service_entry_group_callback(AvahiEntryGroup *g, AvahiEntryGroupState state, void *userdata) {
+ struct service *s = userdata;
+
+ pa_assert(s);
+
+ switch (state) {
+
+ case AVAHI_ENTRY_GROUP_ESTABLISHED:
+ pa_log_info("Successfully established service %s.", s->service_name);
+ break;
+
+ case AVAHI_ENTRY_GROUP_COLLISION: {
+ char *t;
+
+ t = avahi_alternative_service_name(s->service_name);
+ pa_log_info("Name collision, renaming %s to %s.", s->service_name, t);
+ pa_xfree(s->service_name);
+ s->service_name = t;
+
+ publish_service(s);
+ break;
+ }
+
+ case AVAHI_ENTRY_GROUP_FAILURE: {
+ pa_log("Failed to register service: %s", avahi_strerror(avahi_client_errno(avahi_entry_group_get_client(g))));
+
+ avahi_entry_group_free(g);
+ s->entry_group = NULL;
+
+ break;
+ }
+
+ case AVAHI_ENTRY_GROUP_UNCOMMITED:
+ case AVAHI_ENTRY_GROUP_REGISTERING:
+ ;
+ }
+}
+
+static void service_free(struct service *s);
+
+static int publish_service(struct service *s) {
+ int r = -1;
+ AvahiStringList *txt = NULL;
+ const char *description = NULL, *name = NULL;
+ pa_sample_spec ss;
+ pa_channel_map map;
+ char cm[PA_CHANNEL_MAP_SNPRINT_MAX];
+ enum service_subtype subtype;
+
+ const char * const subtype_text[] = {
+ [SUBTYPE_HARDWARE] = "hardware",
+ [SUBTYPE_VIRTUAL] = "virtual",
+ [SUBTYPE_MONITOR] = "monitor"
+ };
+
+ pa_assert(s);
+
+ if (!s->userdata->client || avahi_client_get_state(s->userdata->client) != AVAHI_CLIENT_S_RUNNING)
+ return 0;
+
+ if (!s->entry_group) {
+ if (!(s->entry_group = avahi_entry_group_new(s->userdata->client, service_entry_group_callback, s))) {
+ pa_log("avahi_entry_group_new(): %s", avahi_strerror(avahi_client_errno(s->userdata->client)));
+ goto finish;
+ }
+ } else
+ avahi_entry_group_reset(s->entry_group);
+
+ txt = txt_record_server_data(s->userdata->core, txt);
+
+ get_service_data(s, &ss, &map, &name, &description, &subtype);
+ txt = avahi_string_list_add_pair(txt, "device", name);
+ txt = avahi_string_list_add_printf(txt, "rate=%u", ss.rate);
+ txt = avahi_string_list_add_printf(txt, "channels=%u", ss.channels);
+ txt = avahi_string_list_add_pair(txt, "format", pa_sample_format_to_string(ss.format));
+ txt = avahi_string_list_add_pair(txt, "channel_map", pa_channel_map_snprint(cm, sizeof(cm), &map));
+ txt = avahi_string_list_add_pair(txt, "subtype", subtype_text[subtype]);
+
+ if (avahi_entry_group_add_service_strlst(
+ s->entry_group,
+ AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC,
+ 0,
+ s->service_name,
+ pa_sink_isinstance(s->device) ? SERVICE_TYPE_SINK : SERVICE_TYPE_SOURCE,
+ NULL,
+ NULL,
+ s->userdata->port,
+ txt) < 0) {
+
+ pa_log("avahi_entry_group_add_service_strlst(): %s", avahi_strerror(avahi_client_errno(s->userdata->client)));
+ goto finish;
+ }
+
+ if (avahi_entry_group_add_service_subtype(
+ s->entry_group,
+ AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC,
+ 0,
+ s->service_name,
+ pa_sink_isinstance(s->device) ? SERVICE_TYPE_SINK : SERVICE_TYPE_SOURCE,
+ NULL,
+ pa_sink_isinstance(s->device) ? (subtype == SUBTYPE_HARDWARE ? SERVICE_SUBTYPE_SINK_HARDWARE : SERVICE_SUBTYPE_SINK_VIRTUAL) :
+ (subtype == SUBTYPE_HARDWARE ? SERVICE_SUBTYPE_SOURCE_HARDWARE : (subtype == SUBTYPE_VIRTUAL ? SERVICE_SUBTYPE_SOURCE_VIRTUAL : SERVICE_SUBTYPE_SOURCE_MONITOR))) < 0) {
+
+ pa_log("avahi_entry_group_add_service_subtype(): %s", avahi_strerror(avahi_client_errno(s->userdata->client)));
+ goto finish;
+ }
+
+ if (pa_source_isinstance(s->device) && subtype != SUBTYPE_MONITOR) {
+ if (avahi_entry_group_add_service_subtype(
+ s->entry_group,
+ AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC,
+ 0,
+ s->service_name,
+ SERVICE_TYPE_SOURCE,
+ NULL,
+ SERVICE_SUBTYPE_SOURCE_NON_MONITOR) < 0) {
+
+ pa_log("avahi_entry_group_add_service_subtype(): %s", avahi_strerror(avahi_client_errno(s->userdata->client)));
+ goto finish;
+ }
+ }
+
+ if (avahi_entry_group_commit(s->entry_group) < 0) {
+ pa_log("avahi_entry_group_commit(): %s", avahi_strerror(avahi_client_errno(s->userdata->client)));
+ goto finish;
+ }
+
+ r = 0;
+ pa_log_debug("Successfully created entry group for %s.", s->service_name);
+
+finish:
+
+ /* Remove this service */
+ if (r < 0)
+ service_free(s);
+
+ avahi_string_list_free(txt);
+
+ return r;
+}
+
+static struct service *get_service(struct userdata *u, pa_object *device) {
+ struct service *s;
+ char hn[64], un[64];
+ const char *n;
+
+ pa_assert(u);
+ pa_object_assert_ref(device);
+
+ if ((s = pa_hashmap_get(u->services, device)))
+ return s;
+
+ s = pa_xnew(struct service, 1);
+ s->userdata = u;
+ s->entry_group = NULL;
+ s->device = device;
+
+ if (pa_sink_isinstance(device)) {
+ if (!(n = PA_SINK(device)->description))
+ n = PA_SINK(device)->name;
+ } else {
+ if (!(n = PA_SOURCE(device)->description))
+ n = PA_SOURCE(device)->name;
+ }
+
+ s->service_name = pa_truncate_utf8(pa_sprintf_malloc("%s@%s: %s",
+ pa_get_user_name(un, sizeof(un)),
+ pa_get_host_name(hn, sizeof(hn)),
+ n),
+ AVAHI_LABEL_MAX-1);
+
+ pa_hashmap_put(u->services, s->device, s);
+
+ return s;
+}
+
+static void service_free(struct service *s) {
+ pa_assert(s);
+
+ pa_hashmap_remove(s->userdata->services, s->device);
+
+ if (s->entry_group) {
+ pa_log_debug("Removing entry group for %s.", s->service_name);
+ avahi_entry_group_free(s->entry_group);
+ }
+
+ pa_xfree(s->service_name);
+ pa_xfree(s);
+}
+
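+/* Sinks and sources that are themselves network devices, as well as
+ * monitor sources, are never announced */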
+static pa_bool_t shall_ignore(pa_object *o) {
+ pa_object_assert_ref(o);
+
+ if (pa_sink_isinstance(o))
+ return !!(PA_SINK(o)->flags & PA_SINK_NETWORK);
+
+ if (pa_source_isinstance(o))
+ return PA_SOURCE(o)->monitor_of || (PA_SOURCE(o)->flags & PA_SOURCE_NETWORK);
+
+ pa_assert_not_reached();
+}
+
+static pa_hook_result_t device_new_or_changed_cb(pa_core *c, pa_object *o, struct userdata *u) {
+ pa_assert(c);
+ pa_object_assert_ref(o);
+
+ if (!shall_ignore(o))
+ publish_service(get_service(u, o));
+
+ return PA_HOOK_OK;
+}
+
+static pa_hook_result_t device_unlink_cb(pa_core *c, pa_object *o, struct userdata *u) {
+ struct service *s;
+
+ pa_assert(c);
+ pa_object_assert_ref(o);
+
+ if ((s = pa_hashmap_get(u->services, o)))
+ service_free(s);
+
+ return PA_HOOK_OK;
+}
+
+static int publish_main_service(struct userdata *u);
+
+static void main_entry_group_callback(AvahiEntryGroup *g, AvahiEntryGroupState state, void *userdata) {
+ struct userdata *u = userdata;
+ pa_assert(u);
+
+ switch (state) {
+
+ case AVAHI_ENTRY_GROUP_ESTABLISHED:
+ pa_log_info("Successfully established main service.");
+ break;
+
+ case AVAHI_ENTRY_GROUP_COLLISION: {
+ char *t;
+
+ t = avahi_alternative_service_name(u->service_name);
+ pa_log_info("Name collision: renaming main service %s to %s.", u->service_name, t);
+ pa_xfree(u->service_name);
+ u->service_name = t;
+
+ publish_main_service(u);
+ break;
+ }
+
+ case AVAHI_ENTRY_GROUP_FAILURE: {
+ pa_log("Failed to register main service: %s", avahi_strerror(avahi_client_errno(avahi_entry_group_get_client(g))));
+
+ avahi_entry_group_free(g);
+ u->main_entry_group = NULL;
+ break;
+ }
+
+ case AVAHI_ENTRY_GROUP_UNCOMMITED:
+ case AVAHI_ENTRY_GROUP_REGISTERING:
+ break;
+ }
+}
+
+static int publish_main_service(struct userdata *u) {
+ AvahiStringList *txt = NULL;
+ int r = -1;
+
+ pa_assert(u);
+
+ if (!u->main_entry_group) {
+ if (!(u->main_entry_group = avahi_entry_group_new(u->client, main_entry_group_callback, u))) {
+ pa_log("avahi_entry_group_new() failed: %s", avahi_strerror(avahi_client_errno(u->client)));
+ goto fail;
+ }
+ } else
+ avahi_entry_group_reset(u->main_entry_group);
+
+ txt = txt_record_server_data(u->core, txt);
+
+ if (avahi_entry_group_add_service_strlst(
+ u->main_entry_group,
+ AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC,
+ 0,
+ u->service_name,
+ SERVICE_TYPE_SERVER,
+ NULL,
+ NULL,
+ u->port,
+ txt) < 0) {
+
+ pa_log("avahi_entry_group_add_service_strlst() failed: %s", avahi_strerror(avahi_client_errno(u->client)));
+ goto fail;
+ }
+
+ if (avahi_entry_group_commit(u->main_entry_group) < 0) {
+ pa_log("avahi_entry_group_commit() failed: %s", avahi_strerror(avahi_client_errno(u->client)));
+ goto fail;
+ }
+
+ r = 0;
+
+fail:
+ avahi_string_list_free(txt);
+
+ return r;
+}
+
+static int publish_all_services(struct userdata *u) {
+ pa_sink *sink;
+ pa_source *source;
+ int r = -1;
+ uint32_t idx;
+
+ pa_assert(u);
+
+ pa_log_debug("Publishing services in Zeroconf");
+
+ for (sink = PA_SINK(pa_idxset_first(u->core->sinks, &idx)); sink; sink = PA_SINK(pa_idxset_next(u->core->sinks, &idx)))
+ if (!shall_ignore(PA_OBJECT(sink)))
+ publish_service(get_service(u, PA_OBJECT(sink)));
+
+ for (source = PA_SOURCE(pa_idxset_first(u->core->sources, &idx)); source; source = PA_SOURCE(pa_idxset_next(u->core->sources, &idx)))
+ if (!shall_ignore(PA_OBJECT(source)))
+ publish_service(get_service(u, PA_OBJECT(source)));
+
+ if (publish_main_service(u) < 0)
+ goto fail;
+
+ r = 0;
+
+fail:
+ return r;
+}
+
+static void unpublish_all_services(struct userdata *u, pa_bool_t rem) {
+ void *state = NULL;
+ struct service *s;
+
+ pa_assert(u);
+
+ pa_log_debug("Unpublishing services in Zeroconf");
+
+ while ((s = pa_hashmap_iterate(u->services, &state, NULL))) {
+ if (s->entry_group) {
+ if (rem) {
+ pa_log_debug("Removing entry group for %s.", s->service_name);
+ avahi_entry_group_free(s->entry_group);
+ s->entry_group = NULL;
+ } else {
+ avahi_entry_group_reset(s->entry_group);
+ pa_log_debug("Resetting entry group for %s.", s->service_name);
+ }
+ }
+ }
+
+ if (u->main_entry_group) {
+ if (rem) {
+ pa_log_debug("Removing main entry group.");
+ avahi_entry_group_free(u->main_entry_group);
+ u->main_entry_group = NULL;
+ } else {
+ avahi_entry_group_reset(u->main_entry_group);
+ pa_log_debug("Resetting main entry group.");
+ }
+ }
+}
+
+static void client_callback(AvahiClient *c, AvahiClientState state, void *userdata) {
+ struct userdata *u = userdata;
+
+ pa_assert(c);
+ pa_assert(u);
+
+ u->client = c;
+
+ switch (state) {
+ case AVAHI_CLIENT_S_RUNNING:
+ publish_all_services(u);
+ break;
+
+ case AVAHI_CLIENT_S_COLLISION:
+ pa_log_debug("Host name collision");
+ unpublish_all_services(u, FALSE);
+ break;
+
+ case AVAHI_CLIENT_FAILURE:
+ if (avahi_client_errno(c) == AVAHI_ERR_DISCONNECTED) {
+ int error;
+
+ pa_log_debug("Avahi daemon disconnected.");
+
+ unpublish_all_services(u, TRUE);
+ avahi_client_free(u->client);
+
+ if (!(u->client = avahi_client_new(u->avahi_poll, AVAHI_CLIENT_NO_FAIL, client_callback, u, &error))) {
+ pa_log("avahi_client_new() failed: %s", avahi_strerror(error));
+ pa_module_unload_request(u->module);
+ }
+ }
+
+ break;
+
+ default: ;
+ }
+}
+
+int pa__init(pa_module*m) {
+
+ struct userdata *u;
+ uint32_t port = PA_NATIVE_DEFAULT_PORT;
+ pa_modargs *ma = NULL;
+ char hn[256], un[256];
+ int error;
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments.");
+ goto fail;
+ }
+
+ if (pa_modargs_get_value_u32(ma, "port", &port) < 0 || port <= 0 || port > 0xFFFF) {
+ pa_log("Invalid port specified.");
+ goto fail;
+ }
+
+ m->userdata = u = pa_xnew(struct userdata, 1);
+ u->core = m->core;
+ u->module = m;
+ u->port = (uint16_t) port;
+
+ u->avahi_poll = pa_avahi_poll_new(m->core->mainloop);
+
+ u->services = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
+
+ u->sink_new_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_NEW_POST], (pa_hook_cb_t) device_new_or_changed_cb, u);
+ u->sink_changed_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_DESCRIPTION_CHANGED], (pa_hook_cb_t) device_new_or_changed_cb, u);
+ u->sink_unlink_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SINK_UNLINK], (pa_hook_cb_t) device_unlink_cb, u);
+ u->source_new_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SOURCE_NEW_POST], (pa_hook_cb_t) device_new_or_changed_cb, u);
+ u->source_changed_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SOURCE_DESCRIPTION_CHANGED], (pa_hook_cb_t) device_new_or_changed_cb, u);
+ u->source_unlink_slot = pa_hook_connect(&m->core->hooks[PA_CORE_HOOK_SOURCE_UNLINK], (pa_hook_cb_t) device_unlink_cb, u);
+
+ u->main_entry_group = NULL;
+
+ u->service_name = pa_truncate_utf8(pa_sprintf_malloc("%s@%s", pa_get_user_name(un, sizeof(un)), pa_get_host_name(hn, sizeof(hn))), AVAHI_LABEL_MAX-1);
+
+ if (!(u->client = avahi_client_new(u->avahi_poll, AVAHI_CLIENT_NO_FAIL, client_callback, u, &error))) {
+ pa_log("avahi_client_new() failed: %s", avahi_strerror(error));
+ goto fail;
+ }
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+ pa__done(m);
+
+ if (ma)
+ pa_modargs_free(ma);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata*u;
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->services) {
+ struct service *s;
+
+ while ((s = pa_hashmap_get_first(u->services)))
+ service_free(s);
+
+ pa_hashmap_free(u->services, NULL, NULL);
+ }
+
+ if (u->sink_new_slot)
+ pa_hook_slot_free(u->sink_new_slot);
+ if (u->source_new_slot)
+ pa_hook_slot_free(u->source_new_slot);
+ if (u->sink_changed_slot)
+ pa_hook_slot_free(u->sink_changed_slot);
+ if (u->source_changed_slot)
+ pa_hook_slot_free(u->source_changed_slot);
+ if (u->sink_unlink_slot)
+ pa_hook_slot_free(u->sink_unlink_slot);
+ if (u->source_unlink_slot)
+ pa_hook_slot_free(u->source_unlink_slot);
+
+ if (u->main_entry_group)
+ avahi_entry_group_free(u->main_entry_group);
+
+ if (u->client)
+ avahi_client_free(u->client);
+
+ if (u->avahi_poll)
+ pa_avahi_poll_free(u->avahi_poll);
+
+ pa_xfree(u->service_name);
+ pa_xfree(u);
+}
diff --git a/src/modules/oss-util.c b/src/modules/oss-util.c
new file mode 100644
index 00000000..9598feee
--- /dev/null
+++ b/src/modules/oss-util.c
@@ -0,0 +1,419 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+ Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <sys/soundcard.h>
+#include <sys/ioctl.h>
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+#include <pulse/xmalloc.h>
+#include <pulsecore/core-error.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/log.h>
+#include <pulsecore/macro.h>
+
+#include "oss-util.h"
+
+int pa_oss_open(const char *device, int *mode, int* pcaps) {
+ int fd = -1;
+ int caps;
+
+ pa_assert(device);
+ pa_assert(mode);
+ pa_assert(*mode == O_RDWR || *mode == O_RDONLY || *mode == O_WRONLY);
+
+ if (!pcaps)
+ pcaps = &caps;
+
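+ /* For duplex access, open the device read/write and verify that it
+ * really supports full duplex; otherwise fall back to write-only and
+ * finally to read-only */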
+ if (*mode == O_RDWR) {
+ if ((fd = open(device, O_RDWR|O_NDELAY|O_NOCTTY)) >= 0) {
+ ioctl(fd, SNDCTL_DSP_SETDUPLEX, 0);
+
+ if (ioctl(fd, SNDCTL_DSP_GETCAPS, pcaps) < 0) {
+ pa_log("SNDCTL_DSP_GETCAPS: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ if (*pcaps & DSP_CAP_DUPLEX)
+ goto success;
+
+ pa_log_warn("'%s' doesn't support full duplex", device);
+
+ pa_close(fd);
+ }
+
+ if ((fd = open(device, (*mode = O_WRONLY)|O_NDELAY|O_NOCTTY)) < 0) {
+ if ((fd = open(device, (*mode = O_RDONLY)|O_NDELAY|O_NOCTTY)) < 0) {
+ pa_log("open('%s'): %s", device, pa_cstrerror(errno));
+ goto fail;
+ }
+ }
+ } else {
+ if ((fd = open(device, *mode|O_NDELAY|O_NOCTTY)) < 0) {
+ pa_log("open('%s'): %s", device, pa_cstrerror(errno));
+ goto fail;
+ }
+ }
+
+ *pcaps = 0;
+
+ if (ioctl(fd, SNDCTL_DSP_GETCAPS, pcaps) < 0) {
+ pa_log("SNDCTL_DSP_GETCAPS: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+success:
+
+ pa_log_debug("capabilities:%s%s%s%s%s%s%s%s%s%s%s%s%s%s",
+ *pcaps & DSP_CAP_BATCH ? " BATCH" : "",
+#ifdef DSP_CAP_BIND
+ *pcaps & DSP_CAP_BIND ? " BIND" : "",
+#else
+ "",
+#endif
+ *pcaps & DSP_CAP_COPROC ? " COPROC" : "",
+ *pcaps & DSP_CAP_DUPLEX ? " DUPLEX" : "",
+#ifdef DSP_CAP_FREERATE
+ *pcaps & DSP_CAP_FREERATE ? " FREERATE" : "",
+#else
+ "",
+#endif
+#ifdef DSP_CAP_INPUT
+ *pcaps & DSP_CAP_INPUT ? " INPUT" : "",
+#else
+ "",
+#endif
+ *pcaps & DSP_CAP_MMAP ? " MMAP" : "",
+#ifdef DSP_CAP_MODEM
+ *pcaps & DSP_CAP_MODEM ? " MODEM" : "",
+#else
+ "",
+#endif
+#ifdef DSP_CAP_MULTI
+ *pcaps & DSP_CAP_MULTI ? " MULTI" : "",
+#else
+ "",
+#endif
+#ifdef DSP_CAP_OUTPUT
+ *pcaps & DSP_CAP_OUTPUT ? " OUTPUT" : "",
+#else
+ "",
+#endif
+ *pcaps & DSP_CAP_REALTIME ? " REALTIME" : "",
+#ifdef DSP_CAP_SHADOW
+ *pcaps & DSP_CAP_SHADOW ? " SHADOW" : "",
+#else
+ "",
+#endif
+#ifdef DSP_CAP_VIRTUAL
+ *pcaps & DSP_CAP_VIRTUAL ? " VIRTUAL" : "",
+#else
+ "",
+#endif
+ *pcaps & DSP_CAP_TRIGGER ? " TRIGGER" : "");
+
+ pa_make_fd_cloexec(fd);
+
+ return fd;
+
+fail:
+ if (fd >= 0)
+ pa_close(fd);
+ return -1;
+}
+
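+/* Negotiate sample format, channel count and rate with the device and
+ * update *ss to what was actually accepted. Unsupported formats fall back
+ * to S16 native endian, then S16 reversed endian, then U8 */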
+int pa_oss_auto_format(int fd, pa_sample_spec *ss) {
+ int format, channels, speed, reqformat;
+ pa_sample_format_t orig_format;
+
+ static const int format_trans[PA_SAMPLE_MAX] = {
+ [PA_SAMPLE_U8] = AFMT_U8,
+ [PA_SAMPLE_ALAW] = AFMT_A_LAW,
+ [PA_SAMPLE_ULAW] = AFMT_MU_LAW,
+ [PA_SAMPLE_S16LE] = AFMT_S16_LE,
+ [PA_SAMPLE_S16BE] = AFMT_S16_BE,
+ [PA_SAMPLE_FLOAT32LE] = AFMT_QUERY, /* not supported */
+ [PA_SAMPLE_FLOAT32BE] = AFMT_QUERY, /* not supported */
+ [PA_SAMPLE_S32LE] = AFMT_QUERY, /* not supported */
+ [PA_SAMPLE_S32BE] = AFMT_QUERY, /* not supported */
+ };
+
+ pa_assert(fd >= 0);
+ pa_assert(ss);
+
+ orig_format = ss->format;
+
+ reqformat = format = format_trans[ss->format];
+ if (reqformat == AFMT_QUERY || ioctl(fd, SNDCTL_DSP_SETFMT, &format) < 0 || format != reqformat) {
+ format = AFMT_S16_NE;
+ if (ioctl(fd, SNDCTL_DSP_SETFMT, &format) < 0 || format != AFMT_S16_NE) {
+ int f = AFMT_S16_NE == AFMT_S16_LE ? AFMT_S16_BE : AFMT_S16_LE;
+ format = f;
+ if (ioctl(fd, SNDCTL_DSP_SETFMT, &format) < 0 || format != f) {
+ format = AFMT_U8;
+ if (ioctl(fd, SNDCTL_DSP_SETFMT, &format) < 0 || format != AFMT_U8) {
+ pa_log("SNDCTL_DSP_SETFMT: %s", format != AFMT_U8 ? "No supported sample format" : pa_cstrerror(errno));
+ return -1;
+ } else
+ ss->format = PA_SAMPLE_U8;
+ } else
+ ss->format = f == AFMT_S16_LE ? PA_SAMPLE_S16LE : PA_SAMPLE_S16BE;
+ } else
+ ss->format = PA_SAMPLE_S16NE;
+ }
+
+ if (orig_format != ss->format)
+ pa_log_warn("device doesn't support sample format %s, changed to %s.",
+ pa_sample_format_to_string(orig_format),
+ pa_sample_format_to_string(ss->format));
+
+ channels = ss->channels;
+ if (ioctl(fd, SNDCTL_DSP_CHANNELS, &channels) < 0) {
+ pa_log("SNDCTL_DSP_CHANNELS: %s", pa_cstrerror(errno));
+ return -1;
+ }
+ pa_assert(channels > 0);
+
+ if (ss->channels != channels) {
+ pa_log_warn("device doesn't support %i channels, using %i channels.", ss->channels, channels);
+ ss->channels = channels;
+ }
+
+ speed = ss->rate;
+ if (ioctl(fd, SNDCTL_DSP_SPEED, &speed) < 0) {
+ pa_log("SNDCTL_DSP_SPEED: %s", pa_cstrerror(errno));
+ return -1;
+ }
+ pa_assert(speed > 0);
+
+ if (ss->rate != (unsigned) speed) {
+ pa_log_warn("device doesn't support %i Hz, changed to %i Hz.", ss->rate, speed);
+
+ /* Only adopt the device rate if it deviates by more than 5%; the core
+ * will then resample for us. Smaller deviations are simply tolerated */
+ if (speed < ss->rate*.95 || speed > ss->rate*1.05)
+ ss->rate = speed;
+ }
+
+ return 0;
+}
+
+static int simple_log2(int v) {
+ int k = 0;
+
+ for (;;) {
+ v >>= 1;
+ if (!v) break;
+ k++;
+ }
+
+ return k;
+}
+
+int pa_oss_set_fragments(int fd, int nfrags, int frag_size) {
+ int arg;
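+ /* SNDCTL_DSP_SETFRAGMENT expects the maximum fragment count in the
+ * upper 16 bits and the base-2 logarithm of the fragment size in the
+ * lower 16 bits */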
+ arg = ((int) nfrags << 16) | simple_log2(frag_size);
+
+ if (ioctl(fd, SNDCTL_DSP_SETFRAGMENT, &arg) < 0) {
+ pa_log("SNDCTL_DSP_SETFRAGMENT: %s", pa_cstrerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+int pa_oss_get_volume(int fd, int mixer, const pa_sample_spec *ss, pa_cvolume *volume) {
+ char cv[PA_CVOLUME_SNPRINT_MAX];
+ unsigned vol;
+
+ pa_assert(fd >= 0);
+ pa_assert(ss);
+ pa_assert(volume);
+
+ if (ioctl(fd, mixer, &vol) < 0)
+ return -1;
+
+ pa_cvolume_reset(volume, ss->channels);
+
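+ /* OSS mixer ioctls encode the volume as a percentage (0..100) per
+ * channel: left in the lowest byte, right in the next byte */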
+ volume->values[0] = ((vol & 0xFF) * PA_VOLUME_NORM) / 100;
+
+ if (volume->channels >= 2)
+ volume->values[1] = (((vol >> 8) & 0xFF) * PA_VOLUME_NORM) / 100;
+
+ pa_log_debug("Read mixer settings: %s", pa_cvolume_snprint(cv, sizeof(cv), volume));
+ return 0;
+}
+
+int pa_oss_set_volume(int fd, long mixer, const pa_sample_spec *ss, const pa_cvolume *volume) {
+ char cv[PA_CVOLUME_SNPRINT_MAX];
+ unsigned vol;
+ pa_volume_t l, r;
+
+ l = volume->values[0] > PA_VOLUME_NORM ? PA_VOLUME_NORM : volume->values[0];
+
+ vol = (l*100)/PA_VOLUME_NORM;
+
+ if (ss->channels >= 2) {
+ r = volume->values[1] > PA_VOLUME_NORM ? PA_VOLUME_NORM : volume->values[1];
+ vol |= ((r*100)/PA_VOLUME_NORM) << 8;
+ }
+
+ if (ioctl(fd, mixer, &vol) < 0)
+ return -1;
+
+ pa_log_debug("Wrote mixer settings: %s", pa_cvolume_snprint(cv, sizeof(cv), volume));
+ return 0;
+}
+
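+/* Determine the OSS device index from the trailing digit of the device
+ * node name, resolving symlinks first (e.g. /dev/dsp2 -> 2) */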
+static int get_device_number(const char *dev) {
+ const char *p, *e;
+ char *rp = NULL;
+ int r;
+
+ if (!(p = rp = pa_readlink(dev))) {
+ if (errno != EINVAL && errno != ENOLINK) {
+ r = -1;
+ goto finish;
+ }
+
+ p = dev;
+ }
+
+ if ((e = strrchr(p, '/')))
+ p = e+1;
+
+ if (*p == 0) {
+ r = 0;
+ goto finish;
+ }
+
+ p = strchr(p, 0) -1;
+
+ if (*p >= '0' && *p <= '9') {
+ r = *p - '0';
+ goto finish;
+ }
+
+ r = -1;
+
+finish:
+ pa_xfree(rp);
+ return r;
+}
+
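+/* Look up a human-readable name for the device by scanning the
+ * "Audio devices:" section of the OSS sndstat file */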
+int pa_oss_get_hw_description(const char *dev, char *name, size_t l) {
+ FILE *f;
+ int n, r = -1;
+ int b = 0;
+
+ if ((n = get_device_number(dev)) < 0)
+ return -1;
+
+ if (!(f = fopen("/dev/sndstat", "r")) &&
+ !(f = fopen("/proc/sndstat", "r")) &&
+ !(f = fopen("/proc/asound/oss/sndstat", "r"))) {
+
+ if (errno != ENOENT)
+ pa_log_warn("failed to open OSS sndstat device: %s", pa_cstrerror(errno));
+
+ return -1;
+ }
+
+ while (!feof(f)) {
+ char line[64];
+ int device;
+
+ if (!fgets(line, sizeof(line), f))
+ break;
+
+ line[strcspn(line, "\r\n")] = 0;
+
+ if (!b) {
+ b = strcmp(line, "Audio devices:") == 0;
+ continue;
+ }
+
+ if (line[0] == 0)
+ break;
+
+ if (sscanf(line, "%i: ", &device) != 1)
+ continue;
+
+ if (device == n) {
+ char *k = strchr(line, ':');
+ pa_assert(k);
+ k++;
+ k += strspn(k, " ");
+
+ if (pa_endswith(k, " (DUPLEX)"))
+ k[strlen(k)-9] = 0;
+
+ pa_strlcpy(name, k, l);
+ r = 0;
+ break;
+ }
+ }
+
+ fclose(f);
+ return r;
+}
+
+static int open_mixer(const char *mixer) {
+ int fd;
+
+ if ((fd = open(mixer, O_RDWR|O_NDELAY|O_NOCTTY)) >= 0)
+ return fd;
+
+ return -1;
+}
+
+int pa_oss_open_mixer_for_device(const char *device) {
+ int n;
+ char *fn;
+ int fd;
+
+ if ((n = get_device_number(device)) < 0)
+ return -1;
+
+ if (n == 0)
+ if ((fd = open_mixer("/dev/mixer")) >= 0)
+ return fd;
+
+ fn = pa_sprintf_malloc("/dev/mixer%i", n);
+ fd = open_mixer(fn);
+ pa_xfree(fn);
+
+ if (fd < 0)
+ pa_log_warn("Failed to open mixer '%s': %s", device, pa_cstrerror(errno));
+
+ return fd;
+}
diff --git a/src/modules/oss-util.h b/src/modules/oss-util.h
new file mode 100644
index 00000000..259a622a
--- /dev/null
+++ b/src/modules/oss-util.h
@@ -0,0 +1,43 @@
+#ifndef fooossutilhfoo
+#define fooossutilhfoo
+
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2004-2006 Lennart Poettering
+ Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#include <pulse/sample.h>
+#include <pulse/volume.h>
+
+int pa_oss_open(const char *device, int *mode, int* pcaps);
+int pa_oss_auto_format(int fd, pa_sample_spec *ss);
+
+int pa_oss_set_fragments(int fd, int frags, int frag_size);
+
+int pa_oss_set_volume(int fd, long mixer, const pa_sample_spec *ss, const pa_cvolume *volume);
+int pa_oss_get_volume(int fd, int mixer, const pa_sample_spec *ss, pa_cvolume *volume);
+
+int pa_oss_get_hw_description(const char *dev, char *name, size_t l);
+
+int pa_oss_open_mixer_for_device(const char *device);
+
+#endif
diff --git a/src/modules/rtp/Makefile b/src/modules/rtp/Makefile
new file mode 100644
index 00000000..316beb72
--- /dev/null
+++ b/src/modules/rtp/Makefile
@@ -0,0 +1,13 @@
+# This is a dirty trick just to ease compilation with emacs
+#
+# This file is not intended to be distributed or anything
+#
+# So: don't touch it, even better ignore it!
+
+all:
+ $(MAKE) -C ../..
+
+clean:
+ $(MAKE) -C ../.. clean
+
+.PHONY: all clean
diff --git a/src/modules/rtp/module-rtp-recv.c b/src/modules/rtp/module-rtp-recv.c
new file mode 100644
index 00000000..d8e7a781
--- /dev/null
+++ b/src/modules/rtp/module-rtp-recv.c
@@ -0,0 +1,600 @@
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <poll.h>
+
+#include <pulse/timeval.h>
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/module.h>
+#include <pulsecore/llist.h>
+#include <pulsecore/sink.h>
+#include <pulsecore/sink-input.h>
+#include <pulsecore/memblockq.h>
+#include <pulsecore/log.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/namereg.h>
+#include <pulsecore/sample-util.h>
+#include <pulsecore/macro.h>
+#include <pulsecore/atomic.h>
+#include <pulsecore/rtclock.h>
+#include <pulsecore/atomic.h>
+
+#include "module-rtp-recv-symdef.h"
+
+#include "rtp.h"
+#include "sdp.h"
+#include "sap.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("Recieve data from a network via RTP/SAP/SDP");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_USAGE(
+ "sink=<name of the sink> "
+ "sap_address=<multicast address to listen on> "
+);
+
+#define SAP_PORT 9875
+#define DEFAULT_SAP_ADDRESS "224.0.0.56"
+#define MEMBLOCKQ_MAXLENGTH (1024*170)
+#define MAX_SESSIONS 16
+#define DEATH_TIMEOUT 20
+
+static const char* const valid_modargs[] = {
+ "sink",
+ "sap_address",
+ NULL
+};
+
+struct session {
+ struct userdata *userdata;
+ PA_LLIST_FIELDS(struct session);
+
+ pa_sink_input *sink_input;
+ pa_memblockq *memblockq;
+
+ pa_bool_t first_packet;
+ uint32_t ssrc;
+ uint32_t offset;
+
+ struct pa_sdp_info sdp_info;
+
+ pa_rtp_context rtp_context;
+
+ pa_rtpoll_item *rtpoll_item;
+
+ pa_atomic_t timestamp;
+};
+
+struct userdata {
+ pa_module *module;
+
+ pa_sap_context sap_context;
+ pa_io_event* sap_event;
+
+ pa_time_event *check_death_event;
+
+ char *sink_name;
+
+ PA_LLIST_HEAD(struct session, sessions);
+ pa_hashmap *by_origin;
+ int n_sessions;
+};
+
+static void session_free(struct session *s);
+
+/* Called from I/O thread context */
+static int sink_input_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct session *s = PA_SINK_INPUT(o)->userdata;
+
+ switch (code) {
+ case PA_SINK_INPUT_MESSAGE_GET_LATENCY:
+ *((pa_usec_t*) data) = pa_bytes_to_usec(pa_memblockq_get_length(s->memblockq), &s->sink_input->sample_spec);
+
+ /* We don't return here; the generic handler invoked below will add
+ * in the extra latency introduced by the resampler */
+ break;
+ }
+
+ return pa_sink_input_process_msg(o, code, data, offset, chunk);
+}
+
+/* Called from I/O thread context */
+static int sink_input_peek(pa_sink_input *i, size_t length, pa_memchunk *chunk) {
+ struct session *s;
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(s = i->userdata);
+
+ return pa_memblockq_peek(s->memblockq, chunk);
+}
+
+/* Called from I/O thread context */
+static void sink_input_drop(pa_sink_input *i, size_t length) {
+ struct session *s;
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(s = i->userdata);
+
+ pa_memblockq_drop(s->memblockq, length);
+}
+
+/* Called from main context */
+static void sink_input_kill(pa_sink_input* i) {
+ struct session *s;
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(s = i->userdata);
+
+ session_free(s);
+}
+
+/* Called from I/O thread context */
+static int rtpoll_work_cb(pa_rtpoll_item *i) {
+ pa_memchunk chunk;
+ int64_t k, j, delta;
+ struct timeval now;
+ struct session *s;
+ struct pollfd *p;
+
+ pa_assert_se(s = pa_rtpoll_item_get_userdata(i));
+
+ p = pa_rtpoll_item_get_pollfd(i, NULL);
+
+ if (p->revents & (POLLERR|POLLNVAL|POLLHUP|POLLOUT)) {
+ pa_log("poll() signalled bad revents.");
+ return -1;
+ }
+
+ if ((p->revents & POLLIN) == 0)
+ return 0;
+
+ p->revents = 0;
+
+ if (pa_rtp_recv(&s->rtp_context, &chunk, s->userdata->module->core->mempool) < 0)
+ return 0;
+
+ if (s->sdp_info.payload != s->rtp_context.payload) {
+ pa_memblock_unref(chunk.memblock);
+ return 0;
+ }
+
+ if (!s->first_packet) {
+ s->first_packet = TRUE;
+
+ s->ssrc = s->rtp_context.ssrc;
+ s->offset = s->rtp_context.timestamp;
+
+ if (s->ssrc == s->userdata->module->core->cookie)
+ pa_log_warn("Detected RTP packet loop!");
+ } else {
+ if (s->ssrc != s->rtp_context.ssrc) {
+ pa_memblock_unref(chunk.memblock);
+ return 0;
+ }
+ }
+
+ /* Check whether the 32 bit RTP timestamp wrapped around: use whichever
+ * of the direct difference (k) and the wrapped difference (j) has the
+ * smaller magnitude */
+ k = (int64_t) s->rtp_context.timestamp - (int64_t) s->offset;
+ j = (int64_t) 0x100000000LL - (int64_t) s->offset + (int64_t) s->rtp_context.timestamp;
+
+ if ((k < 0 ? -k : k) < (j < 0 ? -j : j))
+ delta = k;
+ else
+ delta = j;
+
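+ /* Position the write index according to the packet's timestamp, so that
+ * lost packets show up as silence and reordered data lands in the right
+ * place */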
+ pa_memblockq_seek(s->memblockq, delta * s->rtp_context.frame_size, PA_SEEK_RELATIVE);
+
+ if (pa_memblockq_push(s->memblockq, &chunk) < 0) {
+ /* queue overflow, let's flush it and try again */
+ pa_memblockq_flush(s->memblockq);
+ pa_memblockq_push(s->memblockq, &chunk);
+ }
+
+ /* The next timestamp we expect */
+ s->offset = s->rtp_context.timestamp + (chunk.length / s->rtp_context.frame_size);
+
+ pa_memblock_unref(chunk.memblock);
+
+ pa_rtclock_get(&now);
+ pa_atomic_store(&s->timestamp, now.tv_sec);
+
+ return 1;
+}
+
+/* Called from I/O thread context */
+static void sink_input_attach(pa_sink_input *i) {
+ struct session *s;
+ struct pollfd *p;
+
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(s = i->userdata);
+
+ pa_assert(!s->rtpoll_item);
+ s->rtpoll_item = pa_rtpoll_item_new(i->sink->rtpoll, PA_RTPOLL_LATE, 1);
+
+ p = pa_rtpoll_item_get_pollfd(s->rtpoll_item, NULL);
+ p->fd = s->rtp_context.fd;
+ p->events = POLLIN;
+ p->revents = 0;
+
+ pa_rtpoll_item_set_work_callback(s->rtpoll_item, rtpoll_work_cb);
+ pa_rtpoll_item_set_userdata(s->rtpoll_item, s);
+}
+
+/* Called from I/O thread context */
+static void sink_input_detach(pa_sink_input *i) {
+ struct session *s;
+ pa_sink_input_assert_ref(i);
+ pa_assert_se(s = i->userdata);
+
+ pa_assert(s->rtpoll_item);
+ pa_rtpoll_item_free(s->rtpoll_item);
+ s->rtpoll_item = NULL;
+}
+
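+/* Create a UDP socket, join the multicast group given by sa and bind the
+ * socket to it */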
+static int mcast_socket(const struct sockaddr* sa, socklen_t salen) {
+ int af, fd = -1, r, one;
+
+ pa_assert(sa);
+ pa_assert(salen > 0);
+
+ af = sa->sa_family;
+ if ((fd = socket(af, SOCK_DGRAM, 0)) < 0) {
+ pa_log("Failed to create socket: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ one = 1;
+ if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0) {
+ pa_log("SO_REUSEADDR failed: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ if (af == AF_INET) {
+ struct ip_mreq mr4;
+ memset(&mr4, 0, sizeof(mr4));
+ mr4.imr_multiaddr = ((const struct sockaddr_in*) sa)->sin_addr;
+ r = setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mr4, sizeof(mr4));
+ } else {
+ struct ipv6_mreq mr6;
+ memset(&mr6, 0, sizeof(mr6));
+ mr6.ipv6mr_multiaddr = ((const struct sockaddr_in6*) sa)->sin6_addr;
+ r = setsockopt(fd, IPPROTO_IPV6, IPV6_JOIN_GROUP, &mr6, sizeof(mr6));
+ }
+
+ if (r < 0) {
+ pa_log_info("Joining mcast group failed: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ if (bind(fd, sa, salen) < 0) {
+ pa_log("bind() failed: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ return fd;
+
+fail:
+ if (fd >= 0)
+ close(fd);
+
+ return -1;
+}
+
+static struct session *session_new(struct userdata *u, const pa_sdp_info *sdp_info) {
+ struct session *s = NULL;
+ char *c;
+ pa_sink *sink;
+ int fd = -1;
+ pa_memblock *silence;
+ pa_sink_input_new_data data;
+ struct timeval now;
+
+ pa_assert(u);
+ pa_assert(sdp_info);
+
+ if (u->n_sessions >= MAX_SESSIONS) {
+ pa_log("Session limit reached.");
+ goto fail;
+ }
+
+ if (!(sink = pa_namereg_get(u->module->core, u->sink_name, PA_NAMEREG_SINK, 1))) {
+ pa_log("Sink does not exist.");
+ goto fail;
+ }
+
+ s = pa_xnew0(struct session, 1);
+ s->userdata = u;
+ s->first_packet = FALSE;
+ s->sdp_info = *sdp_info;
+ s->rtpoll_item = NULL;
+
+ pa_rtclock_get(&now);
+ pa_atomic_store(&s->timestamp, now.tv_sec);
+
+ if ((fd = mcast_socket((const struct sockaddr*) &sdp_info->sa, sdp_info->salen)) < 0)
+ goto fail;
+
+ c = pa_sprintf_malloc("RTP Stream%s%s%s",
+ sdp_info->session_name ? " (" : "",
+ sdp_info->session_name ? sdp_info->session_name : "",
+ sdp_info->session_name ? ")" : "");
+
+ pa_sink_input_new_data_init(&data);
+ data.sink = sink;
+ data.driver = __FILE__;
+ data.name = c;
+ data.module = u->module;
+ pa_sink_input_new_data_set_sample_spec(&data, &sdp_info->sample_spec);
+
+ s->sink_input = pa_sink_input_new(u->module->core, &data, 0);
+ pa_xfree(c);
+
+ if (!s->sink_input) {
+ pa_log("Failed to create sink input.");
+ goto fail;
+ }
+
+ s->sink_input->userdata = s;
+
+ s->sink_input->parent.process_msg = sink_input_process_msg;
+ s->sink_input->peek = sink_input_peek;
+ s->sink_input->drop = sink_input_drop;
+ s->sink_input->kill = sink_input_kill;
+ s->sink_input->attach = sink_input_attach;
+ s->sink_input->detach = sink_input_detach;
+
+ silence = pa_silence_memblock_new(
+ s->userdata->module->core->mempool,
+ &s->sink_input->sample_spec,
+ pa_frame_align(pa_bytes_per_second(&s->sink_input->sample_spec)/128, &s->sink_input->sample_spec));
+
+ s->memblockq = pa_memblockq_new(
+ 0,
+ MEMBLOCKQ_MAXLENGTH,
+ MEMBLOCKQ_MAXLENGTH,
+ pa_frame_size(&s->sink_input->sample_spec),
+ pa_bytes_per_second(&s->sink_input->sample_spec)/10+1,
+ 0,
+ silence);
+
+ pa_memblock_unref(silence);
+
+ pa_rtp_context_init_recv(&s->rtp_context, fd, pa_frame_size(&s->sdp_info.sample_spec));
+
+ pa_hashmap_put(s->userdata->by_origin, s->sdp_info.origin, s);
+ u->n_sessions++;
+ PA_LLIST_PREPEND(struct session, s->userdata->sessions, s);
+
+ pa_sink_input_put(s->sink_input);
+
+ pa_log_info("New session '%s'", s->sdp_info.session_name);
+
+ return s;
+
+fail:
+ pa_xfree(s);
+
+ if (fd >= 0)
+ pa_close(fd);
+
+ return NULL;
+}
+
+static void session_free(struct session *s) {
+ pa_assert(s);
+
+ pa_log_info("Freeing session '%s'", s->sdp_info.session_name);
+
+ pa_sink_input_unlink(s->sink_input);
+ pa_sink_input_unref(s->sink_input);
+
+ PA_LLIST_REMOVE(struct session, s->userdata->sessions, s);
+ pa_assert(s->userdata->n_sessions >= 1);
+ s->userdata->n_sessions--;
+ pa_hashmap_remove(s->userdata->by_origin, s->sdp_info.origin);
+
+ pa_memblockq_free(s->memblockq);
+ pa_sdp_info_destroy(&s->sdp_info);
+ pa_rtp_context_destroy(&s->rtp_context);
+
+ pa_xfree(s);
+}
+
+static void sap_event_cb(pa_mainloop_api *m, pa_io_event *e, int fd, pa_io_event_flags_t flags, void *userdata) {
+ struct userdata *u = userdata;
+ int goodbye;
+ pa_sdp_info info;
+ struct session *s;
+
+ pa_assert(m);
+ pa_assert(e);
+ pa_assert(u);
+ pa_assert(fd == u->sap_context.fd);
+ pa_assert(flags == PA_IO_EVENT_INPUT);
+
+ if (pa_sap_recv(&u->sap_context, &goodbye) < 0)
+ return;
+
+ if (!pa_sdp_parse(u->sap_context.sdp_data, &info, goodbye))
+ return;
+
+ if (goodbye) {
+
+ if ((s = pa_hashmap_get(u->by_origin, info.origin)))
+ session_free(s);
+
+ pa_sdp_info_destroy(&info);
+ } else {
+
+ if (!(s = pa_hashmap_get(u->by_origin, info.origin))) {
+ if (!(s = session_new(u, &info)))
+ pa_sdp_info_destroy(&info);
+
+ } else {
+ struct timeval now;
+ pa_rtclock_get(&now);
+ pa_atomic_store(&s->timestamp, now.tv_sec);
+
+ pa_sdp_info_destroy(&info);
+ }
+ }
+}
+
+static void check_death_event_cb(pa_mainloop_api *m, pa_time_event *t, const struct timeval *ptv, void *userdata) {
+ struct session *s, *n;
+ struct userdata *u = userdata;
+ struct timeval now;
+ struct timeval tv;
+
+ pa_assert(m);
+ pa_assert(t);
+ pa_assert(ptv);
+ pa_assert(u);
+
+ pa_rtclock_get(&now);
+
+ pa_log_debug("Checking for dead streams ...");
+
+ for (s = u->sessions; s; s = n) {
+ int k;
+ n = s->next;
+
+ k = pa_atomic_load(&s->timestamp);
+
+ if (k + DEATH_TIMEOUT < now.tv_sec)
+ session_free(s);
+ }
+
+ /* Restart timer */
+ pa_gettimeofday(&tv);
+ pa_timeval_add(&tv, DEATH_TIMEOUT*PA_USEC_PER_SEC);
+ m->time_restart(t, &tv);
+}
+
+int pa__init(pa_module*m) {
+ struct userdata *u;
+ pa_modargs *ma = NULL;
+ struct sockaddr_in sa4;
+ struct sockaddr_in6 sa6;
+ struct sockaddr *sa;
+ socklen_t salen;
+ const char *sap_address;
+ int fd = -1;
+ struct timeval tv;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("failed to parse module arguments");
+ goto fail;
+ }
+
+ sap_address = pa_modargs_get_value(ma, "sap_address", DEFAULT_SAP_ADDRESS);
+
+ if (inet_pton(AF_INET6, sap_address, &sa6.sin6_addr) > 0) {
+ sa6.sin6_family = AF_INET6;
+ sa6.sin6_port = htons(SAP_PORT);
+ sa = (struct sockaddr*) &sa6;
+ salen = sizeof(sa6);
+ } else if (inet_pton(AF_INET, sap_address, &sa4.sin_addr) > 0) {
+ sa4.sin_family = AF_INET;
+ sa4.sin_port = htons(SAP_PORT);
+ sa = (struct sockaddr*) &sa4;
+ salen = sizeof(sa4);
+ } else {
+ pa_log("Invalid SAP address '%s'", sap_address);
+ goto fail;
+ }
+
+ if ((fd = mcast_socket(sa, salen)) < 0)
+ goto fail;
+
+ u = pa_xnew(struct userdata, 1);
+ m->userdata = u;
+ u->module = m;
+ u->sink_name = pa_xstrdup(pa_modargs_get_value(ma, "sink", NULL));
+
+ u->sap_event = m->core->mainloop->io_new(m->core->mainloop, fd, PA_IO_EVENT_INPUT, sap_event_cb, u);
+ pa_sap_context_init_recv(&u->sap_context, fd);
+
+ PA_LLIST_HEAD_INIT(struct session, u->sessions);
+ u->n_sessions = 0;
+ u->by_origin = pa_hashmap_new(pa_idxset_string_hash_func, pa_idxset_string_compare_func);
+
+ pa_gettimeofday(&tv);
+ pa_timeval_add(&tv, DEATH_TIMEOUT * PA_USEC_PER_SEC);
+ u->check_death_event = m->core->mainloop->time_new(m->core->mainloop, &tv, check_death_event_cb, u);
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+ if (ma)
+ pa_modargs_free(ma);
+
+ if (fd >= 0)
+ pa_close(fd);
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+ struct session *s;
+
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->sap_event)
+ m->core->mainloop->io_free(u->sap_event);
+
+ if (u->check_death_event)
+ m->core->mainloop->time_free(u->check_death_event);
+
+ pa_sap_context_destroy(&u->sap_context);
+
+ if (u->by_origin) {
+ while ((s = pa_hashmap_get_first(u->by_origin)))
+ session_free(s);
+
+ pa_hashmap_free(u->by_origin, NULL, NULL);
+ }
+
+ pa_xfree(u->sink_name);
+ pa_xfree(u);
+}
diff --git a/src/modules/rtp/module-rtp-send.c b/src/modules/rtp/module-rtp-send.c
new file mode 100644
index 00000000..95ff15de
--- /dev/null
+++ b/src/modules/rtp/module-rtp-send.c
@@ -0,0 +1,397 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <stdio.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <pulse/timeval.h>
+#include <pulse/util.h>
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/module.h>
+#include <pulsecore/llist.h>
+#include <pulsecore/source.h>
+#include <pulsecore/source-output.h>
+#include <pulsecore/memblockq.h>
+#include <pulsecore/log.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/modargs.h>
+#include <pulsecore/namereg.h>
+#include <pulsecore/sample-util.h>
+#include <pulsecore/macro.h>
+#include <pulsecore/socket-util.h>
+
+#include "module-rtp-send-symdef.h"
+
+#include "rtp.h"
+#include "sdp.h"
+#include "sap.h"
+
+PA_MODULE_AUTHOR("Lennart Poettering");
+PA_MODULE_DESCRIPTION("Read data from source and send it to the network via RTP/SAP/SDP");
+PA_MODULE_VERSION(PACKAGE_VERSION);
+PA_MODULE_LOAD_ONCE(FALSE);
+PA_MODULE_USAGE(
+ "source=<name of the source> "
+ "format=<sample format> "
+ "channels=<number of channels> "
+ "rate=<sample rate> "
+ "destination=<destination IP address> "
+ "port=<port number> "
+ "mtu=<maximum transfer unit> "
+ "loop=<loopback to local host?>"
+);
+
+#define DEFAULT_PORT 46000
+#define SAP_PORT 9875
+#define DEFAULT_DESTINATION "224.0.0.56"
+#define MEMBLOCKQ_MAXLENGTH (1024*170)
+#define DEFAULT_MTU 1280
+#define SAP_INTERVAL 5
+
+static const char* const valid_modargs[] = {
+ "source",
+ "format",
+ "channels",
+ "rate",
+ "destination",
+ "port",
+ "mtu" ,
+ "loop",
+ NULL
+};
+
+struct userdata {
+ pa_module *module;
+
+ pa_source_output *source_output;
+ pa_memblockq *memblockq;
+
+ pa_rtp_context rtp_context;
+ pa_sap_context sap_context;
+ size_t mtu;
+
+ pa_time_event *sap_event;
+};
+
+/* Called from I/O thread context */
+static int source_output_process_msg(pa_msgobject *o, int code, void *data, int64_t offset, pa_memchunk *chunk) {
+ struct userdata *u;
+ pa_assert_se(u = PA_SOURCE_OUTPUT(o)->userdata);
+
+ switch (code) {
+ case PA_SOURCE_OUTPUT_MESSAGE_GET_LATENCY:
+ *((pa_usec_t*) data) = pa_bytes_to_usec(pa_memblockq_get_length(u->memblockq), &u->source_output->sample_spec);
+
+ /* We don't return here; the generic handler invoked below will add
+ * in the extra latency introduced by the resampler */
+ break;
+ }
+
+ return pa_source_output_process_msg(o, code, data, offset, chunk);
+}
+
+/* Called from I/O thread context */
+static void source_output_push(pa_source_output *o, const pa_memchunk *chunk) {
+ struct userdata *u;
+ pa_source_output_assert_ref(o);
+ pa_assert_se(u = o->userdata);
+
+ if (pa_memblockq_push(u->memblockq, chunk) < 0) {
+ pa_log_warn("Failed to push chunk into memblockq.");
+ return;
+ }
+
+ pa_rtp_send(&u->rtp_context, u->mtu, u->memblockq);
+}
+
+/* Called from main context */
+static void source_output_kill(pa_source_output* o) {
+ struct userdata *u;
+ pa_source_output_assert_ref(o);
+ pa_assert_se(u = o->userdata);
+
+ pa_module_unload_request(u->module);
+
+ pa_source_output_unlink(u->source_output);
+ pa_source_output_unref(u->source_output);
+ u->source_output = NULL;
+}
+
+static void sap_event_cb(pa_mainloop_api *m, pa_time_event *t, const struct timeval *tv, void *userdata) {
+ struct userdata *u = userdata;
+ struct timeval next;
+
+ pa_assert(m);
+ pa_assert(t);
+ pa_assert(tv);
+ pa_assert(u);
+
+ pa_sap_send(&u->sap_context, 0);
+
+ pa_gettimeofday(&next);
+ pa_timeval_add(&next, SAP_INTERVAL * PA_USEC_PER_SEC);
+ m->time_restart(t, &next);
+}
+
+int pa__init(pa_module*m) {
+ struct userdata *u;
+ pa_modargs *ma = NULL;
+ const char *dest;
+ uint32_t port = DEFAULT_PORT, mtu;
+ int af, fd = -1, sap_fd = -1;
+ pa_source *s;
+ pa_sample_spec ss;
+ pa_channel_map cm;
+ struct sockaddr_in sa4, sap_sa4;
+ struct sockaddr_in6 sa6, sap_sa6;
+ struct sockaddr_storage sa_dst;
+ pa_source_output *o = NULL;
+ uint8_t payload;
+ char *p;
+ int r, j;
+ socklen_t k;
+ struct timeval tv;
+ char hn[128], *n;
+ pa_bool_t loop = FALSE;
+ pa_source_output_new_data data;
+
+ pa_assert(m);
+
+ if (!(ma = pa_modargs_new(m->argument, valid_modargs))) {
+ pa_log("Failed to parse module arguments");
+ goto fail;
+ }
+
+ if (!(s = pa_namereg_get(m->core, pa_modargs_get_value(ma, "source", NULL), PA_NAMEREG_SOURCE, 1))) {
+ pa_log("Source does not exist.");
+ goto fail;
+ }
+
+ if (pa_modargs_get_value_boolean(ma, "loop", &loop) < 0) {
+ pa_log("Failed to parse \"loop\" parameter.");
+ goto fail;
+ }
+
+ ss = s->sample_spec;
+ pa_rtp_sample_spec_fixup(&ss);
+ cm = s->channel_map;
+ if (pa_modargs_get_sample_spec(ma, &ss) < 0) {
+ pa_log("Failed to parse sample specification");
+ goto fail;
+ }
+
+ if (!pa_rtp_sample_spec_valid(&ss)) {
+ pa_log("Specified sample type not compatible with RTP");
+ goto fail;
+ }
+
+ if (ss.channels != cm.channels)
+ pa_channel_map_init_auto(&cm, ss.channels, PA_CHANNEL_MAP_AIFF);
+
+ payload = pa_rtp_payload_from_sample_spec(&ss);
+
+ mtu = pa_frame_align(DEFAULT_MTU, &ss);
+
+ if (pa_modargs_get_value_u32(ma, "mtu", &mtu) < 0 || mtu < 1 || mtu % pa_frame_size(&ss) != 0) {
+ pa_log("Invalid MTU.");
+ goto fail;
+ }
+
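+ /* Default to a random even port starting at DEFAULT_PORT; an explicit
+ * port= argument overrides this below */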
+ port = DEFAULT_PORT + ((rand() % 512) << 1);
+ if (pa_modargs_get_value_u32(ma, "port", &port) < 0 || port < 1 || port > 0xFFFF) {
+ pa_log("port= expects a numerical argument between 1 and 65535.");
+ goto fail;
+ }
+
+ if (port & 1)
+ pa_log_warn("Port number not even as suggested in RFC3550!");
+
+ dest = pa_modargs_get_value(ma, "destination", DEFAULT_DESTINATION);
+
+ if (inet_pton(AF_INET6, dest, &sa6.sin6_addr) > 0) {
+ sa6.sin6_family = af = AF_INET6;
+ sa6.sin6_port = htons(port);
+ sap_sa6 = sa6;
+ sap_sa6.sin6_port = htons(SAP_PORT);
+ } else if (inet_pton(AF_INET, dest, &sa4.sin_addr) > 0) {
+ sa4.sin_family = af = AF_INET;
+ sa4.sin_port = htons(port);
+ sap_sa4 = sa4;
+ sap_sa4.sin_port = htons(SAP_PORT);
+ } else {
+ pa_log("Invalid destination '%s'", dest);
+ goto fail;
+ }
+
+ if ((fd = socket(af, SOCK_DGRAM, 0)) < 0) {
+ pa_log("socket() failed: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ if (connect(fd, af == AF_INET ? (struct sockaddr*) &sa4 : (struct sockaddr*) &sa6, af == AF_INET ? sizeof(sa4) : sizeof(sa6)) < 0) {
+ pa_log("connect() failed: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ if ((sap_fd = socket(af, SOCK_DGRAM, 0)) < 0) {
+ pa_log("socket() failed: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ if (connect(sap_fd, af == AF_INET ? (struct sockaddr*) &sap_sa4 : (struct sockaddr*) &sap_sa6, af == AF_INET ? sizeof(sap_sa4) : sizeof(sap_sa6)) < 0) {
+ pa_log("connect() failed: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ j = !!loop;
+ if (setsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP, &j, sizeof(j)) < 0 ||
+ setsockopt(sap_fd, IPPROTO_IP, IP_MULTICAST_LOOP, &j, sizeof(j)) < 0) {
+ pa_log("IP_MULTICAST_LOOP failed: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ /* If the socket queue is full, let's drop packets */
+ pa_make_fd_nonblock(fd);
+ pa_make_udp_socket_low_delay(fd);
+ pa_make_fd_cloexec(fd);
+ pa_make_fd_cloexec(sap_fd);
+
+ pa_source_output_new_data_init(&data);
+ data.name = "RTP Monitor Stream";
+ data.driver = __FILE__;
+ data.module = m;
+ data.source = s;
+ pa_source_output_new_data_set_sample_spec(&data, &ss);
+ pa_source_output_new_data_set_channel_map(&data, &cm);
+
+ if (!(o = pa_source_output_new(m->core, &data, 0))) {
+ pa_log("failed to create source output.");
+ goto fail;
+ }
+
+ o->parent.process_msg = source_output_process_msg;
+ o->push = source_output_push;
+ o->kill = source_output_kill;
+
+ u = pa_xnew(struct userdata, 1);
+ m->userdata = u;
+ o->userdata = u;
+
+ u->module = m;
+ u->source_output = o;
+
+ u->memblockq = pa_memblockq_new(
+ 0,
+ MEMBLOCKQ_MAXLENGTH,
+ MEMBLOCKQ_MAXLENGTH,
+ pa_frame_size(&ss),
+ 1,
+ 0,
+ NULL);
+
+ u->mtu = mtu;
+
+ k = sizeof(sa_dst);
+ pa_assert_se((r = getsockname(fd, (struct sockaddr*) &sa_dst, &k)) >= 0);
+
+ n = pa_sprintf_malloc("PulseAudio RTP Stream on %s", pa_get_fqdn(hn, sizeof(hn)));
+
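+ /* Build the SDP description from our local address (as reported by
+ * getsockname()), the destination address, port, payload type and sample
+ * spec; it is announced periodically via SAP below */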
+ p = pa_sdp_build(af,
+ af == AF_INET ? (void*) &((struct sockaddr_in*) &sa_dst)->sin_addr : (void*) &((struct sockaddr_in6*) &sa_dst)->sin6_addr,
+ af == AF_INET ? (void*) &sa4.sin_addr : (void*) &sa6.sin6_addr,
+ n, port, payload, &ss);
+
+ pa_xfree(n);
+
+ pa_rtp_context_init_send(&u->rtp_context, fd, m->core->cookie, payload, pa_frame_size(&ss));
+ pa_sap_context_init_send(&u->sap_context, sap_fd, p);
+
+ pa_log_info("RTP stream initialized with mtu %u on %s:%u, SSRC=0x%08x, payload=%u, initial sequence #%u", mtu, dest, port, u->rtp_context.ssrc, payload, u->rtp_context.sequence);
+ pa_log_info("SDP-Data:\n%s\nEOF", p);
+
+ pa_sap_send(&u->sap_context, 0);
+
+ pa_gettimeofday(&tv);
+ pa_timeval_add(&tv, SAP_INTERVAL * PA_USEC_PER_SEC);
+ u->sap_event = m->core->mainloop->time_new(m->core->mainloop, &tv, sap_event_cb, u);
+
+ pa_source_output_put(u->source_output);
+
+ pa_modargs_free(ma);
+
+ return 0;
+
+fail:
+ if (ma)
+ pa_modargs_free(ma);
+
+ if (fd >= 0)
+ pa_close(fd);
+
+ if (sap_fd >= 0)
+ pa_close(sap_fd);
+
+ if (o) {
+ pa_source_output_unlink(o);
+ pa_source_output_unref(o);
+ }
+
+ return -1;
+}
+
+void pa__done(pa_module*m) {
+ struct userdata *u;
+ pa_assert(m);
+
+ if (!(u = m->userdata))
+ return;
+
+ if (u->sap_event)
+ m->core->mainloop->time_free(u->sap_event);
+
+ if (u->source_output) {
+ pa_source_output_unlink(u->source_output);
+ pa_source_output_unref(u->source_output);
+ }
+
+ pa_rtp_context_destroy(&u->rtp_context);
+
+ pa_sap_send(&u->sap_context, 1);
+ pa_sap_context_destroy(&u->sap_context);
+
+ if (u->memblockq)
+ pa_memblockq_free(u->memblockq);
+
+ pa_xfree(u);
+}
diff --git a/src/modules/rtp/rfc2327.txt b/src/modules/rtp/rfc2327.txt
new file mode 100644
index 00000000..ce77de61
--- /dev/null
+++ b/src/modules/rtp/rfc2327.txt
@@ -0,0 +1,2355 @@
+
+
+
+
+
+
+Network Working Group M. Handley
+Request for Comments: 2327 V. Jacobson
+Category: Standards Track ISI/LBNL
+ April 1998
+
+
+ SDP: Session Description Protocol
+
+Status of this Memo
+
+ This document specifies an Internet standards track protocol for the
+ Internet community, and requests discussion and suggestions for
+ improvements. Please refer to the current edition of the "Internet
+ Official Protocol Standards" (STD 1) for the standardization state
+ and status of this protocol. Distribution of this memo is unlimited.
+
+Copyright Notice
+
+ Copyright (C) The Internet Society (1998). All Rights Reserved.
+
+Abstract
+
+ This document defines the Session Description Protocol, SDP. SDP is
+ intended for describing multimedia sessions for the purposes of
+ session announcement, session invitation, and other forms of
+ multimedia session initiation.
+
+ This document is a product of the Multiparty Multimedia Session
+ Control (MMUSIC) working group of the Internet Engineering Task
+ Force. Comments are solicited and should be addressed to the working
+ group's mailing list at confctrl@isi.edu and/or the authors.
+
+1. Introduction
+
+ On the Internet multicast backbone (Mbone), a session directory tool
+ is used to advertise multimedia conferences and communicate the
+ conference addresses and conference tool-specific information
+ necessary for participation. This document defines a session
+ description protocol for this purpose, and for general real-time
+ multimedia session description purposes. This memo does not describe
+ multicast address allocation or the distribution of SDP messages in
+ detail. These are described in accompanying memos. SDP is not
+ intended for negotiation of media encodings.
+
+
+
+
+
+
+
+
+Handley & Jacobson Standards Track [Page 1]
+
+RFC 2327 SDP April 1998
+
+
+2. Background
+
+ The Mbone is the part of the internet that supports IP multicast, and
+ thus permits efficient many-to-many communication. It is used
+ extensively for multimedia conferencing. Such conferences usually
+ have the property that tight coordination of conference membership is
+ not necessary; to receive a conference, a user at an Mbone site only
+ has to know the conference's multicast group address and the UDP
+ ports for the conference data streams.
+
+ Session directories assist the advertisement of conference sessions
+ and communicate the relevant conference setup information to
+ prospective participants. SDP is designed to convey such information
+ to recipients. SDP is purely a format for session description - it
+ does not incorporate a transport protocol, and is intended to use
+ different transport protocols as appropriate including the Session
+ Announcement Protocol [4], Session Initiation Protocol [11], Real-
+ Time Streaming Protocol [12], electronic mail using the MIME
+ extensions, and the Hypertext Transport Protocol.
+
+ SDP is intended to be general purpose so that it can be used for a
+ wider range of network environments and applications than just
+ multicast session directories. However, it is not intended to
+ support negotiation of session content or media encodings - this is
+ viewed as outside the scope of session description.
+
+3. Glossary of Terms
+
+ The following terms are used in this document, and have specific
+ meaning within the context of this document.
+
+ Conference
+ A multimedia conference is a set of two or more communicating users
+ along with the software they are using to communicate.
+
+ Session
+ A multimedia session is a set of multimedia senders and receivers
+ and the data streams flowing from senders to receivers. A
+ multimedia conference is an example of a multimedia session.
+
+ Session Advertisement
+ See session announcement.
+
+ Session Announcement
+ A session announcement is a mechanism by which a session
+ description is conveyed to users in a proactive fashion, i.e., the
+ session description was not explicitly requested by the user.
+
+
+
+
+Handley & Jacobson Standards Track [Page 2]
+
+RFC 2327 SDP April 1998
+
+
+ Session Description
+ A well defined format for conveying sufficient information to
+ discover and participate in a multimedia session.
+
+3.1. Terminology
+
+ The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
+ "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
+ document are to be interpreted as described in RFC 2119.
+
+4. SDP Usage
+
+4.1. Multicast Announcements
+
+ SDP is a session description protocol for multimedia sessions. A
+ common mode of usage is for a client to announce a conference session
+ by periodically multicasting an announcement packet to a well known
+ multicast address and port using the Session Announcement Protocol
+ (SAP).
+
+ SAP packets are UDP packets with the following format:
+
+ |--------------------|
+ | SAP header |
+ |--------------------|
+ | text payload |
+ |//////////
+
+
+ The header is the Session Announcement Protocol header. SAP is
+ described in more detail in a companion memo [4]
+
+ The text payload is an SDP session description, as described in this
+ memo. The text payload should be no greater than 1 Kbyte in length.
+ If announced by SAP, only one session announcement is permitted in a
+ single packet.
+
+4.2. Email and WWW Announcements
+
+ Alternative means of conveying session descriptions include
+ electronic mail and the World Wide Web. For both email and WWW
+ distribution, the use of the MIME content type "application/sdp"
+ should be used. This enables the automatic launching of applications
+ for participation in the session from the WWW client or mail reader
+ in a standard manner.
+
+
+
+
+
+
+Handley & Jacobson Standards Track [Page 3]
+
+RFC 2327 SDP April 1998
+
+
+ Note that announcements of multicast sessions made only via email or
+ the World Wide Web (WWW) do not have the property that the receiver
+ of a session announcement can necessarily receive the session because
+ the multicast sessions may be restricted in scope, and access to the
+ WWW server or reception of email is possible outside this scope. SAP
+ announcements do not suffer from this mismatch.
+
+5. Requirements and Recommendations
+
+ The purpose of SDP is to convey information about media streams in
+ multimedia sessions to allow the recipients of a session description
+ to participate in the session. SDP is primarily intended for use in
+ an internetwork, although it is sufficiently general that it can
+ describe conferences in other network environments.
+
+ A multimedia session, for these purposes, is defined as a set of
+ media streams that exist for some duration of time. Media streams
+ can be many-to-many. The times during which the session is active
+ need not be continuous.
+
+ Thus far, multicast based sessions on the Internet have differed from
+ many other forms of conferencing in that anyone receiving the traffic
+ can join the session (unless the session traffic is encrypted). In
+ such an environment, SDP serves two primary purposes. It is a means
+ to communicate the existence of a session, and is a means to convey
+ sufficient information to enable joining and participating in the
+ session. In a unicast environment, only the latter purpose is likely
+ to be relevant.
+
+ Thus SDP includes:
+
+ o Session name and purpose
+
+ o Time(s) the session is active
+
+ o The media comprising the session
+
+ o Information to receive those media (addresses, ports, formats and
+ so on)
+
+ As resources necessary to participate in a session may be limited,
+ some additional information may also be desirable:
+
+ o Information about the bandwidth to be used by the conference
+
+ o Contact information for the person responsible for the session
+
+
+
+
+
+Handley & Jacobson Standards Track [Page 4]
+
+RFC 2327 SDP April 1998
+
+
+ In general, SDP must convey sufficient information to be able to join
+ a session (with the possible exception of encryption keys) and to
+ announce the resources to be used to non-participants that may need
+ to know.
+
+5.1. Media Information
+
+ SDP includes:
+
+ o The type of media (video, audio, etc)
+
+ o The transport protocol (RTP/UDP/IP, H.320, etc)
+
+ o The format of the media (H.261 video, MPEG video, etc)
+
+ For an IP multicast session, the following are also conveyed:
+
+ o Multicast address for media
+
+ o Transport Port for media
+
+ This address and port are the destination address and destination
+ port of the multicast stream, whether being sent, received, or both.
+
+ For an IP unicast session, the following are conveyed:
+
+ o Remote address for media
+
+ o Transport port for contact address
+
+ The semantics of this address and port depend on the media and
+ transport protocol defined. By default, this is the remote address
+ and remote port to which data is sent, and the remote address and
+ local port on which to receive data. However, some media may define
+ to use these to establish a control channel for the actual media
+ flow.
+
+5.2. Timing Information
+
+ Sessions may either be bounded or unbounded in time. Whether or not
+ they are bounded, they may be only active at specific times.
+
+ SDP can convey:
+
+ o An arbitrary list of start and stop times bounding the session
+
+ o For each bound, repeat times such as "every Wednesday at 10am for
+ one hour"
+
+
+
+Handley & Jacobson Standards Track [Page 5]
+
+RFC 2327 SDP April 1998
+
+
+ This timing information is globally consistent, irrespective of local
+ time zone or daylight saving time.
+
+5.3. Private Sessions
+
+ It is possible to create both public sessions and private sessions.
+ Private sessions will typically be conveyed by encrypting the session
+ description to distribute it. The details of how encryption is
+ performed are dependent on the mechanism used to convey SDP - see [4]
+ for how this is done for session announcements.
+
+ If a session announcement is private it is possible to use that
+ private announcement to convey encryption keys necessary to decode
+ each of the media in a conference, including enough information to
+ know which encryption scheme is used for each media.
+
+5.4. Obtaining Further Information about a Session
+
+ A session description should convey enough information to decide
+ whether or not to participate in a session. SDP may include
+ additional pointers in the form of Universal Resource Identifiers
+ (URIs) for more information about the session.
+
+5.5. Categorisation
+
+ When many session descriptions are being distributed by SAP or any
+ other advertisement mechanism, it may be desirable to filter
+ announcements that are of interest from those that are not. SDP
+ supports a categorisation mechanism for sessions that is capable of
+ being automated.
+
+5.6. Internationalization
+
+ The SDP specification recommends the use of the ISO 10646 character
+ sets in the UTF-8 encoding (RFC 2044) to allow many different
+ languages to be represented. However, to assist in compact
+ representations, SDP also allows other character sets such as ISO
+ 8859-1 to be used when desired. Internationalization only applies to
+ free-text fields (session name and background information), and not
+ to SDP as a whole.
+
+6. SDP Specification
+
+ SDP session descriptions are entirely textual using the ISO 10646
+ character set in UTF-8 encoding. SDP field names and attribute names
+ use only the US-ASCII subset of UTF-8, but textual fields and
+ attribute values may use the full ISO 10646 character set. The
+ textual form, as opposed to a binary encoding such as ASN/1 or XDR,
+
+ was chosen to enhance portability, to enable a variety of transports
+ to be used (e.g., session description in a MIME email message) and to
+ allow flexible, text-based toolkits (e.g., Tcl/Tk) to be used to
+ generate and to process session descriptions. However, since the
+ total bandwidth allocated to all SAP announcements is strictly
+ limited, the encoding is deliberately compact. Also, since
+ announcements may be transported via very unreliable means (e.g.,
+ email) or damaged by an intermediate caching server, the encoding was
+ designed with strict order and formatting rules so that most errors
+ would result in malformed announcements which could be detected
+ easily and discarded. This also allows rapid discarding of encrypted
+ announcements for which a receiver does not have the correct key.
+
+ An SDP session description consists of a number of lines of text of
+ the form <type>=<value>. <type> is always exactly one character and is
+ case-significant. <value> is a structured text string whose format
+ depends on <type>. It also will be case-significant unless a
+ specific field defines otherwise. Whitespace is not permitted either
+ side of the `=' sign. In general <value> is either a number of fields
+ delimited by a single space character or a free format string.
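+
+ As a non-normative illustration, the following C fragment shows one
+ way such a line might be split into its <type> and <value> parts
+ (the function and variable names are purely illustrative):
+
+      /* Split an SDP line into <type> and <value>; reject whitespace
+       * around the '=' sign as required above.  Example only. */
+      #include <stdio.h>
+      #include <string.h>
+
+      static int sdp_split_line(const char *line, char *type,
+                                const char **value) {
+          if (strlen(line) < 2 || line[1] != '=')
+              return -1;                /* <type> is exactly one character */
+          if (line[0] == ' ' || line[2] == ' ')
+              return -1;                /* no whitespace around '=' */
+          *type = line[0];
+          *value = line + 2;
+          return 0;
+      }
+
+      int main(void) {
+          char type;
+          const char *value;
+          if (sdp_split_line("v=0", &type, &value) == 0)
+              printf("type=%c value=%s\n", type, value);
+          return 0;
+      }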
+
+ A session description consists of a session-level description
+ (details that apply to the whole session and all media streams) and
+ optionally several media-level descriptions (details that apply only
+ to a single media stream).
+
+ An announcement consists of a session-level section followed by zero
+ or more media-level sections. The session-level part starts with a
+ `v=' line and continues to the first media-level section. The media
+ description starts with an `m=' line and continues to the next media
+ description or end of the whole session description. In general,
+ session-level values are the default for all media unless overridden
+ by an equivalent media-level value.
+
+ When SDP is conveyed by SAP, only one session description is allowed
+ per packet. When SDP is conveyed by other means, many SDP session
+ descriptions may be concatenated together (the `v=' line indicating
+ the start of a session description terminates the previous
+ description). Some lines in each description are required and some
+ are optional but all must appear in exactly the order given here (the
+ fixed order greatly enhances error detection and allows for a simple
+ parser). Optional items are marked with a `*'.
+
+Session description
+ v= (protocol version)
+ o= (owner/creator and session identifier).
+ s= (session name)
+ i=* (session information)
+ u=* (URI of description)
+ e=* (email address)
+ p=* (phone number)
+ c=* (connection information - not required if included in all media)
+ b=* (bandwidth information)
+ One or more time descriptions (see below)
+ z=* (time zone adjustments)
+ k=* (encryption key)
+ a=* (zero or more session attribute lines)
+ Zero or more media descriptions (see below)
+
+Time description
+ t= (time the session is active)
+ r=* (zero or more repeat times)
+
+Media description
+ m= (media name and transport address)
+ i=* (media title)
+ c=* (connection information - optional if included at session-level)
+ b=* (bandwidth information)
+ k=* (encryption key)
+ a=* (zero or more media attribute lines)
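+
+ The nesting implied by these lists maps naturally onto a parser's data
+ structures. The following C sketch is non-normative and the type and
+ field names are illustrative only:
+
+      /* Illustrative layout: one session, containing time and media
+       * descriptions, each holding its own optional fields. */
+      struct sdp_time {              /* one "t=" line and its "r=" lines */
+          unsigned long start, stop;
+          char **repeats;            /* raw "r=" values */
+          int n_repeats;
+      };
+
+      struct sdp_media {             /* one "m=" section */
+          char *media_line;          /* "m=" value */
+          char *title;               /* optional "i=" */
+          char *connection;          /* optional "c=", overrides session "c=" */
+          char **attributes;         /* "a=" values */
+          int n_attributes;
+      };
+
+      struct sdp_session {           /* a whole session description */
+          int version;               /* "v=" */
+          char *origin, *name;       /* "o=", "s=" */
+          char *connection;          /* session-level "c=", if any */
+          struct sdp_time *times;    int n_times;
+          struct sdp_media *media;   int n_media;
+      };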
+
+ The set of `type' letters is deliberately small and not intended to
+ be extensible -- SDP parsers must completely ignore any announcement
+ that contains a `type' letter that they do not understand. The
+ `attribute' mechanism ("a=" described below) is the primary means for
+ extending SDP and tailoring it to particular applications or media.
+ Some attributes (the ones listed in this document) have a defined
+ meaning but others may be added on an application-, media- or
+ session-specific basis. A session directory must ignore any
+ attribute it doesn't understand.
+
+ The connection (`c=') and attribute (`a=') information in the
+ session-level section applies to all the media of that session unless
+ overridden by connection information or an attribute of the same name
+ in the media description. For instance, in the example below, each
+ media behaves as if it were given a `recvonly' attribute.
+
+ An example SDP description is:
+
+ v=0
+ o=mhandley 2890844526 2890842807 IN IP4 126.16.64.4
+ s=SDP Seminar
+ i=A Seminar on the session description protocol
+ u=http://www.cs.ucl.ac.uk/staff/M.Handley/sdp.03.ps
+ e=mjh@isi.edu (Mark Handley)
+ c=IN IP4 224.2.17.12/127
+ t=2873397496 2873404696
+ a=recvonly
+ m=audio 49170 RTP/AVP 0
+ m=video 51372 RTP/AVP 31
+ m=application 32416 udp wb
+ a=orient:portrait
+
+ Text records such as the session name and information are byte
+ strings which may contain any byte with the exceptions of 0x00 (Nul),
+ 0x0a (ASCII newline) and 0x0d (ASCII carriage return). The sequence
+ CRLF (0x0d0a) is used to end a record, although parsers should be
+ tolerant and also accept records terminated with a single newline
+ character. By default these byte strings contain ISO-10646
+ characters in UTF-8 encoding, but this default may be changed using
+ the `charset' attribute.
+
+ Protocol Version
+
+ v=0
+
+ The "v=" field gives the version of the Session Description Protocol.
+ There is no minor version number.
+
+ Origin
+
+ o=<username> <session id> <version> <network type> <address type>
+ <address>
+
+ The "o=" field gives the originator of the session (their username
+ and the address of the user's host) plus a session id and session
+ version number.
+
+ <username> is the user's login on the originating host, or it is "-"
+ if the originating host does not support the concept of user ids.
+ <username> must not contain spaces. <session id> is a numeric string
+ such that the tuple of <username>, <session id>, <network type>,
+ <address type> and <address> form a globally unique identifier for
+ the session.
+
+ The method of <session id> allocation is up to the creating tool, but
+ it has been suggested that a Network Time Protocol (NTP) timestamp be
+ used to ensure uniqueness [1].
+
+ <version> is a version number for this announcement. It is needed
+ for proxy announcements to detect which of several announcements for
+ the same session is the most recent. Again its usage is up to the
+ creating tool, so long as <version> is increased when a modification
+ is made to the session data. Again, it is recommended (but not
+ mandatory) that an NTP timestamp is used.
+
+ <network type> is a text string giving the type of network.
+ Initially "IN" is defined to have the meaning "Internet". <address
+ type> is a text string giving the type of the address that follows.
+ Initially "IP4" and "IP6" are defined. <address> is the globally
+ unique address of the machine from which the session was created.
+ For an address type of IP4, this is either the fully-qualified domain
+ name of the machine, or the dotted-decimal representation of the IP
+ version 4 address of the machine. For an address type of IP6, this
+ is either the fully-qualified domain name of the machine, or the
+ compressed textual representation of the IP version 6 address of the
+ machine. For both IP4 and IP6, the fully-qualified domain name is
+ the form that SHOULD be given unless this is unavailable, in which
+ case the globally unique address may be substituted. A local IP
+ address MUST NOT be used in any context where the SDP description
+ might leave the scope in which the address is meaningful.
+
+ In general, the "o=" field serves as a globally unique identifier for
+ this version of this session description, and the subfields excepting
+ the version taken together identify the session irrespective of any
+ modifications.
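+
+ As a non-normative example, the six sub-fields of an "o=" value can be
+ separated with a single sscanf() call in C; the buffer sizes and names
+ below are illustrative:
+
+      #include <stdio.h>
+
+      int main(void) {
+          const char *o = "mhandley 2890844526 2890842807 IN IP4 126.16.64.4";
+          char user[64], sess_id[32], sess_version[32];
+          char nettype[8], addrtype[8], addr[128];
+
+          /* field widths keep sscanf() inside the fixed buffers */
+          if (sscanf(o, "%63s %31s %31s %7s %7s %127s",
+                     user, sess_id, sess_version,
+                     nettype, addrtype, addr) != 6) {
+              fprintf(stderr, "malformed o= line\n");
+              return 1;
+          }
+          printf("session %s version %s created by %s on %s\n",
+                 sess_id, sess_version, user, addr);
+          return 0;
+      }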
+
+ Session Name
+
+ s=<session name>
+
+ The "s=" field is the session name. There must be one and only one
+ "s=" field per session description, and it must contain ISO 10646
+ characters (but see also the `charset' attribute below).
+
+ Session and Media Information
+
+ i=<session description>
+
+ The "i=" field is information about the session. There may be at
+ most one session-level "i=" field per session description, and at
+ most one "i=" field per media. Although it may be omitted, this is
+ discouraged for session announcements, and user interfaces for
+ composing sessions should require text to be entered. If it is
+ present it must contain ISO 10646 characters (but see also the
+ `charset' attribute below).
+
+ A single "i=" field can also be used for each media definition. In
+ media definitions, "i=" fields are primarily intended for labeling
+ media streams. As such, they are most likely to be useful when a
+ single session has more than one distinct media stream of the same
+ media type. An example would be two different whiteboards, one for
+ slides and one for feedback and questions.
+
+ URI
+
+ u=<URI>
+
+ o A URI is a Universal Resource Identifier as used by WWW clients
+
+ o The URI should be a pointer to additional information about the
+ conference
+
+ o This field is optional, but if it is present it should be specified
+ before the first media field
+
+ o No more than one URI field is allowed per session description
+
+
+ Email Address and Phone Number
+
+ e=<email address>
+ p=<phone number>
+
+ o These specify contact information for the person responsible for
+ the conference. This is not necessarily the same person that
+ created the conference announcement.
+
+ o Either an email field or a phone field must be specified.
+ Additional email and phone fields are allowed.
+
+ o If these are present, they should be specified before the first
+ media field.
+
+ o More than one email or phone field can be given for a session
+ description.
+
+ o Phone numbers should be given in the conventional international
+ format - preceded by a "+" and the international country code.
+ There must be a space or a hyphen ("-") between the country code
+ and the rest of the phone number. Spaces and hyphens may be used
+ to split up a phone field to aid readability if desired. For
+ example:
+
+ p=+44-171-380-7777 or p=+1 617 253 6011
+
+ o Both email addresses and phone numbers can have an optional free
+ text string associated with them, normally giving the name of the
+ person who may be contacted. This should be enclosed in
+ parenthesis if it is present. For example:
+
+ e=mjh@isi.edu (Mark Handley)
+
+ The alternative RFC822 name quoting convention is also allowed for
+ both email addresses and phone numbers. For example,
+
+ e=Mark Handley <mjh@isi.edu>
+
+ The free text string should be in the ISO-10646 character set with
+ UTF-8 encoding, or alternatively in ISO-8859-1 or other encodings
+ if the appropriate charset session-level attribute is set.
+
+ Connection Data
+
+ c=<network type> <address type> <connection address>
+
+ The "c=" field contains connection data.
+
+ A session announcement must contain one "c=" field in each media
+ description (see below) or a "c=" field at the session-level. It may
+ contain a session-level "c=" field and one additional "c=" field per
+ media description, in which case the per-media values override the
+ session-level settings for the relevant media.
+
+ The first sub-field is the network type, which is a text string
+ giving the type of network. Initially "IN" is defined to have the
+ meaning "Internet".
+
+ The second sub-field is the address type. This allows SDP to be used
+ for sessions that are not IP based. Currently only IP4 is defined.
+
+ The third sub-field is the connection address. Optional extra
+ subfields may be added after the connection address depending on the
+ value of the <address type> field.
+
+ For IP4 addresses, the connection address is defined as follows:
+
+ o Typically the connection address will be a class-D IP multicast
+ group address. If the session is not multicast, then the
+ connection address contains the fully-qualified domain name or the
+ unicast IP address of the expected data source or data relay or
+ data sink as determined by additional attribute fields. It is not
+ expected that fully-qualified domain names or unicast addresses
+ will be given in a session description that is communicated by a
+ multicast announcement, though this is not prohibited. If a
+ unicast data stream is to pass through a network address
+ translator, the use of a fully-qualified domain name rather than a
+ unicast IP address is RECOMMENDED. In other cases, the use of an
+ IP address to specify a particular interface on a multi-homed host
+ might be required. Thus this specification leaves the decision as
+ to which to use up to the individual application, but all
+ applications MUST be able to cope with receiving both formats.
+
+ o Conferences using an IP multicast connection address must also have
+ a time to live (TTL) value present in addition to the multicast
+ address. The TTL and the address together define the scope with
+ which multicast packets sent in this conference will be sent. TTL
+ values must be in the range 0-255.
+
+ The TTL for the session is appended to the address using a slash as
+ a separator. An example is:
+
+ c=IN IP4 224.2.1.1/127
+
+ Hierarchical or layered encoding schemes are data streams where the
+ encoding from a single media source is split into a number of
+ layers. The receiver can choose the desired quality (and hence
+ bandwidth) by only subscribing to a subset of these layers. Such
+ layered encodings are normally transmitted in multiple multicast
+ groups to allow multicast pruning. This technique keeps unwanted
+ traffic from sites only requiring certain levels of the hierarchy.
+ For applications requiring multiple multicast groups, we allow the
+ following notation to be used for the connection address:
+
+ <base multicast address>/<ttl>/<number of addresses>
+
+ If the number of addresses is not given it is assumed to be one.
+ Multicast addresses so assigned are contiguously allocated above
+ the base address, so that, for example:
+
+ c=IN IP4 224.2.1.1/127/3
+
+ would state that addresses 224.2.1.1, 224.2.1.2 and 224.2.1.3 are
+ to be used at a ttl of 127. This is semantically identical to
+ including multiple "c=" lines in a media description:
+
+ c=IN IP4 224.2.1.1/127
+ c=IN IP4 224.2.1.2/127
+ c=IN IP4 224.2.1.3/127
+
+ Multiple addresses or "c=" lines can only be specified on a per-
+ media basis, and not for a session-level "c=" field.
+
+ It is illegal for the slash notation described above to be used for
+ IP unicast addresses.
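+
+ A non-normative C example of splitting an IP4 connection address that
+ may carry the optional /<ttl> and /<number of addresses> suffixes
+ (names are illustrative):
+
+      #include <stdio.h>
+
+      int main(void) {
+          const char *c = "224.2.1.1/127/3";   /* value after "IN IP4 " */
+          char base[64];
+          unsigned ttl = 0, count = 1;         /* count defaults to one */
+
+          int n = sscanf(c, "%63[^/]/%u/%u", base, &ttl, &count);
+          if (n < 1) {
+              fprintf(stderr, "malformed connection address\n");
+              return 1;
+          }
+          /* for a multicast address, the TTL must be present (n >= 2) */
+          printf("base=%s ttl=%u addresses=%u\n", base, ttl, count);
+          return 0;
+      }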
+
+ Bandwidth
+
+ b=<modifier>:<bandwidth-value>
+
+ o This specifies the proposed bandwidth to be used by the session or
+ media, and is optional.
+
+ o <bandwidth-value> is in kilobits per second
+
+ o <modifier> is a single alphanumeric word giving the meaning of the
+ bandwidth figure.
+
+ o Two modifiers are initially defined:
+
+ CT Conference Total: An implicit maximum bandwidth is associated with
+ each TTL on the Mbone or within a particular multicast
+ administrative scope region (the Mbone bandwidth vs. TTL limits are
+ given in the MBone FAQ). If the bandwidth of a session or media in
+ a session is different from the bandwidth implicit from the scope,
+ a `b=CT:...' line should be supplied for the session giving the
+ proposed upper limit to the bandwidth used. The primary purpose of
+ this is to give an approximate idea as to whether two or more
+ conferences can co-exist simultaneously.
+
+ AS Application-Specific Maximum: The bandwidth is interpreted to be
+ application-specific, i.e., will be the application's concept of
+ maximum bandwidth. Normally this will coincide with what is set on
+ the application's "maximum bandwidth" control if applicable.
+
+ Note that CT gives a total bandwidth figure for all the media at
+ all sites. AS gives a bandwidth figure for a single media at a
+ single site, although there may be many sites sending
+ simultaneously.
+
+ o Extension Mechanism: Tool writers can define experimental bandwidth
+ modifiers by prefixing their modifier with "X-". For example:
+
+ b=X-YZ:128
+
+ SDP parsers should ignore bandwidth fields with unknown modifiers.
+ Modifiers should be alpha-numeric and, although no length limit is
+ given, they are recommended to be short.
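+
+ A non-normative C example of parsing a "b=" value and ignoring unknown
+ modifiers, as recommended above (names are illustrative):
+
+      #include <stdio.h>
+      #include <stdlib.h>
+      #include <string.h>
+
+      int main(void) {
+          const char *b = "CT:144";              /* value of a "b=" line */
+          const char *colon = strchr(b, ':');
+          if (!colon) {
+              fprintf(stderr, "malformed b= line\n");
+              return 1;
+          }
+          size_t mlen = (size_t)(colon - b);
+          long kbps = strtol(colon + 1, NULL, 10);  /* kilobits per second */
+          if (mlen == 2 && strncmp(b, "CT", 2) == 0)
+              printf("conference total: %ld kb/s\n", kbps);
+          else if (mlen == 2 && strncmp(b, "AS", 2) == 0)
+              printf("application maximum: %ld kb/s\n", kbps);
+          else
+              printf("unknown modifier, ignored\n"); /* e.g. "X-" experiments */
+          return 0;
+      }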
+
+ Times, Repeat Times and Time Zones
+
+ t=<start time> <stop time>
+
+ o "t=" fields specify the start and stop times for a conference
+ session. Multiple "t=" fields may be used if a session is active
+ at multiple irregularly spaced times; each additional "t=" field
+ specifies an additional period of time for which the session will
+ be active. If the session is active at regular times, an "r="
+ field (see below) should be used in addition to and following a
+ "t=" field - in which case the "t=" field specifies the start and
+ stop times of the repeat sequence.
+
+ o The first and second sub-fields give the start and stop times for
+ the conference respectively. These values are the decimal
+ representation of Network Time Protocol (NTP) time values in
+ seconds [1]. To convert these values to UNIX time, subtract
+ decimal 2208988800.
+
+ o If the stop-time is set to zero, then the session is not bounded,
+ though it will not become active until after the start-time. If
+ the start-time is also zero, the session is regarded as permanent.
+
+ User interfaces should strongly discourage the creation of
+ unbounded and permanent sessions as they give no information about
+ when the session is actually going to terminate, and so make
+ scheduling difficult.
+
+ The general assumption may be made, when displaying unbounded
+ sessions that have not timed out to the user, that an unbounded
+ session will only be active until half an hour from the current
+ time or the session start time, whichever is the later. If
+ behaviour other than this is required, an end-time should be given
+ and modified as appropriate when new information becomes available
+ about when the session should really end.
+
+ Permanent sessions may be shown to the user as never being active
+ unless there are associated repeat times which state precisely when
+ the session will be active. In general, permanent sessions should
+ not be created for any session expected to have a duration of less
+ than 2 months, and should be discouraged for sessions expected to
+ have a duration of less than 6 months.
+
+ r=<repeat interval> <active duration> <list of offsets from start-
+ time>
+
+ o "r=" fields specify repeat times for a session. For example, if
+ a session is active at 10am on Monday and 11am on Tuesday for one
+ hour each week for three months, then the <start time> in the
+ corresponding "t=" field would be the NTP representation of 10am on
+ the first Monday, the <repeat interval> would be 1 week, the
+ <active duration> would be 1 hour, and the offsets would be zero
+ and 25 hours. The corresponding "t=" field stop time would be the
+ NTP representation of the end of the last session three months
+ later. By default all fields are in seconds, so the "r=" and "t="
+ fields might be:
+
+ t=3034423619 3042462419
+ r=604800 3600 0 90000
+
+ To make announcements more compact, times may also be given in units
+ of days, hours or minutes. The syntax for these is a number
+ immediately followed by a single case-sensitive character.
+ Fractional units are not allowed - a smaller unit should be used
+ instead. The following unit specification characters are allowed:
+
+ d - days (86400 seconds)
+ h - hours (3600 seconds)
+ m - minutes (60 seconds)
+ s - seconds (allowed for completeness but not recommended)
+
+ Thus, the above announcement could also have been written:
+
+ r=7d 1h 0 25h
+
+ Monthly and yearly repeats cannot currently be directly specified
+ with a single SDP repeat time - instead separate "t" fields should
+ be used to explicitly list the session times.
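+
+ As a non-normative illustration, the unit suffixes above and the
+ NTP-to-UNIX conversion described for "t=" take only a few lines of C
+ (names are illustrative):
+
+      #include <stdio.h>
+      #include <stdlib.h>
+      #include <time.h>
+
+      /* decode a typed time such as "7d" or "25h" into seconds */
+      static long typed_time(const char *s) {
+          char *end;
+          long v = strtol(s, &end, 10);
+          switch (*end) {                 /* optional unit character */
+          case 'd': return v * 86400;
+          case 'h': return v * 3600;
+          case 'm': return v * 60;
+          default:  return v;             /* plain seconds */
+          }
+      }
+
+      int main(void) {
+          /* t=3034423619 ...: subtract the NTP offset to get UNIX time */
+          time_t unix_start = (time_t)(3034423619UL - 2208988800UL);
+          printf("7d = %ld s, 25h = %ld s\n",
+                 typed_time("7d"), typed_time("25h"));
+          printf("session starts at %s", ctime(&unix_start));
+          return 0;
+      }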
+
+ z=<adjustment time> <offset> <adjustment time> <offset> ....
+
+ o To schedule a repeated session which spans a change from daylight-
+ saving time to standard time or vice-versa, it is necessary to
+ specify offsets from the base repeat times. This is required
+ because different time zones change time at different times of day,
+ different countries change to or from daylight time on different
+ dates, and some countries do not have daylight saving time at all.
+
+ Thus in order to schedule a session that is at the same time winter
+ and summer, it must be possible to specify unambiguously by whose
+ time zone a session is scheduled. To simplify this task for
+ receivers, we allow the sender to specify the NTP time that a time
+ zone adjustment happens and the offset from the time when the
+ session was first scheduled. The "z" field allows the sender to
+ specify a list of these adjustment times and offsets from the base
+ time.
+
+ An example might be:
+
+ z=2882844526 -1h 2898848070 0
+
+ This specifies that at time 2882844526 the time base by which the
+ session's repeat times are calculated is shifted back by 1 hour,
+ and that at time 2898848070 the session's original time base is
+ restored. Adjustments are always relative to the specified start
+ time - they are not cumulative.
+
+ o If a session is likely to last several years, it is expected that
+ the session announcement will be modified periodically rather than
+ transmitting several years' worth of adjustments in one announcement.
+
+ Encryption Keys
+
+ k=<method>
+ k=<method>:<encryption key>
+
+ o The session description protocol may be used to convey encryption
+ keys. A key field is permitted before the first media entry (in
+ which case it applies to all media in the session), or for each
+ media entry as required.
+
+ o The format of keys and their usage is outside the scope of this
+ document, but see [3].
+
+ o The method indicates the mechanism to be used to obtain a usable
+ key by external means, or from the encoded encryption key given.
+
+ The following methods are defined:
+
+ k=clear:<encryption key>
+ The encryption key (as described in [3] for RTP media streams
+ under the AV profile) is included untransformed in this key
+ field.
+
+ k=base64:<encoded encryption key>
+ The encryption key (as described in [3] for RTP media streams
+ under the AV profile) is included in this key field but has been
+ base64 encoded because it includes characters that are
+ prohibited in SDP.
+
+ k=uri:<URI to obtain key>
+ A Universal Resource Identifier as used by WWW clients is
+ included in this key field. The URI refers to the data
+ containing the key, and may require additional authentication
+ before the key can be returned. When a request is made to the
+ given URI, the MIME content-type of the reply specifies the
+ encoding for the key in the reply. The key should not be
+ obtained until the user wishes to join the session to reduce
+ synchronisation of requests to the WWW server(s).
+
+ k=prompt
+ No key is included in this SDP description, but the session or
+ media stream referred to by this key field is encrypted. The
+ user should be prompted for the key when attempting to join the
+ session, and this user-supplied key should then be used to
+ decrypt the media streams.
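+
+ A non-normative C example of separating the <method> and optional key
+ portions of a "k=" value (names and the sample key are illustrative):
+
+      #include <stdio.h>
+      #include <string.h>
+
+      int main(void) {
+          const char *k = "base64:bXkgc2VjcmV0IGtleQ==";  /* "k=" value */
+          const char *colon = strchr(k, ':');
+          char method[16];
+
+          if (colon) {
+              snprintf(method, sizeof(method), "%.*s", (int)(colon - k), k);
+              printf("method=%s key=%s\n", method, colon + 1);
+          } else {
+              printf("method=%s (no inline key, e.g. \"prompt\")\n", k);
+          }
+          return 0;
+      }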
+
+ Attributes
+
+ a=<attribute>
+ a=<attribute>:<value>
+
+ Attributes are the primary means for extending SDP. Attributes may
+ be defined to be used as "session-level" attributes, "media-level"
+ attributes, or both.
+
+ A media description may have any number of attributes ("a=" fields)
+ which are media specific. These are referred to as "media-level"
+ attributes and add information about the media stream. Attribute
+ fields can also be added before the first media field; these
+ "session-level" attributes convey additional information that applies
+ to the conference as a whole rather than to individual media; an
+ example might be the conference's floor control policy.
+
+ Attribute fields may be of two forms:
+
+ o property attributes. A property attribute is simply of the form
+ "a=<flag>". These are binary attributes, and the presence of the
+ attribute conveys that the attribute is a property of the session.
+ An example might be "a=recvonly".
+
+ o value attributes. A value attribute is of the form
+ "a=<attribute>:<value>". An example might be that a whiteboard
+ could have the value attribute "a=orient:landscape"
+
+ Attribute interpretation depends on the media tool being invoked.
+ Thus receivers of session descriptions should be configurable in
+ their interpretation of announcements in general and of attributes in
+ particular.
+
+ Attribute names must be in the US-ASCII subset of ISO-10646/UTF-8.
+
+ Attribute values are byte strings, and MAY use any byte value except
+ 0x00 (Nul), 0x0A (LF), and 0x0D (CR). By default, attribute values
+ are to be interpreted as in ISO-10646 character set with UTF-8
+ encoding. Unlike other text fields, attribute values are NOT
+ normally affected by the `charset' attribute as this would make
+ comparisons against known values problematic. However, when an
+ attribute is defined, it can be defined to be charset-dependent, in
+ which case its value should be interpreted in the session charset
+ rather than in ISO-10646.
+
+ Attributes that will be commonly used can be registered with IANA
+ (see Appendix B). Unregistered attributes should begin with "X-" to
+ prevent inadvertent collision with registered attributes. In either
+ case, if an attribute is received that is not understood, it should
+ simply be ignored by the receiver.
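+
+ As a non-normative illustration, the two attribute forms can be told
+ apart in C simply by looking for a colon (names are illustrative):
+
+      #include <stdio.h>
+      #include <string.h>
+
+      /* classify an "a=" value as a property or a value attribute */
+      static void handle_attribute(const char *a) {
+          const char *colon = strchr(a, ':');
+          if (colon)
+              printf("value attribute: name=%.*s value=%s\n",
+                     (int)(colon - a), a, colon + 1);
+          else
+              printf("property attribute: %s\n", a);
+      }
+
+      int main(void) {
+          handle_attribute("recvonly");
+          handle_attribute("orient:landscape");
+          return 0;
+      }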
+
+ Media Announcements
+
+ m=<media> <port> <transport> <fmt list>
+
+ A session description may contain a number of media descriptions.
+ Each media description starts with an "m=" field, and is terminated
+ by either the next "m=" field or by the end of the session
+ description. A media field also has several sub-fields:
+
+ o The first sub-field is the media type. Currently defined media are
+ "audio", "video", "application", "data" and "control", though this
+ list may be extended as new communication modalities emerge (e.g.,
+ telepresence). The difference between "application" and "data" is
+ that the former is a media flow such as whiteboard information, and
+ the latter is bulk-data transfer such as multicasting of program
+ executables which will not typically be displayed to the user.
+ "control" is used to specify an additional conference control
+ channel for the session.
+
+ o The second sub-field is the transport port to which the media
+ stream will be sent. The meaning of the transport port depends on
+ the network being used as specified in the relevant "c" field and
+ on the transport protocol defined in the third sub-field. Other
+ ports used by the media application (such as the RTCP port, see
+ [2]) should be derived algorithmically from the base media port.
+
+ Note: For transports based on UDP, the value should be in the range
+ 1024 to 65535 inclusive. For RTP compliance it should be an even
+ number.
+
+ For applications where hierarchically encoded streams are being
+ sent to a unicast address, it may be necessary to specify multiple
+ transport ports. This is done using a similar notation to that
+ used for IP multicast addresses in the "c=" field:
+
+ m=<media> <port>/<number of ports> <transport> <fmt list>
+
+ In such a case, the ports used depend on the transport protocol.
+ For RTP, only the even ports are used for data and the
+ corresponding one-higher odd port is used for RTCP. For example:
+
+ m=video 49170/2 RTP/AVP 31
+
+ would specify that ports 49170 and 49171 form one RTP/RTCP pair and
+ 49172 and 49173 form the second RTP/RTCP pair. RTP/AVP is the
+ transport protocol and 31 is the format (see below).
+
+ It is illegal for both multiple addresses to be specified in the
+ "c=" field and for multiple ports to be specified in the "m=" field
+ in the same session description.
+
+ o The third sub-field is the transport protocol. The transport
+ protocol values are dependent on the address-type field in the "c="
+ fields. Thus a "c=" field of IP4 defines that the transport
+ protocol runs over IP4. For IP4, it is normally expected that most
+ media traffic will be carried as RTP over UDP. The following
+ transport protocols are preliminarily defined, but may be extended
+ through registration of new protocols with IANA:
+
+ - RTP/AVP - the IETF's Realtime Transport Protocol using the
+ Audio/Video profile carried over UDP.
+
+ - udp - User Datagram Protocol
+
+ If an application uses a single combined proprietary media format
+ and transport protocol over UDP, then simply specifying the
+ transport protocol as udp and using the format field to distinguish
+ the combined protocol is recommended. If a transport protocol is
+ used over UDP to carry several distinct media types that need to be
+ distinguished by a session directory, then specifying the transport
+ protocol and media format separately is necessary. RTP is an
+ example of a transport-protocol that carries multiple payload
+ formats that must be distinguished by the session directory for it
+ to know how to start appropriate tools, relays, mixers or
+ recorders.
+
+ The main reason to specify the transport-protocol in addition to
+ the media format is that the same standard media formats may be
+ carried over different transport protocols even when the network
+ protocol is the same - a historical example is vat PCM audio and
+ RTP PCM audio. In addition, relays and monitoring tools that are
+ transport-protocol-specific but format-independent are possible.
+
+ For RTP media streams operating under the RTP Audio/Video Profile
+ [3], the protocol field is "RTP/AVP". Should other RTP profiles be
+ defined in the future, their profiles will be specified in the same
+ way. For example, the protocol field "RTP/XYZ" would specify RTP
+ operating under a profile whose short name is "XYZ".
+
+ o The fourth and subsequent sub-fields are media formats. For audio
+ and video, these will normally be a media payload type as defined
+ in the RTP Audio/Video Profile.
+
+ When a list of payload formats is given, this implies that all of
+ these formats may be used in the session, but the first of these
+ formats is the default format for the session.
+
+ For media whose transport protocol is not RTP or UDP the format
+ field is protocol specific. Such formats should be defined in an
+ additional specification document.
+
+ For media whose transport protocol is RTP, SDP can be used to
+ provide a dynamic binding of media encoding to RTP payload type.
+ The encoding names in the RTP AV Profile do not specify unique
+ audio encodings (in terms of clock rate and number of audio
+ channels), and so they are not used directly in SDP format fields.
+ Instead, the payload type number should be used to specify the
+ format for static payload types and the payload type number along
+ with additional encoding information should be used for dynamically
+ allocated payload types.
+
+ An example of a static payload type is u-law PCM coded single
+ channel audio sampled at 8KHz. This is completely defined in the
+ RTP Audio/Video profile as payload type 0, so the media field for
+ such a stream sent to UDP port 49232 is:
+
+ m=audio 49232 RTP/AVP 0
+
+ An example of a dynamic payload type is 16 bit linear encoded
+ stereo audio sampled at 16KHz. If we wish to use dynamic RTP/AVP
+ payload type 98 for such a stream, additional information is
+ required to decode it:
+
+ m=audio 49232 RTP/AVP 98
+ a=rtpmap:98 L16/16000/2
+
+ The general form of an rtpmap attribute is:
+
+ a=rtpmap:<payload type> <encoding name>/<clock rate>[/<encoding
+ parameters>]
+
+ For audio streams, <encoding parameters> may specify the number of
+ audio channels. This parameter may be omitted if the number of
+ channels is one provided no additional parameters are needed. For
+ video streams, no encoding parameters are currently specified.
+
+ Additional parameters may be defined in the future, but
+ codec-specific parameters should not be added. Parameters added to
+ an rtpmap attribute should only be those required for a session
+ directory to make the choice of appropriate media tool to
+ participate in a session. Codec-specific parameters should be
+ added in other attributes.
+
+ Up to one rtpmap attribute can be defined for each media format
+ specified. Thus we might have:
+
+ m=audio 49230 RTP/AVP 96 97 98
+ a=rtpmap:96 L8/8000
+ a=rtpmap:97 L16/8000
+ a=rtpmap:98 L16/11025/2
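+
+ A non-normative C example of decoding an rtpmap value, defaulting to
+ one audio channel when the encoding parameters are omitted (names are
+ illustrative):
+
+      #include <stdio.h>
+
+      int main(void) {
+          const char *a = "rtpmap:98 L16/11025/2";   /* "a=" value */
+          int pt, channels = 1;            /* one channel if omitted */
+          unsigned clock;
+          char enc[32];
+
+          int n = sscanf(a, "rtpmap:%d %31[^/]/%u/%d",
+                         &pt, enc, &clock, &channels);
+          if (n < 3) {
+              fprintf(stderr, "malformed rtpmap attribute\n");
+              return 1;
+          }
+          printf("payload %d: %s at %u Hz, %d channel(s)\n",
+                 pt, enc, clock, channels);
+          return 0;
+      }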
+
+ RTP profiles that specify the use of dynamic payload types must
+ define the set of valid encoding names and/or a means to register
+ encoding names if that profile is to be used with SDP.
+
+ Experimental encoding formats can also be specified using rtpmap.
+ RTP formats that are not registered as standard format names must
+ be preceded by "X-". Thus a new experimental redundant audio
+ stream called GSMLPC using dynamic payload type 99 could be
+ specified as:
+
+ m=audio 49232 RTP/AVP 99
+ a=rtpmap:99 X-GSMLPC/8000
+
+ Such an experimental encoding requires that any site wishing to
+ receive the media stream has relevant configured state in its
+ session directory to know which tools are appropriate.
+
+ Note that RTP audio formats typically do not include information
+ about the number of samples per packet. If a non-default (as
+ defined in the RTP Audio/Video Profile) packetisation is required,
+ the "ptime" attribute is used as given below.
+
+ For more details on RTP audio and video formats, see [3].
+
+ o Formats for non-RTP media should be registered as MIME content
+ types as described in Appendix B. For example, the LBL whiteboard
+ application might be registered as MIME content-type application/wb
+ with encoding considerations specifying that it operates over UDP,
+ with no appropriate file format. In SDP this would then be
+ expressed using a combination of the "media" field and the "fmt"
+ field, as follows:
+
+ m=application 32416 udp wb
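+
+ As a non-normative illustration, the sub-fields of an "m=" value,
+ including the optional <number of ports>, can be separated in C as
+ follows (names are illustrative):
+
+      #include <stdio.h>
+
+      int main(void) {
+          const char *m = "audio 49170 RTP/AVP 0 96 97";  /* "m=" value */
+          char media[16], port_spec[16], proto[32];
+          int consumed = 0, nports = 1;
+          unsigned port;
+
+          if (sscanf(m, "%15s %15s %31s %n",
+                     media, port_spec, proto, &consumed) < 3) {
+              fprintf(stderr, "malformed m= line\n");
+              return 1;
+          }
+          /* the port may carry a "/<number of ports>" suffix */
+          if (sscanf(port_spec, "%u/%d", &port, &nports) < 1)
+              return 1;
+          printf("%s on port %u (%d port(s)) over %s; formats: %s\n",
+                 media, port, nports, proto, m + consumed);
+          return 0;
+      }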
+
+ Suggested Attributes
+
+ The following attributes are suggested. Since application writers
+ may add new attributes as they are required, this list is not
+ exhaustive.
+
+ a=cat:<category>
+ This attribute gives the dot-separated hierarchical category of
+ the session. This is to enable a receiver to filter unwanted
+ sessions by category. It would probably have been a compulsory
+ separate field, except for its experimental nature at this time.
+ It is a session-level attribute, and is not dependent on charset.
+
+ a=keywds:<keywords>
+ Like the cat attribute, this is to assist identifying wanted
+ sessions at the receiver. This allows a receiver to select
+ interesting session based on keywords describing the purpose of
+ the session. It is a session-level attribute. It is a charset
+ dependent attribute, meaning that its value should be interpreted
+ in the charset specified for the session description if one is
+ specified, or by default in ISO 10646/UTF-8.
+
+ a=tool:<name and version of tool>
+ This gives the name and version number of the tool used to create
+ the session description. It is a session-level attribute, and is
+ not dependent on charset.
+
+ a=ptime:<packet time>
+ This gives the length of time in milliseconds represented by the
+ media in a packet. This is probably only meaningful for audio
+ data. It should not be necessary to know ptime to decode RTP or
+ vat audio, and it is intended as a recommendation for the
+ encoding/packetisation of audio. It is a media attribute, and is
+ not dependent on charset.
+
+ a=recvonly
+ This specifies that the tools should be started in receive-only
+ mode where applicable. It can be either a session or media
+ attribute, and is not dependent on charset.
+
+ a=sendrecv
+ This specifies that the tools should be started in send and
+ receive mode. This is necessary for interactive conferences with
+ tools such as wb which defaults to receive only mode. It can be
+ either a session or media attribute, and is not dependent on
+ charset.
+
+ a=sendonly
+ This specifies that the tools should be started in send-only
+ mode. An example may be where a different unicast address is to
+ be used for a traffic destination than for a traffic source. In
+ such a case, two media descriptions may be used, one sendonly and
+ one recvonly. It can be either a session or media attribute, but
+ would normally only be used as a media attribute, and is not
+ dependent on charset.
+
+ a=orient:<whiteboard orientation>
+ Normally this is only used in a whiteboard media specification.
+ It specifies the orientation of the whiteboard on the screen.
+ It is a media attribute. Permitted values are `portrait',
+ `landscape' and `seascape' (upside down landscape). It is not
+ dependent on charset.
+
+ a=type:<conference type>
+ This specifies the type of the conference. Suggested values are
+ `broadcast', `meeting', `moderated', `test' and `H332'.
+ `recvonly' should be the default for `type:broadcast' sessions,
+ `type:meeting' should imply `sendrecv' and `type:moderated'
+ should indicate the use of a floor control tool and that the
+ media tools are started so as to "mute" new sites joining the
+ conference.
+
+ Specifying the attribute type:H332 indicates that this loosely
+ coupled session is part of a H.332 session as defined in the ITU
+ H.332 specification [10]. Media tools should be started
+ `recvonly'.
+
+ Specifying the attribute type:test is suggested as a hint that,
+ unless explicitly requested otherwise, receivers can safely avoid
+ displaying this session description to users.
+
+ The type attribute is a session-level attribute, and is not
+ dependent on charset.
+
+ a=charset:<character set>
+ This specifies the character set to be used to display the
+ session name and information data. By default, the ISO-10646
+ character set in UTF-8 encoding is used. If a more compact
+ representation is required, other character sets may be used such
+ as ISO-8859-1 for Northern European languages. In particular,
+ ISO 8859-1 is specified with the following SDP attribute:
+
+ a=charset:ISO-8859-1
+
+ This is a session-level attribute; if this attribute is present,
+ it must be before the first media field. The charset specified
+ MUST be one of those registered with IANA, such as ISO-8859-1.
+ The character set identifier is a US-ASCII string and MUST be
+ compared against the IANA identifiers using a case-insensitive
+ comparison. If the identifier is not recognised or not
+ supported, all strings that are affected by it SHOULD be regarded
+ as byte strings.
+
+ Note that a character set specified MUST still prohibit the use
+ of bytes 0x00 (Nul), 0x0A (LF) and 0x0d (CR). Character sets
+ requiring the use of these characters MUST define a quoting
+ mechanism that prevents these bytes appearing within text fields.
+
+ a=sdplang:<language tag>
+ This can be a session level attribute or a media level attribute.
+ As a session level attribute, it specifies the language for the
+ session description. As a media level attribute, it specifies
+ the language for any media-level SDP information field associated
+ with that media. Multiple sdplang attributes can be provided
+ either at session or media level if the session description or
+ media use multiple languages, in which
+ case the order of the attributes indicates the order of
+ importance of the various languages in the session or media from
+ most important to least important.
+
+ In general, sending session descriptions consisting of multiple
+ languages should be discouraged. Instead, multiple descriptions
+ should be sent describing the session, one in each language.
+ However this is not possible with all transport mechanisms, and
+ so multiple sdplang attributes are allowed although not
+ recommended.
+
+ The sdplang attribute value must be a single RFC 1766 language
+ tag in US-ASCII. It is not dependent on the charset attribute.
+ An sdplang attribute SHOULD be specified when a session is of
+ sufficient scope to cross geographic boundaries where the
+ language of recipients cannot be assumed, or where the session is
+ in a different language from the locally assumed norm.
+
+ a=lang:<language tag>
+ This can be a session level attribute or a media level attribute.
+ As a session level attribute, it specifies the default language
+ for the session being described. As a media level attribute, it
+ specifies the language for that media, overriding any session-
+ level language specified. Multiple lang attributes can be
+ provided either at session or media level if the session
+ description or media use multiple languages, in
+ which case the order of the attributes indicates the order of
+ importance of the various languages in the session or media from
+ most important to least important.
+
+ The lang attribute value must be a single RFC 1766 language tag
+ in US-ASCII. It is not dependent on the charset attribute. A
+ lang attribute SHOULD be specified when a session is of
+ sufficient scope to cross geographic boundaries where the
+ language of recipients cannot be assumed, or where the session is
+ in a different language from the locally assumed norm.
+
+ a=framerate:<frame rate>
+ This gives the maximum video frame rate in frames/sec. It is
+ intended as a recommendation for the encoding of video data.
+ Decimal representations of fractional values using the notation
+ "<integer>.<fraction>" are allowed. It is a media attribute, is
+ only defined for video media, and is not dependent on charset.
+
+ a=quality:<quality>
+ This gives a suggestion for the quality of the encoding as an
+ integer value.
+
+ The intention of the quality attribute for video is to specify a
+ non-default trade-off between frame-rate and still-image quality.
+ For video, the value is in the range 0 to 10, with the following
+ suggested meaning:
+
+ 10 - the best still-image quality the compression scheme can
+ give.
+
+ 5 - the default behaviour given no quality suggestion.
+
+ 0 - the worst still-image quality the codec designer thinks is
+ still usable.
+
+ It is a media attribute, and is not dependent on charset.
+
+ a=fmtp:<format> <format specific parameters>
+ This attribute allows parameters that are specific to a
+ particular format to be conveyed in a way that SDP doesn't have
+ to understand them. The format must be one of the formats
+ specified for the media. Format-specific parameters may be any
+ set of parameters required to be conveyed by SDP and given
+ unchanged to the media tool that will use this format.
+
+ It is a media attribute, and is not dependent on charset.
+
+6.1. Communicating Conference Control Policy
+
+ There is some debate over the way conference control policy should be
+ communicated. In general, the authors believe that an implicit
+ declarative style of specifying conference control is desirable where
+ possible.
+
+ A simple declarative style uses a single conference attribute field
+ before the first media field, possibly supplemented by properties
+ such as `recvonly' for some of the media tools. This conference
+ attribute conveys the conference control policy. An example might be:
+
+ a=type:moderated
+
+ In some cases, however, it is possible that this may be insufficient
+ to communicate the details of an unusual conference control policy.
+ If this is the case, then a conference attribute specifying external
+ control might be set, and then one or more "media" fields might be
+ used to specify the conference control tools and configuration data
+ for those tools. An example is an ITU H.332 session:
+
+ c=IN IP4 224.5.6.7
+ a=type:H332
+ m=audio 49230 RTP/AVP 0
+ m=video 49232 RTP/AVP 31
+ m=application 12349 udp wb
+ m=control 49234 H323 mc
+ c=IN IP4 134.134.157.81
+
+ In this example, a general conference attribute (type:H332) is
+ specified stating that conference control will be provided by an
+ external H.332 tool, and a contact address for the H.323 session
+ multipoint controller is given.
+
+ In this document, only the declarative style of conference control
+ declaration is specified. Other forms of conference control should
+ specify an appropriate type attribute, and should define the
+ implications this has for control media.
+
+7. Security Considerations
+
+ SDP is a session description format that describes multimedia
+ sessions. A session description should not be trusted unless it has
+ been obtained by an authenticated transport protocol from a trusted
+ source. Many different transport protocols may be used to distribute
+ session descriptions, and the nature of the authentication will differ
+ from transport to transport.
+
+ One transport that will frequently be used to distribute session
+ descriptions is the Session Announcement Protocol (SAP). SAP
+ provides both encryption and authentication mechanisms but due to the
+ nature of session announcements it is likely that there are many
+ occasions where the originator of a session announcement cannot be
+ authenticated because they are previously unknown to the receiver of
+ the announcement and because no common public key infrastructure is
+ available.
+
+ On receiving a session description over an unauthenticated transport
+ mechanism or from an untrusted party, software parsing the session
+ should take a few precautions. Session descriptions contain
+ information required to start software on the receiver's system.
+ Software that parses a session description MUST NOT be able to start
+ other software except that which is specifically configured as
+ appropriate software to participate in multimedia sessions. It is
+ normally considered INAPPROPRIATE for software parsing a session
+ description to start, on a user's system, software that is
+ appropriate to participate in multimedia sessions, without the user
+ first being informed that such software will be started and giving
+ their consent. Thus a session description arriving by session
+ announcement, email, session invitation, or WWW page SHOULD NOT
+ deliver the user into an interactive multimedia session without
+ the user being aware that this will happen. As it is not always
+ simple to tell whether a session is interactive or not, applications
+ that are unsure should assume sessions are interactive.
+
+ In this specification, there are no attributes which would allow the
+ recipient of a session description to be informed to start multimedia
+ tools in a mode where they default to transmitting. Under some
+ circumstances it might be appropriate to define such attributes. If
+ this is done an application parsing a session description containing
+ such attributes SHOULD either ignore them, or inform the user that
+ joining this session will result in the automatic transmission of
+ multimedia data. The default behaviour for an unknown attribute is
+ to ignore it.
+
+ Session descriptions may be parsed at intermediate systems such as
+ firewalls for the purposes of opening a hole in the firewall to allow
+ the participation in multimedia sessions. It is considered
+ INAPPROPRIATE for a firewall to open such holes for unicast data
+ streams unless the session description comes in a request from inside
+ the firewall.
+
+ For multicast sessions, it is likely that local administrators will
+ apply their own policies, but the exclusive use of "local" or "site-
+ local" administrative scope within the firewall and the refusal of
+ the firewall to open a hole for such scopes will provide separation
+ of global multicast sessions from local ones.
+
+Appendix A: SDP Grammar
+
+ This appendix provides an Augmented BNF grammar for SDP. ABNF is
+ defined in RFC 2234.
+
+
+ announcement = proto-version
+ origin-field
+ session-name-field
+ information-field
+ uri-field
+ email-fields
+ phone-fields
+ connection-field
+ bandwidth-fields
+ time-fields
+ key-field
+ attribute-fields
+ media-descriptions
+
+ proto-version = "v=" 1*DIGIT CRLF
+ ;this memo describes version 0
+
+ origin-field = "o=" username space
+ sess-id space sess-version space
+ nettype space addrtype space
+ addr CRLF
+
+ session-name-field = "s=" text CRLF
+
+ information-field = ["i=" text CRLF]
+
+ uri-field = ["u=" uri CRLF]
+
+ email-fields = *("e=" email-address CRLF)
+
+ phone-fields = *("p=" phone-number CRLF)
+
+
+ connection-field = ["c=" nettype space addrtype space
+ connection-address CRLF]
+ ;a connection field must be present
+ ;in every media description or at the
+ ;session-level
+
+
+ bandwidth-fields = *("b=" bwtype ":" bandwidth CRLF)
+
+ time-fields = 1*( "t=" start-time space stop-time
+ *(CRLF repeat-fields) CRLF)
+ [zone-adjustments CRLF]
+
+
+ repeat-fields = "r=" repeat-interval space typed-time
+ 1*(space typed-time)
+
+
+ zone-adjustments = time space ["-"] typed-time
+ *(space time space ["-"] typed-time)
+
+
+ key-field = ["k=" key-type CRLF]
+
+
+ key-type = "prompt" |
+ "clear:" key-data |
+ "base64:" key-data |
+ "uri:" uri
+
+
+ key-data = email-safe | "~" | "
+
+
+ attribute-fields = *("a=" attribute CRLF)
+
+
+ media-descriptions = *( media-field
+ information-field
+ *(connection-field)
+ bandwidth-fields
+ key-field
+ attribute-fields )
+
+
+ media-field = "m=" media space port ["/" integer]
+ space proto 1*(space fmt) CRLF
+
+
+ media = 1*(alpha-numeric)
+ ;typically "audio", "video", "application"
+ ;or "data"
+
+ fmt = 1*(alpha-numeric)
+ ;typically an RTP payload type for audio
+ ;and video media
+
+ proto = 1*(alpha-numeric)
+ ;typically "RTP/AVP" or "udp" for IP4
+
+
+ port = 1*(DIGIT)
+ ;should be in the range "1024" to "65535" inclusive
+ ;for UDP based media
+
+
+ attribute = (att-field ":" att-value) | att-field
+
+
+ att-field = 1*(alpha-numeric)
+
+
+ att-value = byte-string
+
+
+ sess-id = 1*(DIGIT)
+ ;should be unique for this originating username/host
+
+
+ sess-version = 1*(DIGIT)
+ ;0 is a new session
+
+
+ connection-address = multicast-address
+ | addr
+
+
+ multicast-address = 3*(decimal-uchar ".") decimal-uchar "/" ttl
+ [ "/" integer ]
+ ;multicast addresses may be in the range
+ ;224.0.0.0 to 239.255.255.255
+
+ ttl = decimal-uchar
+
+ start-time = time | "0"
+
+ stop-time = time | "0"
+
+ time = POS-DIGIT 9*(DIGIT)
+ ;sufficient for 2 more centuries
+
+
+ repeat-interval = typed-time
+
+ typed-time = 1*(DIGIT) [fixed-len-time-unit]
+
+
+ fixed-len-time-unit = "d" | "h" | "m" | "s"
+
+
+ bwtype = 1*(alpha-numeric)
+
+ bandwidth = 1*(DIGIT)
+
+
+ username = safe
+ ;pretty wide definition, but doesn't include space
+
+
+ email-address = email | email "(" email-safe ")" |
+ email-safe "<" email ">"
+
+
+ email = ;defined in RFC822
+
+
+ uri= ;defined in RFC1630
+
+
+ phone-number = phone | phone "(" email-safe ")" |
+ email-safe "<" phone ">"
+
+
+ phone = "+" POS-DIGIT 1*(space | "-" | DIGIT)
+ ;there must be a space or hyphen between the
+ ;international code and the rest of the number.
+
+
+ nettype = "IN"
+ ;list to be extended
+
+
+ addrtype = "IP4" | "IP6"
+ ;list to be extended
+
+
+ addr = FQDN | unicast-address
+
+
+ FQDN = 4*(alpha-numeric|"-"|".")
+ ;fully qualified domain name as specified in RFC1035
+
+ unicast-address = IP4-address | IP6-address
+
+
+ IP4-address = b1 "." decimal-uchar "." decimal-uchar "." b4
+ b1 = decimal-uchar
+ ;less than "224"; not "0" or "127"
+ b4 = decimal-uchar
+ ;not "0"
+
+ IP6-address = ;to be defined
+
+
+ text = byte-string
+ ;default is to interpret this as ISO-10646 UTF8
+ ;ISO 8859-1 requires an "a=charset:ISO-8859-1"
+ ;session-level attribute to be used
+
+
+ byte-string = 1*(0x01..0x09|0x0b|0x0c|0x0e..0xff)
+ ;any byte except NUL, CR or LF
+
+
+ decimal-uchar = DIGIT
+ | POS-DIGIT DIGIT
+ | ("1" 2*(DIGIT))
+ | ("2" ("0"|"1"|"2"|"3"|"4") DIGIT)
+ | ("2" "5" ("0"|"1"|"2"|"3"|"4"|"5"))
+
+
+ integer = POS-DIGIT *(DIGIT)
+
+
+ alpha-numeric = ALPHA | DIGIT
+
+
+ DIGIT = "0" | POS-DIGIT
+
+
+ POS-DIGIT = "1"|"2"|"3"|"4"|"5"|"6"|"7"|"8"|"9"
+
+
+ ALPHA = "a"|"b"|"c"|"d"|"e"|"f"|"g"|"h"|"i"|"j"|"k"|
+ "l"|"m"|"n"|"o "|"p"|"q"|"r"|"s"|"t"|"u"|"v"|
+ "w"|"x"|"y"|"z"|"A"|"B"|"C "|"D"|"E"|"F"|"G"|
+ "H"|"I"|"J"|"K"|"L"|"M"|"N"|"O"|"P"|" Q"|"R"|
+ "S"|"T"|"U"|"V"|"W"|"X"|"Y"|"Z"
+
+ email-safe = safe | space | tab
+
+
+ safe = alpha-numeric |
+ "'" | "'" | "-" | "." | "/" | ":" | "?" | """ |
+ "#" | "$" | "&" | "*" | ";" | "=" | "@" | "[" |
+ "]" | "^" | "_" | "`" | "{" | "|" | "}" | "+" |
+ "~" | "
+
+
+ space = %d32
+ tab = %d9
+ CRLF = %d13.10
+
+Appendix B: Guidelines for registering SDP names with IANA
+
+ There are seven field names that may be registered with IANA. Using
+ the terminology in the SDP specification BNF, they are "media",
+ "proto", "fmt", "att-field", "bwtype", "nettype" and "addrtype".
+
+ "media" (eg, audio, video, application, data).
+
+ Packetized media types, such as those used by RTP, share the
+ namespace used by media types registry [RFC 2048] (i.e. "MIME
+ types"). The list of valid media names is the set of top-level
+ MIME content types. The set of media is intended to be small and
+ not to be extended except under rare circumstances. (The MIME
+ subtype corresponds to the "fmt" parameter below).
+
+ "proto"
+
+ In general this should be an IETF standards-track transport
+ protocol identifier such as RTP/AVP (rfc 1889 under the rfc 1890
+ profile).
+
+      However, people will want to invent their own proprietary
+      transport protocols.  Some of these should be registered as a
+      "fmt" using "udp" as the protocol, and some of them probably
+      can't be.
+
+ Where the protocol and the application are intimately linked,
+ such as with the LBL whiteboard wb which used a proprietary and
+ special purpose protocol over UDP, the protocol name should be
+ "udp" and the format name that should be registered is "wb". The
+ rules for formats (see below) apply to such registrations.
+
+ Where the proprietary transport protocol really carries many
+ different data formats, it is possible to register a new protocol
+ name with IANA. In such a case, an RFC MUST be produced
+ describing the protocol and referenced in the registration. Such
+ an RFC MAY be informational, although it is preferable if it is
+ standards-track.
+
+ "fmt"
+
+ The format namespace is dependent on the context of the "proto"
+ field, so a format cannot be registered without specifying one or
+ more transport protocols that it applies to.
+
+ Formats cover all the possible encodings that might want to be
+ transported in a multimedia session.
+
+ For RTP formats that have been assigned static payload types, the
+ payload type number is used. For RTP formats using a dynamic
+ payload type number, the dynamic payload type number is given as
+ the format and an additional "rtpmap" attribute specifies the
+ format and parameters.
+
+ For non-RTP formats, any unregistered format name may be
+ registered through the MIME-type registration process [RFC 2048].
+ The type given here is the MIME subtype only (the top-level MIME
+ content type is specified by the media parameter). The MIME type
+ registration SHOULD reference a standards-track RFC which
+ describes the transport protocol for this media type. If there
+ is an existing MIME type for this format, the MIME registration
+ should be augmented to reference the transport specification for
+ this media type. If there is not an existing MIME type for this
+ format, and there exists no appropriate file format, this should
+ be noted in the encoding considerations as "no appropriate file
+ format".
+
+ "att-field" (Attribute names)
+
+ Attribute field names MAY be registered with IANA, although this
+ is not compulsory, and unknown attributes are simply ignored.
+
+ When an attribute is registered, it must be accompanied by a
+ brief specification stating the following:
+
+ o contact name, email address and telephone number
+
+ o attribute-name (as it will appear in SDP)
+
+ o long-form attribute name in English
+
+ o type of attribute (session level, media level, or both)
+
+ o whether the attribute value is subject to the charset
+ attribute.
+
+ o a one paragraph explanation of the purpose of the attribute.
+
+ o a specification of appropriate attribute values for this
+ attribute.
+
+ IANA will not sanity check such attribute registrations except to
+ ensure that they do not clash with existing registrations.
+
+ Although the above is the minimum that IANA will accept, if the
+ attribute is expected to see widespread use and interoperability
+ is an issue, authors are encouraged to produce a standards-track
+ RFC that specifies the attribute more precisely.
+
+ Submitters of registrations should ensure that the specification
+ is in the spirit of SDP attributes, most notably that the
+ attribute is platform independent in the sense that it makes no
+ implicit assumptions about operating systems and does not name
+ specific pieces of software in a manner that might inhibit
+ interoperability.
+
+ "bwtype" (bandwidth specifiers)
+
+ A proliferation of bandwidth specifiers is strongly discouraged.
+
+ New bandwidth specifiers may be registered with IANA. The
+ submission MUST reference a standards-track RFC specifying the
+ semantics of the bandwidth specifier precisely, and indicating
+ when it should be used, and why the existing registered bandwidth
+ specifiers do not suffice.
+
+ "nettype" (Network Type)
+
+ New network types may be registered with IANA if SDP needs to be
+ used in the context of non-internet environments. Whilst these
+ are not normally the preserve of IANA, there may be circumstances
+ when an Internet application needs to interoperate with a non-
+ internet application, such as when gatewaying an internet
+ telephony call into the PSTN. The number of network types should
+ be small and should be rarely extended. A new network type
+ cannot be registered without registering at least one address
+ type to be used with that network type. A new network type
+ registration MUST reference an RFC which gives details of the
+ network type and address type and specifies how and when they
+ would be used. Such an RFC MAY be Informational.
+
+ "addrtype" (Address Type)
+
+ New address types may be registered with IANA. An address type
+ is only meaningful in the context of a network type, and any
+ registration of an address type MUST specify a registered network
+ type, or be submitted along with a network type registration. A
+ new address type registration MUST reference an RFC giving
+ details of the syntax of the address type. Such an RFC MAY be
+ Informational. Address types are not expected to be registered
+ frequently.
+
+ Registration Procedure
+
+ To register a name the above guidelines should be followed regarding
+   the level of documentation that is required.  The
+ registration itself should be sent to IANA. Attribute registrations
+ should include the information given above. Other registrations
+ should include the following additional information:
+
+ o contact name, email address and telephone number
+
+ o name being registered (as it will appear in SDP)
+
+ o long-form name in English
+
+ o type of name ("media", "proto", "fmt", "bwtype", "nettype", or
+ "addrtype")
+
+ o a one paragraph explanation of the purpose of the registered name.
+
+ o a reference to the specification (eg RFC number) of the registered
+ name.
+
+ IANA may refer any registration to the IESG or to any appropriate
+ IETF working group for review, and may request revisions to be made
+ before a registration will be made.
+
+Appendix C: Authors' Addresses
+
+ Mark Handley
+ Information Sciences Institute
+ c/o MIT Laboratory for Computer Science
+ 545 Technology Square
+ Cambridge, MA 02139
+ United States
+ electronic mail: mjh@isi.edu
+
+ Van Jacobson
+ MS 46a-1121
+ Lawrence Berkeley Laboratory
+ Berkeley, CA 94720
+ United States
+ electronic mail: van@ee.lbl.gov
+
+Acknowledgments
+
+ Many people in the IETF MMUSIC working group have made comments and
+ suggestions contributing to this document. In particular, we would
+ like to thank Eve Schooler, Steve Casner, Bill Fenner, Allison
+ Mankin, Ross Finlayson, Peter Parnes, Joerg Ott, Carsten Bormann, Rob
+ Lanphier and Steve Hanna.
+
+References
+
+ [1] Mills, D., "Network Time Protocol (version 3) specification and
+ implementation", RFC 1305, March 1992.
+
+ [2] Schulzrinne, H., Casner, S., Frederick, R. and V. Jacobson, "RTP:
+ A Transport Protocol for Real-Time Applications", RFC 1889, January
+ 1996.
+
+ [3] Schulzrinne, H., "RTP Profile for Audio and Video Conferences
+ with Minimal Control", RFC 1890, January 1996
+
+ [4] Handley, M., "SAP - Session Announcement Protocol", Work in
+ Progress.
+
+ [5] V. Jacobson, S. McCanne, "vat - X11-based audio teleconferencing
+ tool" vat manual page, Lawrence Berkeley Laboratory, 1994.
+
+ [6] The Unicode Consortium, "The Unicode Standard -- Version 2.0",
+ Addison-Wesley, 1996.
+
+ [7] ISO/IEC 10646-1:1993. International Standard -- Information
+   technology -- Universal Multiple-Octet Coded Character Set (UCS) --
+   Part 1: Architecture and Basic Multilingual Plane.  Five amendments
+   and a technical corrigendum have been published up to now.  UTF-8
+ is described in Annex R, published as Amendment 2.
+
+ [8] Goldsmith, D., and M. Davis, "Using Unicode with MIME", RFC 1641,
+ July 1994.
+
+ [9] Yergeau, F., "UTF-8, a transformation format of Unicode and ISO
+ 10646", RFC 2044, October 1996.
+
+ [10] ITU-T Recommendation H.332 (1998): "Multimedia Terminal for
+ Receiving Internet-based H.323 Conferences", ITU, Geneva.
+
+ [11] Handley, M., Schooler, E., and H. Schulzrinne, "Session
+ Initiation Protocol (SIP)", Work in Progress.
+
+ [12] Schulzrinne, H., Rao, A., and R. Lanphier, "Real Time Streaming
+ Protocol (RTSP)", RFC 2326, April 1998.
+
+Full Copyright Statement
+
+ Copyright (C) The Internet Society (1998). All Rights Reserved.
+
+ This document and translations of it may be copied and furnished to
+ others, and derivative works that comment on or otherwise explain it
+ or assist in its implementation may be prepared, copied, published
+ and distributed, in whole or in part, without restriction of any
+ kind, provided that the above copyright notice and this paragraph are
+ included on all such copies and derivative works. However, this
+ document itself may not be modified in any way, such as by removing
+ the copyright notice or references to the Internet Society or other
+ Internet organizations, except as needed for the purpose of
+ developing Internet standards in which case the procedures for
+ copyrights defined in the Internet Standards process must be
+ followed, or as required to translate it into languages other than
+ English.
+
+ The limited permissions granted above are perpetual and will not be
+ revoked by the Internet Society or its successors or assigns.
+
+ This document and the information contained herein is provided on an
+ "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING
+ TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING
+ BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION
+ HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
diff --git a/src/modules/rtp/rfc2974.txt b/src/modules/rtp/rfc2974.txt
new file mode 100644
index 00000000..4a5aa626
--- /dev/null
+++ b/src/modules/rtp/rfc2974.txt
@@ -0,0 +1,1011 @@
+Network Working Group M. Handley
+Request for Comments: 2974 ACIRI
+Category: Experimental C. Perkins
+ USC/ISI
+ E. Whelan
+ UCL
+ October 2000
+
+
+ Session Announcement Protocol
+
+Status of this Memo
+
+ This memo defines an Experimental Protocol for the Internet
+ community. It does not specify an Internet standard of any kind.
+ Discussion and suggestions for improvement are requested.
+ Distribution of this memo is unlimited.
+
+Copyright Notice
+
+ Copyright (C) The Internet Society (2000). All Rights Reserved.
+
+Abstract
+
+ This document describes version 2 of the multicast session directory
+ announcement protocol, Session Announcement Protocol (SAP), and the
+ related issues affecting security and scalability that should be
+ taken into account by implementors.
+
+1 Introduction
+
+ In order to assist the advertisement of multicast multimedia
+ conferences and other multicast sessions, and to communicate the
+ relevant session setup information to prospective participants, a
+ distributed session directory may be used. An instance of such a
+ session directory periodically multicasts packets containing a
+ description of the session, and these advertisements are received by
+ other session directories such that potential remote participants can
+ use the session description to start the tools required to
+ participate in the session.
+
+ This memo describes the issues involved in the multicast announcement
+ of session description information and defines an announcement
+ protocol to be used. Sessions are described using the session
+ description protocol which is described in a companion memo [4].
+
+2 Terminology
+
+ A SAP announcer periodically multicasts an announcement packet to a
+ well known multicast address and port. The announcement is multicast
+ with the same scope as the session it is announcing, ensuring that
+ the recipients of the announcement are within the scope of the
+ session the announcement describes (bandwidth and other such
+ constraints permitting). This is also important for the scalability
+ of the protocol, as it keeps local session announcements local.
+
+ A SAP listener learns of the multicast scopes it is within (for
+ example, using the Multicast-Scope Zone Announcement Protocol [5])
+ and listens on the well known SAP address and port for those scopes.
+ In this manner, it will eventually learn of all the sessions being
+ announced, allowing those sessions to be joined.
+
+ The key words `MUST', `MUST NOT', `REQUIRED', `SHALL', `SHALL NOT',
+ `SHOULD', `SHOULD NOT', `RECOMMENDED', `MAY', and `OPTIONAL' in this
+ document are to be interpreted as described in [1].
+
+3 Session Announcement
+
+ As noted previously, a SAP announcer periodically sends an
+ announcement packet to a well known multicast address and port.
+ There is no rendezvous mechanism - the SAP announcer is not aware of
+ the presence or absence of any SAP listeners - and no additional
+ reliability is provided over the standard best-effort UDP/IP
+ semantics.
+
+ That announcement contains a session description and SHOULD contain
+ an authentication header. The session description MAY be encrypted
+ although this is NOT RECOMMENDED (see section 7).
+
+ A SAP announcement is multicast with the same scope as the session it
+ is announcing, ensuring that the recipients of the announcement are
+ within the scope of the session the announcement describes. There are
+ a number of possibilities:
+
+ IPv4 global scope sessions use multicast addresses in the range
+ 224.2.128.0 - 224.2.255.255 with SAP announcements being sent to
+ 224.2.127.254 (note that 224.2.127.255 is used by the obsolete
+ SAPv0 and MUST NOT be used).
+
+ IPv4 administrative scope sessions using administratively scoped IP
+ multicast as defined in [7]. The multicast address to be used for
+ announcements is the highest multicast address in the relevant
+ administrative scope zone. For example, if the scope range is
+ 239.16.32.0 - 239.16.33.255, then 239.16.33.255 is used for SAP
+ announcements.
+
+ IPv6 sessions are announced on the address FF0X:0:0:0:0:0:2:7FFE
+ where X is the 4-bit scope value. For example, an announcement
+ for a link-local session assigned the address
+ FF02:0:0:0:0:0:1234:5678, should be advertised on SAP address
+ FF02:0:0:0:0:0:2:7FFE.
+
+ Ensuring that a description is not used by a potential participant
+ outside the session scope is not addressed in this memo.
+
+ SAP announcements MUST be sent on port 9875 and SHOULD be sent with
+ an IP time-to-live of 255 (the use of TTL scoping for multicast is
+ discouraged [7]).
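+
+   As a non-normative sketch of the addressing rules above (editor's
+   illustration; the helper and constant names are assumptions, not
+   part of this memo), the SAP group for an IPv4 administrative scope
+   zone is simply the highest address in that zone, used together with
+   port 9875:
+
+      #include <stdint.h>
+      #include <stdio.h>
+      #include <netinet/in.h>
+      #include <arpa/inet.h>
+
+      #define SAP_PORT 9875
+      #define SAP_GLOBAL_GROUP "224.2.127.254"   /* IPv4 global scope */
+
+      /* zone_base and the result are in host byte order; prefix_len is
+       * e.g. 23 for the zone 239.16.32.0 - 239.16.33.255. */
+      static uint32_t sap_group_for_zone(uint32_t zone_base, int prefix_len) {
+          uint32_t host_mask = prefix_len >= 32 ? 0 : 0xFFFFFFFFu >> prefix_len;
+          return zone_base | host_mask;    /* highest address in the zone */
+      }
+
+      int main(void) {
+          struct in_addr a;
+          uint32_t base = ntohl(inet_addr("239.16.32.0"));
+          a.s_addr = htonl(sap_group_for_zone(base, 23));
+          /* prints: announce on 239.16.33.255:9875 */
+          printf("announce on %s:%d\n", inet_ntoa(a), SAP_PORT);
+          return 0;
+      }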
+
+ If a session uses addresses in multiple administrative scope ranges,
+ it is necessary for the announcer to send identical copies of the
+ announcement to each administrative scope range. It is up to the
+ listeners to parse such multiple announcements as the same session
+ (as identified by the SDP origin field, for example). The
+ announcement rate for each administrative scope range MUST be
+ calculated separately, as if the multiple announcements were
+ separate.
+
+ Multiple announcers may announce a single session, as an aid to
+ robustness in the face of packet loss and failure of one or more
+ announcers. The rate at which each announcer repeats its
+ announcement MUST be scaled back such that the total announcement
+ rate is equal to that which a single server would choose.
+ Announcements made in this manner MUST be identical.
+
+ If multiple announcements are being made for a session, then each
+ announcement MUST carry an authentication header signed by the same
+ key, or be treated as a completely separate announcement by
+ listeners.
+
+ An IPv4 SAP listener SHOULD listen on the IPv4 global scope SAP
+ address and on the SAP addresses for each IPv4 administrative scope
+ zone it is within. The discovery of administrative scope zones is
+ outside the scope of this memo, but it is assumed that each SAP
+ listener within a particular scope zone is aware of that scope zone.
+ A SAP listener which supports IPv6 SHOULD also listen to the IPv6 SAP
+ addresses.
+3.1 Announcement Interval
+
+ The time period between repetitions of an announcement is chosen such
+ that the total bandwidth used by all announcements on a single SAP
+ group remains below a preconfigured limit. If not otherwise
+ specified, the bandwidth limit SHOULD be assumed to be 4000 bits per
+ second.
+
+ Each announcer is expected to listen to other announcements in order
+ to determine the total number of sessions being announced on a
+ particular group. Sessions are uniquely identified by the
+ combination of the message identifier hash and originating source
+ fields of the SAP header (note that SAP v0 announcers always set the
+ message identifier hash to zero, and if such an announcement is
+ received the entire message MUST be compared to determine
+ uniqueness).
+
+ Announcements are made by periodic multicast to the group. The base
+ interval between announcements is derived from the number of
+ announcements being made in that group, the size of the announcement
+ and the configured bandwidth limit. The actual transmission time is
+ derived from this base interval as follows:
+
+ 1. The announcer initializes the variable tp to be the last time a
+ particular announcement was transmitted (or the current time if
+ this is the first time this announcement is to be made).
+
+ 2. Given a configured bandwidth limit in bits/second and an
+ announcement of ad_size bytes, the base announcement interval
+ in seconds is
+
+         interval = max(300, (8 * no_of_ads * ad_size) / limit)
+
+ 3. An offset is calculated based on the base announcement interval
+
+         offset = rand(interval * 2/3) - (interval / 3)
+
+ 4. The next transmission time for an announcement derived as
+
+         tn = tp + interval + offset
+
+ The announcer then sets a timer to expire at tn and waits. At time
+ tn the announcer SHOULD recalculate the next transmission time. If
+ the new value of tn is before the current time, the announcement is
+ sent immediately. Otherwise the transmission is rescheduled for the
+ new tn. This reconsideration prevents transient packet bursts on
+ startup and when a network partition heals.
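+
+   The timer computation in steps 1-4 can be condensed into a few lines
+   of C.  The sketch below is an editor's illustration only (names and
+   types are assumptions, and rand() stands in for a better random
+   number generator); the reconsideration step is the recalculation at
+   tn described in the paragraph above.
+
+      #include <stdlib.h>
+      #include <time.h>
+
+      /* limit: bandwidth limit in bits/second (default 4000),
+       * no_of_ads: announcements currently seen on this SAP group,
+       * ad_size: size of this announcement in bytes. */
+      static double sap_base_interval(unsigned no_of_ads, unsigned ad_size,
+                                      unsigned limit) {
+          double interval = (8.0 * no_of_ads * ad_size) / limit;
+          return interval > 300.0 ? interval : 300.0;   /* at least 300 s */
+      }
+
+      /* Next transmission time tn, given the last transmission time tp. */
+      static time_t sap_next_tx(time_t tp, unsigned no_of_ads,
+                                unsigned ad_size, unsigned limit) {
+          double interval = sap_base_interval(no_of_ads, ad_size, limit);
+          /* offset is uniform in [-interval/3, +interval/3] */
+          double offset = ((double) rand() / RAND_MAX) * (interval * 2.0 / 3.0)
+                          - interval / 3.0;
+          return tp + (time_t) (interval + offset);
+      }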
+
+4 Session Deletion
+
+ Sessions may be deleted in one of several ways:
+
+ Explicit Timeout The session description payload may contain
+ timestamp information specifying the start- and end-times of the
+ session. If the current time is later than the end-time of the
+ session, then the session SHOULD be deleted from the receiver's
+ session cache.
+
+ Implicit Timeout A session announcement message should be received
+ periodically for each session description in a receiver's session
+ cache. The announcement period can be predicted by the receiver
+ from the set of sessions currently being announced. If a session
+ announcement message has not been received for ten times the
+ announcement period, or one hour, whichever is the greater, then
+ the session is deleted from the receiver's session cache. The one
+ hour minimum is to allow for transient network partitionings.
+
+ Explicit Deletion A session deletion packet is received specifying
+ the session to be deleted. Session deletion packets SHOULD have a
+ valid authentication header, matching that used to authenticate
+ previous announcement packets. If this authentication is missing,
+ the deletion message SHOULD be ignored.
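+
+   As an illustration of the implicit timeout rule above (an editor's
+   sketch with assumed field names, not part of this memo), a
+   receiver's cache purge check might look like this:
+
+      #include <time.h>
+
+      struct sap_cache_entry {
+          time_t last_heard;       /* when the announcement was last received */
+          double announce_period;  /* predicted announcement period, seconds */
+      };
+
+      static int sap_entry_timed_out(const struct sap_cache_entry *e,
+                                     time_t now) {
+          double limit = 10.0 * e->announce_period;
+          if (limit < 3600.0)
+              limit = 3600.0;      /* one hour minimum */
+          return difftime(now, e->last_heard) > limit;
+      }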
+
+5 Session Modification
+
+ A pre-announced session can be modified by simply announcing the
+ modified session description. In this case, the version hash in the
+ SAP header MUST be changed to indicate to receivers that the packet
+ contents should be parsed (or decrypted and parsed if it is
+ encrypted). The session itself, as distinct from the session
+ announcement, is uniquely identified by the payload and not by the
+ message identifier hash in the header.
+
+ The same rules apply for session modification as for session
+ deletion:
+
+ o Either the modified announcement must contain an authentication
+ header signed by the same key as the cached session announcement
+ it is modifying, or:
+
+ o The cached session announcement must not contain an authentication
+ header, and the session modification announcement must originate
+ from the same host as the session it is modifying.
+
+ If an announcement is received containing an authentication header
+ and the cached announcement did not contain an authentication header,
+ or it contained a different authentication header, then the modified
+ announcement MUST be treated as a new and different announcement, and
+ displayed in addition to the un-authenticated announcement. The same
+ should happen if a modified packet without an authentication header
+ is received from a different source than the original announcement.
+
+ These rules prevent an announcement having an authentication header
+ added by a malicious user and then being deleted using that header,
+ and it also prevents a denial-of-service attack by someone putting
+ out a spoof announcement which, due to packet loss, reaches some
+ participants before the original announcement. Note that under such
+ circumstances, being able to authenticate the message originator is
+ the only way to discover which session is the correct session.
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | V=1 |A|R|T|E|C| auth len | msg id hash |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | |
+ : originating source (32 or 128 bits) :
+ : :
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | optional authentication data |
+ : .... :
+ *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
+ | optional payload type |
+ + +-+- - - - - - - - - -+
+ | |0| |
+ + - - - - - - - - - - - - - - - - - - - - +-+ |
+ | |
+ : payload :
+ | |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Figure 1: Packet format
+
+6 Packet Format
+
+ SAP data packets have the format described in figure 1.
+
+ V: Version Number. The version number field MUST be set to 1 (SAPv2
+ announcements which use only SAPv1 features are backwards
+ compatible, those which use new features can be detected by other
+ means, so the SAP version number doesn't need to change).
+
+ A: Address type. If the A bit is 0, the originating source field
+ contains a 32-bit IPv4 address. If the A bit is 1, the
+ originating source contains a 128-bit IPv6 address.
+
+ R: Reserved. SAP announcers MUST set this to 0, SAP listeners MUST
+ ignore the contents of this field.
+
+ T: Message Type. If the T field is set to 0 this is a session
+ announcement packet, if 1 this is a session deletion packet.
+
+ E: Encryption Bit. If the encryption bit is set to 1, the payload of
+ the SAP packet is encrypted. If this bit is 0 the packet is not
+ encrypted. See section 7 for details of the encryption process.
+
+ C: Compressed bit. If the compressed bit is set to 1, the payload is
+ compressed using the zlib compression algorithm [3]. If the
+ payload is to be compressed and encrypted, the compression MUST be
+ performed first.
+
+ Authentication Length. An 8 bit unsigned quantity giving the number
+ of 32 bit words following the main SAP header that contain
+ authentication data. If it is zero, no authentication header is
+ present.
+
+ Authentication data containing a digital signature of the packet,
+ with length as specified by the authentication length header
+ field. See section 8 for details of the authentication process.
+
+ Message Identifier Hash. A 16 bit quantity that, used in combination
+ with the originating source, provides a globally unique identifier
+ indicating the precise version of this announcement. The choice
+ of value for this field is not specified here, except that it MUST
+ be unique for each session announced by a particular SAP announcer
+ and it MUST be changed if the session description is modified (and
+ a session deletion message SHOULD be sent for the old version of
+ the session).
+
+ Earlier versions of SAP used a value of zero to mean that the hash
+ should be ignored and the payload should always be parsed. This
+ had the unfortunate side-effect that SAP announcers had to study
+ the payload data to determine how many unique sessions were being
+ advertised, making the calculation of the announcement interval
+      more complex than necessary.  In order to decouple the session
+ announcement process from the contents of those announcements, SAP
+ announcers SHOULD NOT set the message identifier hash to zero.
+
+ SAP listeners MAY silently discard messages if the message
+ identifier hash is set to zero.
+
+ Originating Source. This gives the IP address of the original source
+ of the message. This is an IPv4 address if the A field is set to
+ zero, else it is an IPv6 address. The address is stored in
+ network byte order.
+
+ SAPv0 permitted the originating source to be zero if the message
+      identifier hash was also zero.  This practice is no longer legal,
+ and SAP announcers SHOULD NOT set the originating source to zero.
+ SAP listeners MAY silently discard packets with the originating
+ source set to zero.
+
+ The header is followed by an optional payload type field and the
+ payload data itself. If the E or C bits are set in the header both
+ the payload type and payload are encrypted and/or compressed.
+
+ The payload type field is a MIME content type specifier, describing
+ the format of the payload. This is a variable length ASCII text
+ string, followed by a single zero byte (ASCII NUL). The payload type
+ SHOULD be included in all packets. If the payload type is
+ `application/sdp' both the payload type and its terminating zero byte
+ MAY be omitted, although this is intended for backwards compatibility
+ with SAP v1 listeners only.
+
+ The absence of a payload type field may be noted since the payload
+ section of such a packet will start with an SDP `v=0' field, which is
+ not a legal MIME content type specifier.
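+
+   A non-normative C sketch of pulling the fixed header fields of
+   figure 1 out of a received packet and locating the payload follows
+   (editor's illustration; decompression, decryption and signature
+   checking are omitted and all names are assumptions).  The `v=0'
+   test implements the heuristic described in the previous paragraph.
+
+      #include <stdint.h>
+      #include <string.h>
+      #include <arpa/inet.h>
+
+      struct sap_header {
+          unsigned version, a, r, t, e, c;
+          unsigned auth_len;           /* in 32-bit words */
+          uint16_t msg_id_hash;
+          const uint8_t *origin;       /* 4 bytes (IPv4) or 16 bytes (IPv6) */
+          const char *payload_type;    /* NULL if the field was omitted */
+          const uint8_t *payload;
+          size_t payload_len;
+      };
+
+      static int sap_parse(const uint8_t *p, size_t len, struct sap_header *h) {
+          const uint8_t *q;
+          size_t origin_len, fixed, rest;
+
+          if (len < 4)
+              return -1;
+          h->version  = p[0] >> 5;
+          h->a        = (p[0] >> 4) & 1;
+          h->r        = (p[0] >> 3) & 1;
+          h->t        = (p[0] >> 2) & 1;
+          h->e        = (p[0] >> 1) & 1;
+          h->c        =  p[0]       & 1;
+          h->auth_len = p[1];
+          memcpy(&h->msg_id_hash, p + 2, 2);
+          h->msg_id_hash = ntohs(h->msg_id_hash);
+
+          origin_len = h->a ? 16 : 4;
+          fixed = 4 + origin_len + 4 * (size_t) h->auth_len;
+          if (h->version != 1 || len < fixed)
+              return -1;
+          h->origin = p + 4;
+
+          q = p + fixed;
+          rest = len - fixed;
+          /* An SDP payload without a payload type field starts with "v=0",
+           * which is not a valid MIME type, so the two cases can be told
+           * apart. */
+          if (rest >= 3 && memcmp(q, "v=0", 3) == 0) {
+              h->payload_type = NULL;
+          } else {
+              const uint8_t *nul = memchr(q, 0, rest);
+              if (!nul)
+                  return -1;
+              h->payload_type = (const char *) q;
+              rest -= (size_t) (nul + 1 - q);
+              q = nul + 1;
+          }
+          h->payload = q;
+          h->payload_len = rest;
+          return 0;
+      }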
+
+ All implementations MUST support payloads of type `application/sdp'
+ [4]. Other formats MAY be supported although since there is no
+ negotiation in SAP an announcer which chooses to use a session
+ description format other than SDP cannot know that the listeners are
+ able to understand the announcement. A proliferation of payload
+ types in announcements has the potential to lead to severe
+ interoperability problems, and for this reason, the use of non-SDP
+ payloads is NOT RECOMMENDED.
+
+ If the packet is an announcement packet, the payload contains a
+ session description.
+
+ If the packet is a session deletion packet, the payload contains a
+ session deletion message. If the payload format is `application/sdp'
+ the deletion message is a single SDP line consisting of the origin
+ field of the announcement to be deleted.
+
+ It is desirable for the payload to be sufficiently small that SAP
+ packets do not get fragmented by the underlying network.
+ Fragmentation has a loss multiplier effect, which is known to
+ significantly affect the reliability of announcements. It is
+
+ RECOMMENDED that SAP packets are smaller than 1kByte in length,
+ although if it is known that announcements will use a network with a
+ smaller MTU than this, then that SHOULD be used as the maximum
+ recommended packet size.
+
+7 Encrypted Announcements
+
+ An announcement is received by all listeners in the scope to which it
+ is sent. If an announcement is encrypted, and many of the receivers
+ do not have the encryption key, there is a considerable waste of
+ bandwidth since those receivers cannot use the announcement they have
+ received. For this reason, the use of encrypted SAP announcements is
+ NOT RECOMMENDED on the global scope SAP group or on administrative
+ scope groups which may have many receivers which cannot decrypt those
+ announcements.
+
+ The opinion of the authors is that encrypted SAP is useful in special
+ cases only, and that the vast majority of scenarios where encrypted
+ SAP has been proposed may be better served by distributing session
+ details using another mechanism. There are, however, certain
+ scenarios where encrypted announcements may be useful. For this
+ reason, the encryption bit is included in the SAP header to allow
+ experimentation with encrypted announcements.
+
+ This memo does not specify details of the encryption algorithm to be
+ used or the means by which keys are generated and distributed. An
+ additional specification should define these, if it is desired to use
+ encrypted SAP.
+
+ Note that if an encrypted announcement is being announced via a
+ proxy, then there may be no way for the proxy to discover that the
+ announcement has been superseded, and so it may continue to relay the
+ old announcement in addition to the new announcement. SAP provides
+ no mechanism to chain modified encrypted announcements, so it is
+ advisable to announce the unmodified session as deleted for a short
+ time after the modification has occurred. This does not guarantee
+ that all proxies have deleted the session, and so receivers of
+ encrypted sessions should be prepared to discard old versions of
+ session announcements that they may receive. In most cases however,
+ the only stateful proxy will be local to (and known to) the sender,
+ and an additional (local-area) protocol involving a handshake for
+ such session modifications can be used to avoid this problem.
+
+ Session announcements that are encrypted with a symmetric algorithm
+ may allow a degree of privacy in the announcement of a session, but
+ it should be recognized that a user in possession of such a key can
+ pass it on to other users who should not be in possession of such a
+ key. Thus announcements to such a group of key holders cannot be
+ assumed to have come from an authorized key holder unless there is an
+ appropriate authentication header signed by an authorized key holder.
+ In addition the recipients of such encrypted announcements cannot be
+ assumed to only be authorized key holders. Such encrypted
+ announcements do not provide any real security unless all of the
+ authorized key holders are trusted to maintain security of such
+ session directory keys. This property is shared by the multicast
+ session tools themselves, where it is possible for an un-trustworthy
+ member of the session to pass on encryption keys to un-authorized
+ users. However it is likely that keys used for the session tools
+ will be more short lived than those used for session directories.
+
+ Similar considerations should apply when session announcements are
+ encrypted with an asymmetric algorithm, but then it is possible to
+ restrict the possessor(s) of the private key, so that announcements
+ to a key-holder group can not be made, even if one of the untrusted
+ members of the group proves to be un-trustworthy.
+
+ 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | V=1 |P| Auth | |
+ +-+-+-+-+-+-+-+-+ |
+ | Format specific authentication subheader |
+ : .................. :
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Figure 2: Format of the authentication data in the SAP header
+
+8 Authenticated Announcements
+
+ The authentication header can be used for two purposes:
+
+ o Verification that changes to a session description or deletion of
+ a session are permitted.
+
+ o Authentication of the identity of the session creator.
+
+ In some circumstances only verification is possible because a
+ certificate signed by a mutually trusted person or authority is not
+ available. However, under such circumstances, the session originator
+ may still be authenticated to be the same as the session originator
+ of previous sessions claiming to be from the same person. This may
+ or may not be sufficient depending on the purpose of the session and
+ the people involved.
+
+ Clearly the key used for the authentication should not be trusted to
+ belong to the session originator unless it has been separately
+ authenticated by some other means, such as being certified by a
+ trusted third party. Such certificates are not normally included in
+ an SAP header because they take more space than can normally be
+ afforded in an SAP packet, and such verification must therefore take
+ place by some other mechanism. However, as certified public keys are
+ normally locally cached, authentication of a particular key only has
+ to take place once, rather than every time the session directory
+ retransmits the announcement.
+
+ SAP is not tied to any single authentication mechanism.
+ Authentication data in the header is self-describing, but the precise
+ format depends on the authentication mechanism in use. The generic
+ format of the authentication data is given in figure 2. The
+ structure of the format specific authentication subheader, using both
+ the PGP and the CMS formats, is discussed in sections 8.1 and 8.2
+ respectively. Additional formats may be added in future.
+
+ Version Number, V: The version number of the authentication format
+ specified by this memo is 1.
+
+ Padding Bit, P: If necessary the authentication data is padded to be
+ a multiple of 32 bits and the padding bit is set. In this case
+ the last byte of the authentication data contains the number of
+ padding bytes (including the last byte) that must be discarded.
+
+ Authentication Type, Auth: The authentication type is a 4 bit
+ encoded field that denotes the authentication infrastructure the
+ sender expects the recipients to use to check the authenticity and
+ integrity of the information. This defines the format of the
+ authentication subheader and can take the values: 0 = PGP format,
+ 1 = CMS format. All other values are undefined and SHOULD be
+ ignored.
+
+ If a SAP packet is to be compressed or encrypted, this MUST be done
+ before the authentication is added.
+
+ The digital signature in the authentication data MUST be calculated
+ over the entire packet, including the header. The authentication
+ length MUST be set to zero and the authentication data excluded when
+ calculating the digital signature.
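+
+   As a non-normative sketch (editor's illustration; names are
+   assumptions and the actual PGP or CMS verification is not shown),
+   the following C fragment interprets the first byte of figure 2,
+   strips any padding, and rebuilds the byte string the digital
+   signature covers, i.e. the packet with the authentication length
+   set to zero and the authentication data removed:
+
+      #include <stdint.h>
+      #include <stdlib.h>
+      #include <string.h>
+
+      /* auth points at the authentication data, auth_words is the SAP
+       * header's authentication length field.  The low 4 bits of
+       * auth[0] give the authentication type (0 = PGP, 1 = CMS).
+       * Returns the length of the signature data after removing any
+       * padding, or 0 on error. */
+      static size_t sap_auth_signature_len(const uint8_t *auth,
+                                           unsigned auth_words) {
+          size_t len = 4u * auth_words;
+          if (len == 0 || (auth[0] >> 5) != 1)    /* auth format version */
+              return 0;
+          if ((auth[0] >> 4) & 1) {               /* P bit set: drop padding */
+              uint8_t pad = auth[len - 1];        /* last byte = pad count */
+              if (pad == 0 || pad > len)
+                  return 0;
+              len -= pad;
+          }
+          return len;
+      }
+
+      /* Copy of the packet with the authentication length zeroed and the
+       * authentication data stripped; this is what the signature signs. */
+      static uint8_t *sap_signed_data(const uint8_t *pkt, size_t len,
+                                      unsigned auth_words, size_t *out_len) {
+          size_t auth_bytes, origin_len, auth_off;
+          uint8_t *buf;
+
+          if (len < 4)
+              return NULL;
+          auth_bytes = 4u * auth_words;
+          origin_len = (pkt[0] & 0x10) ? 16 : 4;  /* A bit */
+          auth_off = 4 + origin_len;
+          if (len < auth_off + auth_bytes)
+              return NULL;
+          buf = malloc(len - auth_bytes);
+          if (!buf)
+              return NULL;
+          memcpy(buf, pkt, auth_off);
+          buf[1] = 0;                             /* authentication length := 0 */
+          memcpy(buf + auth_off, pkt + auth_off + auth_bytes,
+                 len - auth_off - auth_bytes);
+          *out_len = len - auth_bytes;
+          return buf;
+      }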
+
+ It is to be expected that sessions may be announced by a number of
+ different mechanisms, not only SAP. For example, a session
+   description may be placed on a web page, sent by email or conveyed in a
+ session initiation protocol. To ease interoperability with these
+ other mechanisms, application level security is employed, rather than
+ using IPsec authentication headers.
+
+8.1 PGP Authentication
+
+ A full description of the PGP protocol can be found in [2]. When
+ using PGP for SAP authentication the basic format specific
+ authentication subheader comprises a digital signature packet as
+ described in [2]. The signature type MUST be 0x01 which means the
+ signature is that of a canonical text document.
+
+8.2 CMS Authentication
+
+ A full description of the Cryptographic Message Syntax can be found
+ in [6]. The format specific authentication subheader will, in the
+ CMS case, have an ASN.1 ContentInfo type with the ContentType being
+ signedData.
+
+ Use is made of the option available in PKCS#7 to leave the content
+ itself blank as the content which is signed is already present in the
+ packet. Inclusion of it within the SignedData type would duplicate
+ this data and increase the packet length unnecessarily. In addition
+ this allows recipients with either no interest in the authentication,
+ or with no mechanism for checking it, to more easily skip the
+ authentication information.
+
+ There SHOULD be only one signerInfo and related fields corresponding
+ to the originator of the SAP announcement. The signingTime SHOULD be
+ present as a signedAttribute. However, due to the strict size
+ limitations on the size of SAP packets, certificates and CRLs SHOULD
+ NOT be included in the signedData structure. It is expected that
+ users of the protocol will have other methods for certificate and CRL
+ distribution.
+
+9 Scalability and caching
+
+ SAP is intended to announce the existence of long-lived wide-area
+ multicast sessions. It is not an especially timely protocol:
+ sessions are announced by periodic multicast with a repeat rate on
+ the order of tens of minutes, and no enhanced reliability over UDP.
+ This leads to a long startup delay before a complete set of
+ announcements is heard by a listener. This delay is clearly
+ undesirable for interactive browsing of announced sessions.
+
+ In order to reduce the delays inherent in SAP, it is recommended that
+ proxy caches are deployed. A SAP proxy cache is expected to listen
+ to all SAP groups in its scope, and to maintain an up-to-date list of
+ all announced sessions along with the time each announcement was last
+   received.  When a new SAP listener starts, it should contact its
+   local proxy to download this information, which is then sufficient
+   for it to process future announcements directly, as if it had been
+ continually listening.
+
+ The protocol by which a SAP listener contacts its local proxy cache
+ is not specified here.
+
+10 Security Considerations
+
+ SAP contains mechanisms for ensuring integrity of session
+ announcements, for authenticating the origin of an announcement and
+ for encrypting such announcements (sections 7 and 8).
+
+ As stated in section 5, if a session modification announcement is
+ received that contains a valid authentication header, but which is
+ not signed by the original creator of the session, then the session
+ must be treated as a new session in addition to the original session
+ with the same SDP origin information unless the originator of one of
+ the session descriptions can be authenticated using a certificate
+ signed by a trusted third party. If this were not done, there would
+ be a possible denial of service attack whereby a party listens for
+ new announcements, strips off the original authentication header,
+ modifies the session description, adds a new authentication header
+ and re-announces the session. If a rule was imposed that such spoof
+ announcements were ignored, then if packet loss or late starting of a
+ session directory instance caused the original announcement to fail
+ to arrive at a site, but the spoof announcement did so, this would
+ then prevent the original announcement from being accepted at that
+ site.
+
+ A similar denial-of-service attack is possible if a session
+ announcement receiver relies completely on the originating source and
+ hash fields to indicate change, and fails to parse the remainder of
+ announcements for which it has seen the origin/hash combination
+ before.
+
+ A denial of service attack is possible from a malicious site close to
+ a legitimate site which is making a session announcement. This can
+ happen if the malicious site floods the legitimate site with huge
+ numbers of (illegal) low TTL announcements describing high TTL
+ sessions. This may reduce the session announcement rate of the
+ legitimate announcement to below a tenth of the rate expected at
+ remote sites and therefore cause the session to time out. Such an
+ attack is likely to be easily detectable, and we do not provide any
+ mechanism here to prevent it.
+
+A. Summary of differences between SAPv0 and SAPv1
+
+ For this purpose SAPv0 is defined as the protocol in use by version
+ 2.2 of the session directory tool, sdr. SAPv1 is the protocol
+ described in the 19 November 1996 version of this memo. The packet
+ headers of SAP messages are the same in V0 and V1 in that a V1 tool
+ can parse a V0 announcement header but not vice-versa. In SAPv0, the
+ fields have the following values:
+
+ o Version Number: 0
+
+ o Message Type: 0 (Announcement)
+
+ o Authentication Type: 0 (No Authentication)
+
+ o Encryption Bit: 0 (No Encryption)
+
+ o Compression Bit: 0 (No compression)
+
+ o Message Id Hash: 0 (No Hash Specified)
+
+ o Originating Source: 0 (No source specified, announcement has
+ not been relayed)
+
+B. Summary of differences between SAPv1 and SAPv2
+
+ The packet headers of SAP messages are the same in V1 and V2 in that
+ a V2 tool can parse a V1 announcement header but not necessarily
+ vice-versa.
+
+ o The A bit has been added to the SAP header, replacing one of the
+ bits of the SAPv1 message type field. If set to zero the
+ announcement is of an IPv4 session, and the packet is backwards
+ compatible with SAPv1. If set to one the announcement is of an
+ IPv6 session, and SAPv1 listeners (which do not support IPv6) will
+ see this as an illegal message type (MT) field.
+
+ o The second bit of the message type field in SAPv1 has been
+ replaced by a reserved, must-be-zero, bit. This bit was unused in
+ SAPv1, so this change just codifies existing usage.
+
+ o SAPv1 specified encryption of the payload. SAPv2 includes the E
+ bit in the SAP header to indicate that the payload is encrypted,
+ but does not specify any details of the encryption.
+
+ o SAPv1 allowed the message identifier hash and originating source
+ fields to be set to zero, for backwards compatibility. This is no
+ longer legal.
+
+ o SAPv1 specified gzip compression. SAPv2 uses zlib (the only known
+ implementation of SAP compression used zlib, and gzip compression
+ was a mistake).
+
+ o SAPv2 provides a more complete specification for authentication.
+
+ o SAPv2 allows for non-SDP payloads to be transported. SAPv1
+ required that the payload was SDP.
+
+    o SAPv1 included a timeout field for encrypted announcements; SAPv2
+      does not (and relies on explicit deletion messages or implicit
+      timeouts).
+
+C. Acknowledgements
+
+ SAP and SDP were originally based on the protocol used by the sd
+ session directory from Van Jacobson at LBNL. Version 1 of SAP was
+ designed by Mark Handley as part of the European Commission MICE
+ (Esprit 7602) and MERCI (Telematics 1007) projects. Version 2
+ includes authentication features developed by Edmund Whelan, Goli
+ Montasser-Kohsari and Peter Kirstein as part of the European
+ Commission ICE-TEL project (Telematics 1005), and support for IPv6
+ developed by Maryann P. Maher and Colin Perkins.
+
+D. Authors' Addresses
+
+ Mark Handley
+ AT&T Center for Internet Research at ICSI,
+ International Computer Science Institute,
+ 1947 Center Street, Suite 600,
+ Berkeley, CA 94704, USA
+
+ EMail: mjh@aciri.org
+
+
+ Colin Perkins
+ USC Information Sciences Institute
+ 4350 N. Fairfax Drive, Suite 620
+ Arlington, VA 22203, USA
+
+ EMail: csp@isi.edu
+
+
+ Edmund Whelan
+ Department of Computer Science,
+ University College London,
+ Gower Street,
+ London, WC1E 6BT, UK
+
+ EMail: e.whelan@cs.ucl.ac.uk
+
+References
+
+ [1] Bradner, S., "Key words for use in RFCs to indicate requirement
+ levels", BCP 14, RFC 2119, March 1997.
+
+ [2] Callas, J., Donnerhacke, L., Finney, H. and R. Thayer. "OpenPGP
+ message format", RFC 2440, November 1998.
+
+ [3] Deutsch, P. and J.-L. Gailly, "Zlib compressed data format
+ specification version 3.3", RFC 1950, May 1996.
+
+ [4] Handley, M. and V. Jacobson, "SDP: Session Description Protocol",
+ RFC 2327, April 1998.
+
+ [5] Handley, M., Thaler, D. and R. Kermode, "Multicast-scope zone
+ announcement protocol (MZAP)", RFC 2776, February 2000.
+
+ [6] Housley, R., "Cryptographic message syntax", RFC 2630, June 1999.
+
+ [7] Mayer, D., "Administratively scoped IP multicast", RFC 2365, July
+ 1998.
+
+Full Copyright Statement
+
+ Copyright (C) The Internet Society (2000). All Rights Reserved.
+
+ This document and translations of it may be copied and furnished to
+ others, and derivative works that comment on or otherwise explain it
+ or assist in its implementation may be prepared, copied, published
+ and distributed, in whole or in part, without restriction of any
+ kind, provided that the above copyright notice and this paragraph are
+ included on all such copies and derivative works. However, this
+ document itself may not be modified in any way, such as by removing
+ the copyright notice or references to the Internet Society or other
+ Internet organizations, except as needed for the purpose of
+ developing Internet standards in which case the procedures for
+ copyrights defined in the Internet Standards process must be
+ followed, or as required to translate it into languages other than
+ English.
+
+ The limited permissions granted above are perpetual and will not be
+ revoked by the Internet Society or its successors or assigns.
+
+ This document and the information contained herein is provided on an
+ "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING
+ TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING
+ BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION
+ HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+
+Acknowledgement
+
+ Funding for the RFC Editor function is currently provided by the
+ Internet Society.
diff --git a/src/modules/rtp/rfc3550.txt b/src/modules/rtp/rfc3550.txt
new file mode 100644
index 00000000..165736cf
--- /dev/null
+++ b/src/modules/rtp/rfc3550.txt
@@ -0,0 +1,5827 @@
+Network Working Group H. Schulzrinne
+Request for Comments: 3550 Columbia University
+Obsoletes: 1889 S. Casner
+Category: Standards Track Packet Design
+ R. Frederick
+ Blue Coat Systems Inc.
+ V. Jacobson
+ Packet Design
+ July 2003
+
+
+ RTP: A Transport Protocol for Real-Time Applications
+
+Status of this Memo
+
+ This document specifies an Internet standards track protocol for the
+ Internet community, and requests discussion and suggestions for
+ improvements. Please refer to the current edition of the "Internet
+ Official Protocol Standards" (STD 1) for the standardization state
+ and status of this protocol. Distribution of this memo is unlimited.
+
+Copyright Notice
+
+ Copyright (C) The Internet Society (2003). All Rights Reserved.
+
+Abstract
+
+ This memorandum describes RTP, the real-time transport protocol. RTP
+ provides end-to-end network transport functions suitable for
+ applications transmitting real-time data, such as audio, video or
+ simulation data, over multicast or unicast network services. RTP
+ does not address resource reservation and does not guarantee
+ quality-of-service for real-time services. The data transport is
+ augmented by a control protocol (RTCP) to allow monitoring of the
+ data delivery in a manner scalable to large multicast networks, and
+ to provide minimal control and identification functionality. RTP and
+ RTCP are designed to be independent of the underlying transport and
+ network layers. The protocol supports the use of RTP-level
+ translators and mixers.
+
+ Most of the text in this memorandum is identical to RFC 1889 which it
+ obsoletes. There are no changes in the packet formats on the wire,
+ only changes to the rules and algorithms governing how the protocol
+ is used. The biggest change is an enhancement to the scalable timer
+ algorithm for calculating when to send RTCP packets in order to
+ minimize transmission in excess of the intended rate when many
+ participants join a session simultaneously.
+
+Table of Contents
+
+ 1. Introduction ................................................ 4
+ 1.1 Terminology ............................................ 5
+ 2. RTP Use Scenarios ........................................... 5
+ 2.1 Simple Multicast Audio Conference ...................... 6
+ 2.2 Audio and Video Conference ............................. 7
+ 2.3 Mixers and Translators ................................. 7
+ 2.4 Layered Encodings ...................................... 8
+ 3. Definitions ................................................. 8
+ 4. Byte Order, Alignment, and Time Format ...................... 12
+ 5. RTP Data Transfer Protocol .................................. 13
+ 5.1 RTP Fixed Header Fields ................................ 13
+ 5.2 Multiplexing RTP Sessions .............................. 16
+ 5.3 Profile-Specific Modifications to the RTP Header ....... 18
+ 5.3.1 RTP Header Extension ............................ 18
+ 6. RTP Control Protocol -- RTCP ................................ 19
+ 6.1 RTCP Packet Format ..................................... 21
+ 6.2 RTCP Transmission Interval ............................. 24
+ 6.2.1 Maintaining the Number of Session Members ....... 28
+ 6.3 RTCP Packet Send and Receive Rules ..................... 28
+ 6.3.1 Computing the RTCP Transmission Interval ........ 29
+ 6.3.2 Initialization .................................. 30
+ 6.3.3 Receiving an RTP or Non-BYE RTCP Packet ......... 31
+ 6.3.4 Receiving an RTCP BYE Packet .................... 31
+ 6.3.5 Timing Out an SSRC .............................. 32
+ 6.3.6 Expiration of Transmission Timer ................ 32
+ 6.3.7 Transmitting a BYE Packet ....................... 33
+ 6.3.8 Updating we_sent ................................ 34
+ 6.3.9 Allocation of Source Description Bandwidth ...... 34
+ 6.4 Sender and Receiver Reports ............................ 35
+ 6.4.1 SR: Sender Report RTCP Packet ................... 36
+ 6.4.2 RR: Receiver Report RTCP Packet ................. 42
+ 6.4.3 Extending the Sender and Receiver Reports ....... 42
+ 6.4.4 Analyzing Sender and Receiver Reports ........... 43
+ 6.5 SDES: Source Description RTCP Packet ................... 45
+ 6.5.1 CNAME: Canonical End-Point Identifier SDES Item . 46
+ 6.5.2 NAME: User Name SDES Item ....................... 48
+ 6.5.3 EMAIL: Electronic Mail Address SDES Item ........ 48
+ 6.5.4 PHONE: Phone Number SDES Item ................... 49
+ 6.5.5 LOC: Geographic User Location SDES Item ......... 49
+ 6.5.6 TOOL: Application or Tool Name SDES Item ........ 49
+ 6.5.7 NOTE: Notice/Status SDES Item ................... 50
+ 6.5.8 PRIV: Private Extensions SDES Item .............. 50
+ 6.6 BYE: Goodbye RTCP Packet ............................... 51
+ 6.7 APP: Application-Defined RTCP Packet ................... 52
+ 7. RTP Translators and Mixers .................................. 53
+ 7.1 General Description .................................... 53
+ 7.2 RTCP Processing in Translators ......................... 55
+ 7.3 RTCP Processing in Mixers .............................. 57
+ 7.4 Cascaded Mixers ........................................ 58
+ 8. SSRC Identifier Allocation and Use .......................... 59
+ 8.1 Probability of Collision ............................... 59
+ 8.2 Collision Resolution and Loop Detection ................ 60
+ 8.3 Use with Layered Encodings ............................. 64
+ 9. Security .................................................... 65
+ 9.1 Confidentiality ........................................ 65
+ 9.2 Authentication and Message Integrity ................... 67
+ 10. Congestion Control .......................................... 67
+ 11. RTP over Network and Transport Protocols .................... 68
+ 12. Summary of Protocol Constants ............................... 69
+ 12.1 RTCP Packet Types ...................................... 70
+ 12.2 SDES Types ............................................. 70
+ 13. RTP Profiles and Payload Format Specifications .............. 71
+ 14. Security Considerations ..................................... 73
+ 15. IANA Considerations ......................................... 73
+ 16. Intellectual Property Rights Statement ...................... 74
+ 17. Acknowledgments ............................................. 74
+ Appendix A. Algorithms ........................................ 75
+ Appendix A.1 RTP Data Header Validity Checks ................... 78
+ Appendix A.2 RTCP Header Validity Checks ....................... 82
+ Appendix A.3 Determining Number of Packets Expected and Lost ... 83
+ Appendix A.4 Generating RTCP SDES Packets ...................... 84
+ Appendix A.5 Parsing RTCP SDES Packets ......................... 85
+ Appendix A.6 Generating a Random 32-bit Identifier ............. 85
+ Appendix A.7 Computing the RTCP Transmission Interval .......... 87
+ Appendix A.8 Estimating the Interarrival Jitter ................ 94
+ Appendix B. Changes from RFC 1889 ............................. 95
+ References ...................................................... 100
+ Normative References ............................................ 100
+ Informative References .......................................... 100
+ Authors' Addresses .............................................. 103
+ Full Copyright Statement ........................................ 104
+
+1. Introduction
+
+ This memorandum specifies the real-time transport protocol (RTP),
+ which provides end-to-end delivery services for data with real-time
+ characteristics, such as interactive audio and video. Those services
+ include payload type identification, sequence numbering, timestamping
+ and delivery monitoring. Applications typically run RTP on top of
+ UDP to make use of its multiplexing and checksum services; both
+ protocols contribute parts of the transport protocol functionality.
+ However, RTP may be used with other suitable underlying network or
+ transport protocols (see Section 11). RTP supports data transfer to
+ multiple destinations using multicast distribution if provided by the
+ underlying network.
+
+ Note that RTP itself does not provide any mechanism to ensure timely
+ delivery or provide other quality-of-service guarantees, but relies
+ on lower-layer services to do so. It does not guarantee delivery or
+ prevent out-of-order delivery, nor does it assume that the underlying
+ network is reliable and delivers packets in sequence. The sequence
+ numbers included in RTP allow the receiver to reconstruct the
+ sender's packet sequence, but sequence numbers might also be used to
+ determine the proper location of a packet, for example in video
+ decoding, without necessarily decoding packets in sequence.
+
+ While RTP is primarily designed to satisfy the needs of multi-
+ participant multimedia conferences, it is not limited to that
+ particular application. Storage of continuous data, interactive
+ distributed simulation, active badge, and control and measurement
+ applications may also find RTP applicable.
+
+ This document defines RTP, consisting of two closely-linked parts:
+
+ o the real-time transport protocol (RTP), to carry data that has
+ real-time properties.
+
+ o the RTP control protocol (RTCP), to monitor the quality of service
+ and to convey information about the participants in an on-going
+ session. The latter aspect of RTCP may be sufficient for "loosely
+ controlled" sessions, i.e., where there is no explicit membership
+ control and set-up, but it is not necessarily intended to support
+ all of an application's control communication requirements. This
+ functionality may be fully or partially subsumed by a separate
+ session control protocol, which is beyond the scope of this
+ document.
+
+ RTP represents a new style of protocol following the principles of
+ application level framing and integrated layer processing proposed by
+ Clark and Tennenhouse [10]. That is, RTP is intended to be malleable
+ to provide the information required by a particular application and
+ will often be integrated into the application processing rather than
+ being implemented as a separate layer. RTP is a protocol framework
+ that is deliberately not complete. This document specifies those
+ functions expected to be common across all the applications for which
+ RTP would be appropriate. Unlike conventional protocols in which
+ additional functions might be accommodated by making the protocol
+ more general or by adding an option mechanism that would require
+ parsing, RTP is intended to be tailored through modifications and/or
+ additions to the headers as needed. Examples are given in Sections
+ 5.3 and 6.4.3.
+
+ Therefore, in addition to this document, a complete specification of
+ RTP for a particular application will require one or more companion
+ documents (see Section 13):
+
+ o a profile specification document, which defines a set of payload
+ type codes and their mapping to payload formats (e.g., media
+ encodings). A profile may also define extensions or modifications
+ to RTP that are specific to a particular class of applications.
+ Typically an application will operate under only one profile. A
+ profile for audio and video data may be found in the companion RFC
+ 3551 [1].
+
+ o payload format specification documents, which define how a
+ particular payload, such as an audio or video encoding, is to be
+ carried in RTP.
+
+ A discussion of real-time services and algorithms for their
+ implementation as well as background discussion on some of the RTP
+ design decisions can be found in [11].
+
+1.1 Terminology
+
+ The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
+ "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
+ document are to be interpreted as described in BCP 14, RFC 2119 [2]
+ and indicate requirement levels for compliant RTP implementations.
+
+2. RTP Use Scenarios
+
+ The following sections describe some aspects of the use of RTP. The
+ examples were chosen to illustrate the basic operation of
+ applications using RTP, not to limit what RTP may be used for. In
+ these examples, RTP is carried on top of IP and UDP, and follows the
+ conventions established by the profile for audio and video specified
+ in the companion RFC 3551.
+
+2.1 Simple Multicast Audio Conference
+
+ A working group of the IETF meets to discuss the latest protocol
+ document, using the IP multicast services of the Internet for voice
+ communications. Through some allocation mechanism the working group
+ chair obtains a multicast group address and pair of ports. One port
+ is used for audio data, and the other is used for control (RTCP)
+ packets. This address and port information is distributed to the
+ intended participants. If privacy is desired, the data and control
+ packets may be encrypted as specified in Section 9.1, in which case
+ an encryption key must also be generated and distributed. The exact
+ details of these allocation and distribution mechanisms are beyond
+ the scope of RTP.
+
+ The audio conferencing application used by each conference
+ participant sends audio data in small chunks of, say, 20 ms duration.
+ Each chunk of audio data is preceded by an RTP header; RTP header and
+ data are in turn contained in a UDP packet. The RTP header indicates
+ what type of audio encoding (such as PCM, ADPCM or LPC) is contained
+ in each packet so that senders can change the encoding during a
+ conference, for example, to accommodate a new participant that is
+ connected through a low-bandwidth link or react to indications of
+ network congestion.
+
+ The Internet, like other packet networks, occasionally loses and
+ reorders packets and delays them by variable amounts of time. To
+ cope with these impairments, the RTP header contains timing
+ information and a sequence number that allow the receivers to
+ reconstruct the timing produced by the source, so that in this
+ example, chunks of audio are contiguously played out the speaker
+ every 20 ms. This timing reconstruction is performed separately for
+ each source of RTP packets in the conference. The sequence number
+ can also be used by the receiver to estimate how many packets are
+ being lost.
+
+ Since members of the working group join and leave during the
+ conference, it is useful to know who is participating at any moment
+ and how well they are receiving the audio data. For that purpose,
+ each instance of the audio application in the conference periodically
+ multicasts a reception report plus the name of its user on the RTCP
+ (control) port. The reception report indicates how well the current
+ speaker is being received and may be used to control adaptive
+ encodings. In addition to the user name, other identifying
+ information may also be included subject to control bandwidth limits.
+ A site sends the RTCP BYE packet (Section 6.6) when it leaves the
+ conference.
+
+2.2 Audio and Video Conference
+
+ If both audio and video media are used in a conference, they are
+ transmitted as separate RTP sessions. That is, separate RTP and RTCP
+ packets are transmitted for each medium using two different UDP port
+ pairs and/or multicast addresses. There is no direct coupling at the
+ RTP level between the audio and video sessions, except that a user
+ participating in both sessions should use the same distinguished
+ (canonical) name in the RTCP packets for both so that the sessions
+ can be associated.
+
+ One motivation for this separation is to allow some participants in
+ the conference to receive only one medium if they choose. Further
+ explanation is given in Section 5.2. Despite the separation,
+ synchronized playback of a source's audio and video can be achieved
+ using timing information carried in the RTCP packets for both
+ sessions.
+
+2.3 Mixers and Translators
+
+ So far, we have assumed that all sites want to receive media data in
+ the same format. However, this may not always be appropriate.
+ Consider the case where participants in one area are connected
+ through a low-speed link to the majority of the conference
+ participants who enjoy high-speed network access. Instead of forcing
+ everyone to use a lower-bandwidth, reduced-quality audio encoding, an
+ RTP-level relay called a mixer may be placed near the low-bandwidth
+ area. This mixer resynchronizes incoming audio packets to
+ reconstruct the constant 20 ms spacing generated by the sender, mixes
+ these reconstructed audio streams into a single stream, translates
+ the audio encoding to a lower-bandwidth one and forwards the lower-
+ bandwidth packet stream across the low-speed link. These packets
+ might be unicast to a single recipient or multicast on a different
+ address to multiple recipients. The RTP header includes a means for
+ mixers to identify the sources that contributed to a mixed packet so
+ that correct talker indication can be provided at the receivers.
+
+ Some of the intended participants in the audio conference may be
+ connected with high bandwidth links but might not be directly
+ reachable via IP multicast. For example, they might be behind an
+ application-level firewall that will not let any IP packets pass.
+ For these sites, mixing may not be necessary, in which case another
+ type of RTP-level relay called a translator may be used. Two
+ translators are installed, one on either side of the firewall, with
+ the outside one funneling all multicast packets received through a
+ secure connection to the translator inside the firewall. The
+ translator inside the firewall sends them again as multicast packets
+ to a multicast group restricted to the site's internal network.
+
+ Mixers and translators may be designed for a variety of purposes. An
+ example is a video mixer that scales the images of individual people
+ in separate video streams and composites them into one video stream
+ to simulate a group scene. Other examples of translation include the
+ connection of a group of hosts speaking only IP/UDP to a group of
+ hosts that understand only ST-II, or the packet-by-packet encoding
+ translation of video streams from individual sources without
+ resynchronization or mixing. Details of the operation of mixers and
+ translators are given in Section 7.
+
+2.4 Layered Encodings
+
+ Multimedia applications should be able to adjust the transmission
+ rate to match the capacity of the receiver or to adapt to network
+ congestion. Many implementations place the responsibility of rate-
+ adaptivity at the source. This does not work well with multicast
+ transmission because of the conflicting bandwidth requirements of
+ heterogeneous receivers. The result is often a least-common
+ denominator scenario, where the smallest pipe in the network mesh
+ dictates the quality and fidelity of the overall live multimedia
+ "broadcast".
+
+ Instead, responsibility for rate-adaptation can be placed at the
+ receivers by combining a layered encoding with a layered transmission
+ system. In the context of RTP over IP multicast, the source can
+ stripe the progressive layers of a hierarchically represented signal
+ across multiple RTP sessions each carried on its own multicast group.
+ Receivers can then adapt to network heterogeneity and control their
+ reception bandwidth by joining only the appropriate subset of the
+ multicast groups.
+
+ Details of the use of RTP with layered encodings are given in
+ Sections 6.3.9, 8.3 and 11.
+
+3. Definitions
+
+ RTP payload: The data transported by RTP in a packet, for
+ example audio samples or compressed video data. The payload
+ format and interpretation are beyond the scope of this document.
+
+ RTP packet: A data packet consisting of the fixed RTP header, a
+ possibly empty list of contributing sources (see below), and the
+ payload data. Some underlying protocols may require an
+ encapsulation of the RTP packet to be defined. Typically one
+ packet of the underlying protocol contains a single RTP packet,
+ but several RTP packets MAY be contained if permitted by the
+ encapsulation method (see Section 11).
+
+ RTCP packet: A control packet consisting of a fixed header part
+ similar to that of RTP data packets, followed by structured
+ elements that vary depending upon the RTCP packet type. The
+ formats are defined in Section 6. Typically, multiple RTCP
+ packets are sent together as a compound RTCP packet in a single
+ packet of the underlying protocol; this is enabled by the length
+ field in the fixed header of each RTCP packet.
+
+ Port: The "abstraction that transport protocols use to
+ distinguish among multiple destinations within a given host
+ computer. TCP/IP protocols identify ports using small positive
+ integers." [12] The transport selectors (TSEL) used by the OSI
+ transport layer are equivalent to ports. RTP depends upon the
+ lower-layer protocol to provide some mechanism such as ports to
+ multiplex the RTP and RTCP packets of a session.
+
+ Transport address: The combination of a network address and port
+ that identifies a transport-level endpoint, for example an IP
+ address and a UDP port. Packets are transmitted from a source
+ transport address to a destination transport address.
+
+ RTP media type: An RTP media type is the collection of payload
+ types which can be carried within a single RTP session. The RTP
+ Profile assigns RTP media types to RTP payload types.
+
+ Multimedia session: A set of concurrent RTP sessions among a
+ common group of participants. For example, a videoconference
+ (which is a multimedia session) may contain an audio RTP session
+ and a video RTP session.
+
+ RTP session: An association among a set of participants
+ communicating with RTP. A participant may be involved in multiple
+ RTP sessions at the same time. In a multimedia session, each
+ medium is typically carried in a separate RTP session with its own
+ RTCP packets unless the encoding itself multiplexes multiple
+ media into a single data stream. A participant distinguishes
+ multiple RTP sessions by reception of different sessions using
+ different pairs of destination transport addresses, where a pair
+ of transport addresses comprises one network address plus a pair
+ of ports for RTP and RTCP. All participants in an RTP session may
+ share a common destination transport address pair, as in the case
+ of IP multicast, or the pairs may be different for each
+ participant, as in the case of individual unicast network
+ addresses and port pairs. In the unicast case, a participant may
+ receive from all other participants in the session using the same
+ pair of ports, or may use a distinct pair of ports for each.
+
+ The distinguishing feature of an RTP session is that each
+ maintains a full, separate space of SSRC identifiers (defined
+ next). The set of participants included in one RTP session
+ consists of those that can receive an SSRC identifier transmitted
+ by any one of the participants either in RTP as the SSRC or a CSRC
+ (also defined below) or in RTCP. For example, consider a three-
+ party conference implemented using unicast UDP with each
+ participant receiving from the other two on separate port pairs.
+ If each participant sends RTCP feedback about data received from
+ one other participant only back to that participant, then the
+ conference is composed of three separate point-to-point RTP
+ sessions. If each participant provides RTCP feedback about its
+ reception of one other participant to both of the other
+ participants, then the conference is composed of one multi-party
+ RTP session. The latter case simulates the behavior that would
+ occur with IP multicast communication among the three
+ participants.
+
+ The RTP framework allows the variations defined here, but a
+ particular control protocol or application design will usually
+ impose constraints on these variations.
+
+ Synchronization source (SSRC): The source of a stream of RTP
+ packets, identified by a 32-bit numeric SSRC identifier carried in
+ the RTP header so as not to be dependent upon the network address.
+ All packets from a synchronization source form part of the same
+ timing and sequence number space, so a receiver groups packets by
+ synchronization source for playback. Examples of synchronization
+ sources include the sender of a stream of packets derived from a
+ signal source such as a microphone or a camera, or an RTP mixer
+ (see below). A synchronization source may change its data format,
+ e.g., audio encoding, over time. The SSRC identifier is a
+ randomly chosen value meant to be globally unique within a
+ particular RTP session (see Section 8). A participant need not
+ use the same SSRC identifier for all the RTP sessions in a
+ multimedia session; the binding of the SSRC identifiers is
+ provided through RTCP (see Section 6.5.1). If a participant
+ generates multiple streams in one RTP session, for example from
+ separate video cameras, each MUST be identified as a different
+ SSRC.
+
+ Contributing source (CSRC): A source of a stream of RTP packets
+ that has contributed to the combined stream produced by an RTP
+ mixer (see below). The mixer inserts a list of the SSRC
+ identifiers of the sources that contributed to the generation of a
+ particular packet into the RTP header of that packet. This list
+ is called the CSRC list. An example application is audio
+ conferencing where a mixer indicates all the talkers whose speech
+ was combined to produce the outgoing packet, allowing the receiver
+ to indicate the current talker, even though all the audio packets
+ contain the same SSRC identifier (that of the mixer).
+
+ End system: An application that generates the content to be sent
+ in RTP packets and/or consumes the content of received RTP
+ packets. An end system can act as one or more synchronization
+ sources in a particular RTP session, but typically only one.
+
+ Mixer: An intermediate system that receives RTP packets from one
+ or more sources, possibly changes the data format, combines the
+ packets in some manner and then forwards a new RTP packet. Since
+ the timing among multiple input sources will not generally be
+ synchronized, the mixer will make timing adjustments among the
+ streams and generate its own timing for the combined stream.
+ Thus, all data packets originating from a mixer will be identified
+ as having the mixer as their synchronization source.
+
+ Translator: An intermediate system that forwards RTP packets
+ with their synchronization source identifier intact. Examples of
+ translators include devices that convert encodings without mixing,
+ replicators from multicast to unicast, and application-level
+ filters in firewalls.
+
+ Monitor: An application that receives RTCP packets sent by
+ participants in an RTP session, in particular the reception
+ reports, and estimates the current quality of service for
+ distribution monitoring, fault diagnosis and long-term statistics.
+ The monitor function is likely to be built into the application(s)
+ participating in the session, but may also be a separate
+ application that does not otherwise participate and does not send
+ or receive the RTP data packets (since they are on a separate
+ port). These are called third-party monitors. It is also
+ acceptable for a third-party monitor to receive the RTP data
+ packets but not send RTCP packets or otherwise be counted in the
+ session.
+
+ Non-RTP means: Protocols and mechanisms that may be needed in
+ addition to RTP to provide a usable service. In particular, for
+ multimedia conferences, a control protocol may distribute
+ multicast addresses and keys for encryption, negotiate the
+ encryption algorithm to be used, and define dynamic mappings
+ between RTP payload type values and the payload formats they
+ represent for formats that do not have a predefined payload type
+ value. Examples of such protocols include the Session Initiation
+ Protocol (SIP) (RFC 3261 [13]), ITU Recommendation H.323 [14] and
+ applications using SDP (RFC 2327 [15]), such as RTSP (RFC 2326
+ [16]). For simple
+ applications, electronic mail or a conference database may also be
+ used. The specification of such protocols and mechanisms is
+ outside the scope of this document.
+
+4. Byte Order, Alignment, and Time Format
+
+ All integer fields are carried in network byte order, that is, most
+ significant byte (octet) first. This byte order is commonly known as
+ big-endian. The transmission order is described in detail in [3].
+ Unless otherwise noted, numeric constants are in decimal (base 10).
+
+ All header data is aligned to its natural length, i.e., 16-bit fields
+ are aligned on even offsets, 32-bit fields are aligned at offsets
+ divisible by four, etc. Octets designated as padding have the value
+ zero.
+
+ Wallclock time (absolute date and time) is represented using the
+ timestamp format of the Network Time Protocol (NTP), which is in
+ seconds relative to 0h UTC on 1 January 1900 [4]. The full
+ resolution NTP timestamp is a 64-bit unsigned fixed-point number with
+ the integer part in the first 32 bits and the fractional part in the
+ last 32 bits. In some fields where a more compact representation is
+ appropriate, only the middle 32 bits are used; that is, the low 16
+ bits of the integer part and the high 16 bits of the fractional part.
+ The high 16 bits of the integer part must be determined
+ independently.
+
+ An implementation is not required to run the Network Time Protocol in
+ order to use RTP. Other time sources, or none at all, may be used
+ (see the description of the NTP timestamp field in Section 6.4.1).
+ However, running NTP may be useful for synchronizing streams
+ transmitted from separate hosts.
+
+ The NTP timestamp will wrap around to zero some time in the year
+ 2036, but for RTP purposes, only differences between pairs of NTP
+ timestamps are used. So long as the pairs of timestamps can be
+ assumed to be within 68 years of each other, using modular arithmetic
+ for subtractions and comparisons makes the wraparound irrelevant.
+
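+ Non-normative example: a minimal C sketch of the representation
+ described above, assuming a POSIX struct timeval as the local time
+ source.  The constant 2208988800 is the number of seconds between
+ the NTP era (1 January 1900) and the Unix epoch (1 January 1970);
+ the function names are not defined by this specification.
+
+    #include <stdint.h>
+    #include <sys/time.h>
+
+    /* Seconds between 1 January 1900 and 1 January 1970. */
+    #define NTP_EPOCH_OFFSET 2208988800UL
+
+    /* Full 64-bit NTP timestamp: integer part in the upper 32 bits,
+       fraction in the lower 32 bits. */
+    static uint64_t timeval_to_ntp(const struct timeval *tv)
+    {
+        uint64_t sec  = (uint64_t) tv->tv_sec + NTP_EPOCH_OFFSET;
+        uint64_t frac = ((uint64_t) tv->tv_usec << 32) / 1000000;
+        return (sec << 32) | frac;
+    }
+
+    /* Compact form: low 16 bits of the integer part and high 16 bits
+       of the fraction, i.e., the middle 32 bits. */
+    static uint32_t ntp_compact(uint64_t ntp)
+    {
+        return (uint32_t) (ntp >> 16);
+    }
+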
+5. RTP Data Transfer Protocol
+
+5.1 RTP Fixed Header Fields
+
+ The RTP header has the following format:
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |V=2|P|X| CC |M| PT | sequence number |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | timestamp |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | synchronization source (SSRC) identifier |
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+ | contributing source (CSRC) identifiers |
+ | .... |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ The first twelve octets are present in every RTP packet, while the
+ list of CSRC identifiers is present only when inserted by a mixer.
+ The fields have the following meaning:
+
+ version (V): 2 bits
+ This field identifies the version of RTP. The version defined by
+ this specification is two (2). (The value 1 is used by the first
+ draft version of RTP and the value 0 is used by the protocol
+ initially implemented in the "vat" audio tool.)
+
+ padding (P): 1 bit
+ If the padding bit is set, the packet contains one or more
+ additional padding octets at the end which are not part of the
+ payload. The last octet of the padding contains a count of how
+ many padding octets should be ignored, including itself. Padding
+ may be needed by some encryption algorithms with fixed block sizes
+ or for carrying several RTP packets in a lower-layer protocol data
+ unit.
+
+ extension (X): 1 bit
+ If the extension bit is set, the fixed header MUST be followed by
+ exactly one header extension, with a format defined in Section
+ 5.3.1.
+
+ CSRC count (CC): 4 bits
+ The CSRC count contains the number of CSRC identifiers that follow
+ the fixed header.
+
+ marker (M): 1 bit
+ The interpretation of the marker is defined by a profile. It is
+ intended to allow significant events such as frame boundaries to
+ be marked in the packet stream. A profile MAY define additional
+ marker bits or specify that there is no marker bit by changing the
+ number of bits in the payload type field (see Section 5.3).
+
+ payload type (PT): 7 bits
+ This field identifies the format of the RTP payload and determines
+ its interpretation by the application. A profile MAY specify a
+ default static mapping of payload type codes to payload formats.
+ Additional payload type codes MAY be defined dynamically through
+ non-RTP means (see Section 3). A set of default mappings for
+ audio and video is specified in the companion RFC 3551 [1]. An
+ RTP source MAY change the payload type during a session, but this
+ field SHOULD NOT be used for multiplexing separate media streams
+ (see Section 5.2).
+
+ A receiver MUST ignore packets with payload types that it does not
+ understand.
+
+ sequence number: 16 bits
+ The sequence number increments by one for each RTP data packet
+ sent, and may be used by the receiver to detect packet loss and to
+ restore packet sequence. The initial value of the sequence number
+ SHOULD be random (unpredictable) to make known-plaintext attacks
+ on encryption more difficult, even if the source itself does not
+ encrypt according to the method in Section 9.1, because the
+ packets may flow through a translator that does. Techniques for
+ choosing unpredictable numbers are discussed in [17].
+
+ timestamp: 32 bits
+ The timestamp reflects the sampling instant of the first octet in
+ the RTP data packet. The sampling instant MUST be derived from a
+ clock that increments monotonically and linearly in time to allow
+ synchronization and jitter calculations (see Section 6.4.1). The
+ resolution of the clock MUST be sufficient for the desired
+ synchronization accuracy and for measuring packet arrival jitter
+ (one tick per video frame is typically not sufficient). The clock
+ frequency is dependent on the format of data carried as payload
+ and is specified statically in the profile or payload format
+ specification that defines the format, or MAY be specified
+ dynamically for payload formats defined through non-RTP means. If
+ RTP packets are generated periodically, the nominal sampling
+ instant as determined from the sampling clock is to be used, not a
+ reading of the system clock. As an example, for fixed-rate audio
+ the timestamp clock would likely increment by one for each
+ sampling period. If an audio application reads blocks covering
+ 160 sampling periods from the input device, the timestamp would be
+ increased by 160 for each such block, regardless of whether the
+ block is transmitted in a packet or dropped as silent.
+
+ The initial value of the timestamp SHOULD be random, as for the
+ sequence number. Several consecutive RTP packets will have equal
+ timestamps if they are (logically) generated at once, e.g., belong
+ to the same video frame. Consecutive RTP packets MAY contain
+ timestamps that are not monotonic if the data is not transmitted
+ in the order it was sampled, as in the case of MPEG interpolated
+ video frames. (The sequence numbers of the packets as transmitted
+ will still be monotonic.)
+
+ RTP timestamps from different media streams may advance at
+ different rates and usually have independent, random offsets.
+ Therefore, although these timestamps are sufficient to reconstruct
+ the timing of a single stream, directly comparing RTP timestamps
+ from different media is not effective for synchronization.
+ Instead, for each medium the RTP timestamp is related to the
+ sampling instant by pairing it with a timestamp from a reference
+ clock (wallclock) that represents the time when the data
+ corresponding to the RTP timestamp was sampled. The reference
+ clock is shared by all media to be synchronized. The timestamp
+ pairs are not transmitted in every data packet, but at a lower
+ rate in RTCP SR packets as described in Section 6.4.
+
+ The sampling instant is chosen as the point of reference for the
+ RTP timestamp because it is known to the transmitting endpoint and
+ has a common definition for all media, independent of encoding
+ delays or other processing. The purpose is to allow synchronized
+ presentation of all media sampled at the same time.
+
+ Applications transmitting stored data rather than data sampled in
+ real time typically use a virtual presentation timeline derived
+ from wallclock time to determine when the next frame or other unit
+ of each medium in the stored data should be presented. In this
+ case, the RTP timestamp would reflect the presentation time for
+ each unit. That is, the RTP timestamp for each unit would be
+ related to the wallclock time at which the unit becomes current on
+ the virtual presentation timeline. Actual presentation occurs
+ some time later as determined by the receiver.
+
+ An example describing live audio narration of prerecorded video
+ illustrates the significance of choosing the sampling instant as
+ the reference point. In this scenario, the video would be
+ presented locally for the narrator to view and would be
+ simultaneously transmitted using RTP. The "sampling instant" of a
+ video frame transmitted in RTP would be established by referencing
+ its timestamp to the wallclock time when that video frame was
+ presented to the narrator. The sampling instant for the audio RTP
+ packets containing the narrator's speech would be established by
+ referencing the same wallclock time when the audio was sampled.
+ The audio and video may even be transmitted by different hosts if
+ the reference clocks on the two hosts are synchronized by some
+ means such as NTP. A receiver can then synchronize presentation
+ of the audio and video packets by relating their RTP timestamps
+ using the timestamp pairs in RTCP SR packets.
+
+ SSRC: 32 bits
+ The SSRC field identifies the synchronization source. This
+ identifier SHOULD be chosen randomly, with the intent that no two
+ synchronization sources within the same RTP session will have the
+ same SSRC identifier. An example algorithm for generating a
+ random identifier is presented in Appendix A.6. Although the
+ probability of multiple sources choosing the same identifier is
+ low, all RTP implementations must be prepared to detect and
+ resolve collisions. Section 8 describes the probability of
+ collision along with a mechanism for resolving collisions and
+ detecting RTP-level forwarding loops based on the uniqueness of
+ the SSRC identifier. If a source changes its source transport
+ address, it must also choose a new SSRC identifier to avoid being
+ interpreted as a looped source (see Section 8.2).
+
+ CSRC list: 0 to 15 items, 32 bits each
+ The CSRC list identifies the contributing sources for the payload
+ contained in this packet. The number of identifiers is given by
+ the CC field. If there are more than 15 contributing sources,
+ only 15 can be identified. CSRC identifiers are inserted by
+ mixers (see Section 7.1), using the SSRC identifiers of
+ contributing sources. For example, for audio packets the SSRC
+ identifiers of all sources that were mixed together to create a
+ packet are listed, allowing correct talker indication at the
+ receiver.
+
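+ Non-normative example: a minimal sketch in C of how a receiver might
+ unpack the fixed twelve-octet header and the CSRC list using the bit
+ layout shown above.  The structure and function names are assumptions
+ of this sketch and are not defined by this specification; handling of
+ header extensions (Section 5.3.1) is left to the caller.
+
+    #include <stdint.h>
+    #include <stddef.h>
+
+    struct rtp_header {
+        unsigned version, padding, extension, csrc_count;
+        unsigned marker, payload_type;
+        uint16_t sequence;
+        uint32_t timestamp, ssrc;
+        uint32_t csrc[15];
+    };
+
+    /* Returns the offset of the payload in octets, or -1 if the
+       packet is too short or does not carry version 2.  If the P bit
+       is set, the last octet of the payload gives the number of
+       trailing padding octets to discard. */
+    static int rtp_parse(const uint8_t *buf, size_t len,
+                         struct rtp_header *h)
+    {
+        unsigned i;
+
+        if (len < 12)
+            return -1;
+
+        h->version      = buf[0] >> 6;
+        h->padding      = (buf[0] >> 5) & 1;
+        h->extension    = (buf[0] >> 4) & 1;
+        h->csrc_count   = buf[0] & 0x0f;
+        h->marker       = buf[1] >> 7;
+        h->payload_type = buf[1] & 0x7f;
+        h->sequence     = (uint16_t) ((buf[2] << 8) | buf[3]);
+        h->timestamp    = ((uint32_t) buf[4] << 24) |
+                          ((uint32_t) buf[5] << 16) |
+                          ((uint32_t) buf[6] << 8)  |
+                           (uint32_t) buf[7];
+        h->ssrc         = ((uint32_t) buf[8] << 24) |
+                          ((uint32_t) buf[9] << 16) |
+                          ((uint32_t) buf[10] << 8) |
+                           (uint32_t) buf[11];
+
+        if (h->version != 2 || len < 12 + 4 * (size_t) h->csrc_count)
+            return -1;
+
+        /* CSRC identifiers, present only when inserted by a mixer. */
+        for (i = 0; i < h->csrc_count; i++)
+            h->csrc[i] = ((uint32_t) buf[12 + 4*i]     << 24) |
+                         ((uint32_t) buf[12 + 4*i + 1] << 16) |
+                         ((uint32_t) buf[12 + 4*i + 2] << 8)  |
+                          (uint32_t) buf[12 + 4*i + 3];
+
+        return (int) (12 + 4 * h->csrc_count);
+    }
+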
+5.2 Multiplexing RTP Sessions
+
+ For efficient protocol processing, the number of multiplexing points
+ should be minimized, as described in the integrated layer processing
+ design principle [10]. In RTP, multiplexing is provided by the
+ destination transport address (network address and port number) which
+ is different for each RTP session. For example, in a teleconference
+ composed of audio and video media encoded separately, each medium
+ SHOULD be carried in a separate RTP session with its own destination
+ transport address.
+
+ Separate audio and video streams SHOULD NOT be carried in a single
+ RTP session and demultiplexed based on the payload type or SSRC
+ fields. Interleaving packets with different RTP media types but
+ using the same SSRC would introduce several problems:
+
+ 1. If, say, two audio streams shared the same RTP session and the
+ same SSRC value, and one were to change encodings and thus acquire
+ a different RTP payload type, there would be no general way of
+ identifying which stream had changed encodings.
+
+ 2. An SSRC is defined to identify a single timing and sequence number
+ space. Interleaving multiple payload types would require
+ different timing spaces if the media clock rates differ and would
+ require different sequence number spaces to tell which payload
+ type suffered packet loss.
+
+ 3. The RTCP sender and receiver reports (see Section 6.4) can only
+ describe one timing and sequence number space per SSRC and do not
+ carry a payload type field.
+
+ 4. An RTP mixer would not be able to combine interleaved streams of
+ incompatible media into one stream.
+
+ 5. Carrying multiple media in one RTP session precludes: the use of
+ different network paths or network resource allocations if
+ appropriate; reception of a subset of the media if desired, for
+ example just audio if video would exceed the available bandwidth;
+ and receiver implementations that use separate processes for the
+ different media, whereas using separate RTP sessions permits
+ either single- or multiple-process implementations.
+
+ Using a different SSRC for each medium but sending them in the same
+ RTP session would avoid the first three problems but not the last
+ two.
+
+ On the other hand, multiplexing multiple related sources of the same
+ medium in one RTP session using different SSRC values is the norm for
+ multicast sessions. The problems listed above don't apply: an RTP
+ mixer can combine multiple audio sources, for example, and the same
+ treatment is applicable for all of them. It may also be appropriate
+ to multiplex streams of the same medium using different SSRC values
+ in other scenarios where the last two problems do not apply.
+
+5.3 Profile-Specific Modifications to the RTP Header
+
+ The existing RTP data packet header is believed to be complete for
+ the set of functions required in common across all the application
+ classes that RTP might support. However, in keeping with the ALF
+ design principle, the header MAY be tailored through modifications or
+ additions defined in a profile specification while still allowing
+ profile-independent monitoring and recording tools to function.
+
+ o The marker bit and payload type field carry profile-specific
+ information, but they are allocated in the fixed header since many
+ applications are expected to need them and might otherwise have to
+ add another 32-bit word just to hold them. The octet containing
+ these fields MAY be redefined by a profile to suit different
+ requirements, for example with more or fewer marker bits. If
+ there are any marker bits, one SHOULD be located in the most
+ significant bit of the octet since profile-independent monitors
+ may be able to observe a correlation between packet loss patterns
+ and the marker bit.
+
+ o Additional information that is required for a particular payload
+ format, such as a video encoding, SHOULD be carried in the payload
+ section of the packet. This might be in a header that is always
+ present at the start of the payload section, or might be indicated
+ by a reserved value in the data pattern.
+
+ o If a particular class of applications needs additional
+ functionality independent of payload format, the profile under
+ which those applications operate SHOULD define additional fixed
+ fields to follow immediately after the SSRC field of the existing
+ fixed header. Those applications will be able to quickly and
+ directly access the additional fields while profile-independent
+ monitors or recorders can still process the RTP packets by
+ interpreting only the first twelve octets.
+
+ If it turns out that additional functionality is needed in common
+ across all profiles, then a new version of RTP should be defined to
+ make a permanent change to the fixed header.
+
+5.3.1 RTP Header Extension
+
+ An extension mechanism is provided to allow individual
+ implementations to experiment with new payload-format-independent
+ functions that require additional information to be carried in the
+ RTP data packet header. This mechanism is designed so that the
+ header extension may be ignored by other interoperating
+ implementations that have not been extended.
+
+ Note that this header extension is intended only for limited use.
+ Most potential uses of this mechanism would be better done another
+ way, using the methods described in the previous section. For
+ example, a profile-specific extension to the fixed header is less
+ expensive to process because it is not conditional nor in a variable
+ location. Additional information required for a particular payload
+ format SHOULD NOT use this header extension, but SHOULD be carried in
+ the payload section of the packet.
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | defined by profile | length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | header extension |
+ | .... |
+
+ If the X bit in the RTP header is one, a variable-length header
+ extension MUST be appended to the RTP header, following the CSRC list
+ if present. The header extension contains a 16-bit length field that
+ counts the number of 32-bit words in the extension, excluding the
+ four-octet extension header (therefore zero is a valid length). Only
+ a single extension can be appended to the RTP data header. To allow
+ multiple interoperating implementations to each experiment
+ independently with different header extensions, or to allow a
+ particular implementation to experiment with more than one type of
+ header extension, the first 16 bits of the header extension are left
+ open for distinguishing identifiers or parameters. The format of
+ these 16 bits is to be defined by the profile specification under
+ which the implementations are operating. This RTP specification does
+ not define any header extensions itself.
+
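+ Non-normative example: a sketch of how an implementation that does
+ not recognize a profile-defined header extension might skip it.  The
+ function name is an assumption of this sketch; 'p' is presumed to
+ point just past the fixed header and CSRC list, with 'remaining'
+ octets left in the packet.
+
+    #include <stdint.h>
+    #include <stddef.h>
+
+    /* Returns the number of octets occupied by the header extension,
+       or -1 if the packet is too short to contain it. */
+    static int rtp_skip_extension(const uint8_t *p, size_t remaining)
+    {
+        uint16_t length_words;
+        size_t total;
+
+        if (remaining < 4)
+            return -1;
+
+        /* The first 16 bits are defined by the profile in use; the
+           next 16 bits count the 32-bit words that follow, excluding
+           this four-octet extension header (zero is valid). */
+        length_words = (uint16_t) ((p[2] << 8) | p[3]);
+        total = 4 + 4 * (size_t) length_words;
+
+        return remaining < total ? -1 : (int) total;
+    }
+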
+6. RTP Control Protocol -- RTCP
+
+ The RTP control protocol (RTCP) is based on the periodic transmission
+ of control packets to all participants in the session, using the same
+ distribution mechanism as the data packets. The underlying protocol
+ MUST provide multiplexing of the data and control packets, for
+ example using separate port numbers with UDP. RTCP performs four
+ functions:
+
+ 1. The primary function is to provide feedback on the quality of the
+ data distribution. This is an integral part of the RTP's role as
+ a transport protocol and is related to the flow and congestion
+ control functions of other transport protocols (see Section 10 on
+ the requirement for congestion control). The feedback may be
+ directly useful for control of adaptive encodings [18,19], but
+ experiments with IP multicasting have shown that it is also
+ critical to get feedback from the receivers to diagnose faults in
+ the distribution. Sending reception feedback reports to all
+ participants allows one who is observing problems to evaluate
+ whether those problems are local or global. With a distribution
+ mechanism like IP multicast, it is also possible for an entity
+ such as a network service provider who is not otherwise involved
+ in the session to receive the feedback information and act as a
+ third-party monitor to diagnose network problems. This feedback
+ function is performed by the RTCP sender and receiver reports,
+ described below in Section 6.4.
+
+ 2. RTCP carries a persistent transport-level identifier for an RTP
+ source called the canonical name or CNAME, Section 6.5.1. Since
+ the SSRC identifier may change if a conflict is discovered or a
+ program is restarted, receivers require the CNAME to keep track of
+ each participant. Receivers may also require the CNAME to
+ associate multiple data streams from a given participant in a set
+ of related RTP sessions, for example to synchronize audio and
+ video. Inter-media synchronization also requires the NTP and RTP
+ timestamps included in RTCP packets by data senders.
+
+ 3. The first two functions require that all participants send RTCP
+ packets, therefore the rate must be controlled in order for RTP to
+ scale up to a large number of participants. By having each
+ participant send its control packets to all the others, each can
+ independently observe the number of participants. This number is
+ used to calculate the rate at which the packets are sent, as
+ explained in Section 6.2.
+
+ 4. A fourth, OPTIONAL function is to convey minimal session control
+ information, for example participant identification to be
+ displayed in the user interface. This is most likely to be useful
+ in "loosely controlled" sessions where participants enter and
+ leave without membership control or parameter negotiation. RTCP
+ serves as a convenient channel to reach all the participants, but
+ it is not necessarily expected to support all the control
+ communication requirements of an application. A higher-level
+ session control protocol, which is beyond the scope of this
+ document, may be needed.
+
+ Functions 1-3 SHOULD be used in all environments, but particularly in
+ the IP multicast environment. RTP application designers SHOULD avoid
+ mechanisms that can only work in unicast mode and will not scale to
+ larger numbers. Transmission of RTCP MAY be controlled separately
+ for senders and receivers, as described in Section 6.2, for cases
+ such as unidirectional links where feedback from receivers is not
+ possible.
+
+ Non-normative note: In the multicast routing approach
+ called Source-Specific Multicast (SSM), there is only one sender
+ per "channel" (a source address, group address pair), and
+ receivers (except for the channel source) cannot use multicast to
+ communicate directly with other channel members. The
+ recommendations here accommodate SSM only through Section 6.2's
+ option of turning off receivers' RTCP entirely. Future work will
+ specify adaptation of RTCP for SSM so that feedback from receivers
+ can be maintained.
+
+6.1 RTCP Packet Format
+
+ This specification defines several RTCP packet types to carry a
+ variety of control information:
+
+ SR: Sender report, for transmission and reception statistics from
+ participants that are active senders
+
+ RR: Receiver report, for reception statistics from participants
+ that are not active senders and in combination with SR for
+ active senders reporting on more than 31 sources
+
+ SDES: Source description items, including CNAME
+
+ BYE: Indicates end of participation
+
+ APP: Application-specific functions
+
+ Each RTCP packet begins with a fixed part similar to that of RTP data
+ packets, followed by structured elements that MAY be of variable
+ length according to the packet type but MUST end on a 32-bit
+ boundary. The alignment requirement and a length field in the fixed
+ part of each packet are included to make RTCP packets "stackable".
+ Multiple RTCP packets can be concatenated without any intervening
+ separators to form a compound RTCP packet that is sent in a single
+ packet of the lower layer protocol, for example UDP. There is no
+ explicit count of individual RTCP packets in the compound packet
+ since the lower layer protocols are expected to provide an overall
+ length to determine the end of the compound packet.
+
+ Each individual RTCP packet in the compound packet may be processed
+ independently with no requirements upon the order or combination of
+ packets. However, in order to perform the functions of the protocol,
+ the following constraints are imposed:
+
+ o Reception statistics (in SR or RR) should be sent as often as
+ bandwidth constraints will allow to maximize the resolution of the
+ statistics, therefore each periodically transmitted compound RTCP
+ packet MUST include a report packet.
+
+ o New receivers need to receive the CNAME for a source as soon as
+ possible to identify the source and to begin associating media for
+ purposes such as lip-sync, so each compound RTCP packet MUST also
+ include the SDES CNAME except when the compound RTCP packet is
+ split for partial encryption as described in Section 9.1.
+
+ o The number of packet types that may appear first in the compound
+ packet needs to be limited to increase the number of constant bits
+ in the first word and the probability of successfully validating
+ RTCP packets against misaddressed RTP data packets or other
+ unrelated packets.
+
+ Thus, all RTCP packets MUST be sent in a compound packet of at least
+ two individual packets, with the following format:
+
+ Encryption prefix: If and only if the compound packet is to be
+ encrypted according to the method in Section 9.1, it MUST be
+ prefixed by a random 32-bit quantity redrawn for every compound
+ packet transmitted. If padding is required for the encryption, it
+ MUST be added to the last packet of the compound packet.
+
+ SR or RR: The first RTCP packet in the compound packet MUST
+ always be a report packet to facilitate header validation as
+ described in Appendix A.2. This is true even if no data has been
+ sent or received, in which case an empty RR MUST be sent, and even
+ if the only other RTCP packet in the compound packet is a BYE.
+
+ Additional RRs: If the number of sources for which reception
+ statistics are being reported exceeds 31, the number that will fit
+ into one SR or RR packet, then additional RR packets SHOULD follow
+ the initial report packet.
+
+ SDES: An SDES packet containing a CNAME item MUST be included
+ in each compound RTCP packet, except as noted in Section 9.1.
+ Other source description items MAY optionally be included if
+ required by a particular application, subject to bandwidth
+ constraints (see Section 6.3.9).
+
+ BYE or APP: Other RTCP packet types, including those yet to be
+ defined, MAY follow in any order, except that BYE SHOULD be the
+ last packet sent with a given SSRC/CSRC. Packet types MAY appear
+ more than once.
+
+ An individual RTP participant SHOULD send only one compound RTCP
+ packet per report interval in order for the RTCP bandwidth per
+ participant to be estimated correctly (see Section 6.2), except when
+ the compound RTCP packet is split for partial encryption as described
+ in Section 9.1. If there are too many sources to fit all the
+ necessary RR packets into one compound RTCP packet without exceeding
+ the maximum transmission unit (MTU) of the network path, then only
+ the subset that will fit into one MTU SHOULD be included in each
+ interval. The subsets SHOULD be selected round-robin across multiple
+ intervals so that all sources are reported.
+
+ It is RECOMMENDED that translators and mixers combine individual RTCP
+ packets from the multiple sources they are forwarding into one
+ compound packet whenever feasible in order to amortize the packet
+ overhead (see Section 7). An example RTCP compound packet as might
+ be produced by a mixer is shown in Fig. 1. If the overall length of
+ a compound packet would exceed the MTU of the network path, it SHOULD
+ be segmented into multiple shorter compound packets to be transmitted
+ in separate packets of the underlying protocol. This does not impair
+ the RTCP bandwidth estimation because each compound packet represents
+ at least one distinct participant. Note that each of the compound
+ packets MUST begin with an SR or RR packet.
+
+ An implementation SHOULD ignore incoming RTCP packets with types
+ unknown to it. Additional RTCP packet types may be registered with
+ the Internet Assigned Numbers Authority (IANA) as described in
+ Section 15.
+
+ if encrypted: random 32-bit integer
+ |
+ |[--------- packet --------][---------- packet ----------][-packet-]
+ |
+ | receiver chunk chunk
+ V reports item item item item
+ --------------------------------------------------------------------
+ R[SR #sendinfo #site1#site2][SDES #CNAME PHONE #CNAME LOC][BYE##why]
+ --------------------------------------------------------------------
+ | |
+ |<----------------------- compound packet ----------------------->|
+ |<-------------------------- UDP packet ------------------------->|
+
+ #: SSRC/CSRC identifier
+
+ Figure 1: Example of an RTCP compound packet
+
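+ Non-normative example: a sketch of walking the individual packets in
+ a compound RTCP packet using the common version and length fields,
+ and checking that the first packet is an SR or RR (packet type values
+ 200 and 201, assigned in Section 12.1).  The names used here are not
+ defined by this specification.
+
+    #include <stdint.h>
+    #include <stddef.h>
+
+    #define RTCP_PT_SR 200
+    #define RTCP_PT_RR 201
+
+    /* Returns the number of individual RTCP packets found, or -1 if
+       the compound is malformed or does not begin with a report
+       packet. */
+    static int rtcp_walk_compound(const uint8_t *buf, size_t len)
+    {
+        size_t off = 0;
+        int count = 0;
+
+        while (off + 4 <= len) {
+            unsigned version = buf[off] >> 6;
+            unsigned pt      = buf[off + 1];
+            /* The length field counts 32-bit words minus one,
+               including the header, so the packet occupies
+               (length + 1) * 4 octets. */
+            size_t words = (size_t) ((buf[off + 2] << 8) | buf[off + 3]);
+            size_t bytes = (words + 1) * 4;
+
+            if (version != 2 || off + bytes > len)
+                return -1;
+            if (count == 0 && pt != RTCP_PT_SR && pt != RTCP_PT_RR)
+                return -1;   /* MUST begin with SR or RR */
+
+            count++;
+            off += bytes;
+        }
+        return off == len ? count : -1;
+    }
+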
+6.2 RTCP Transmission Interval
+
+ RTP is designed to allow an application to scale automatically over
+ session sizes ranging from a few participants to thousands. For
+ example, in an audio conference the data traffic is inherently self-
+ limiting because only one or two people will speak at a time, so with
+ multicast distribution the data rate on any given link remains
+ relatively constant independent of the number of participants.
+ However, the control traffic is not self-limiting. If the reception
+ reports from each participant were sent at a constant rate, the
+ control traffic would grow linearly with the number of participants.
+ Therefore, the rate must be scaled down by dynamically calculating
+ the interval between RTCP packet transmissions.
+
+ For each session, it is assumed that the data traffic is subject to
+ an aggregate limit called the "session bandwidth" to be divided among
+ the participants. This bandwidth might be reserved and the limit
+ enforced by the network. If there is no reservation, there may be
+ other constraints, depending on the environment, that establish the
+ "reasonable" maximum for the session to use, and that would be the
+ session bandwidth. The session bandwidth may be chosen based on some
+ cost or a priori knowledge of the available network bandwidth for the
+ session. It is somewhat independent of the media encoding, but the
+ encoding choice may be limited by the session bandwidth. Often, the
+ session bandwidth is the sum of the nominal bandwidths of the senders
+ expected to be concurrently active. For teleconference audio, this
+ number would typically be one sender's bandwidth. For layered
+ encodings, each layer is a separate RTP session with its own session
+ bandwidth parameter.
+
+ The session bandwidth parameter is expected to be supplied by a
+ session management application when it invokes a media application,
+ but media applications MAY set a default based on the single-sender
+ data bandwidth for the encoding selected for the session. The
+ application MAY also enforce bandwidth limits based on multicast
+ scope rules or other criteria. All participants MUST use the same
+ value for the session bandwidth so that the same RTCP interval will
+ be calculated.
+
+ Bandwidth calculations for control and data traffic include lower-
+ layer transport and network protocols (e.g., UDP and IP) since that
+ is what the resource reservation system would need to know. The
+ application can also be expected to know which of these protocols are
+ in use. Link level headers are not included in the calculation since
+ the packet will be encapsulated with different link level headers as
+ it travels.
+
+ The control traffic should be limited to a small and known fraction
+ of the session bandwidth: small so that the primary function of the
+ transport protocol to carry data is not impaired; known so that the
+ control traffic can be included in the bandwidth specification given
+ to a resource reservation protocol, and so that each participant can
+ independently calculate its share. The control traffic bandwidth is
+ in addition to the session bandwidth for the data traffic. It is
+ RECOMMENDED that the fraction of the session bandwidth added for RTCP
+ be fixed at 5%. It is also RECOMMENDED that 1/4 of the RTCP
+ bandwidth be dedicated to participants that are sending data so that
+ in sessions with a large number of receivers but a small number of
+ senders, newly joining participants will more quickly receive the
+ CNAME for the sending sites. When the proportion of senders is
+ greater than 1/4 of the participants, the senders get their
+ proportion of the full RTCP bandwidth. While the values of these and
+ other constants in the interval calculation are not critical, all
+ participants in the session MUST use the same values so the same
+ interval will be calculated. Therefore, these constants SHOULD be
+ fixed for a particular profile.
+
+ A profile MAY specify that the control traffic bandwidth may be a
+ separate parameter of the session rather than a strict percentage of
+ the session bandwidth. Using a separate parameter allows rate-
+ adaptive applications to set an RTCP bandwidth consistent with a
+ "typical" data bandwidth that is lower than the maximum bandwidth
+ specified by the session bandwidth parameter.
+
+ The profile MAY further specify that the control traffic bandwidth
+ may be divided into two separate session parameters for those
+ participants which are active data senders and those which are not;
+ let us call the parameters S and R. Following the recommendation
+ that 1/4 of the RTCP bandwidth be dedicated to data senders, the
+ RECOMMENDED default values for these two parameters would be 1.25%
+ and 3.75%, respectively. When the proportion of senders is greater
+ than S/(S+R) of the participants, the senders get their proportion of
+ the sum of these parameters. Using two parameters allows RTCP
+ reception reports to be turned off entirely for a particular session
+ by setting the RTCP bandwidth for non-data-senders to zero while
+ keeping the RTCP bandwidth for data senders non-zero so that sender
+ reports can still be sent for inter-media synchronization. Turning
+ off RTCP reception reports is NOT RECOMMENDED because they are needed
+ for the functions listed at the beginning of Section 6, particularly
+ reception quality feedback and congestion control. However, doing so
+ may be appropriate for systems operating on unidirectional links or
+ for sessions that don't require feedback on the quality of reception
+ or liveness of receivers and that have other means to avoid
+ congestion.
+
+ The calculated interval between transmissions of compound RTCP
+ packets SHOULD also have a lower bound to avoid having bursts of
+ packets exceed the allowed bandwidth when the number of participants
+ is small and the traffic isn't smoothed according to the law of large
+ numbers. It also keeps the report interval from becoming too small
+ during transient outages like a network partition such that
+ adaptation is delayed when the partition heals. At application
+ startup, a delay SHOULD be imposed before the first compound RTCP
+ packet is sent to allow time for RTCP packets to be received from
+ other participants so the report interval will converge to the
+ correct value more quickly. This delay MAY be set to half the
+ minimum interval to allow quicker notification that the new
+ participant is present. The RECOMMENDED value for a fixed minimum
+ interval is 5 seconds.
+
+ An implementation MAY scale the minimum RTCP interval to a smaller
+ value inversely proportional to the session bandwidth parameter with
+ the following limitations:
+
+ o For multicast sessions, only active data senders MAY use the
+ reduced minimum value to calculate the interval for transmission
+ of compound RTCP packets.
+
+ o For unicast sessions, the reduced value MAY be used by
+ participants that are not active data senders as well, and the
+ delay before sending the initial compound RTCP packet MAY be zero.
+
+ o For all sessions, the fixed minimum SHOULD be used when
+ calculating the participant timeout interval (see Section 6.3.5)
+ so that implementations which do not use the reduced value for
+ transmitting RTCP packets are not timed out by other participants
+ prematurely.
+
+ o The RECOMMENDED value for the reduced minimum in seconds is 360
+ divided by the session bandwidth in kilobits/second. This minimum
+ is smaller than 5 seconds for bandwidths greater than 72 kb/s.
+
+ The algorithm described in Section 6.3 and Appendix A.7 was designed
+ to meet the goals outlined in this section. It calculates the
+ interval between sending compound RTCP packets to divide the allowed
+ control traffic bandwidth among the participants. This allows an
+ application to provide fast response for small sessions where, for
+ example, identification of all participants is important, yet
+ automatically adapt to large sessions. The algorithm incorporates
+ the following characteristics:
+
+ o The calculated interval between RTCP packets scales linearly with
+ the number of members in the group. It is this linear factor
+ which allows for a constant amount of control traffic when summed
+ across all members.
+
+ o The interval between RTCP packets is varied randomly over the
+ range [0.5,1.5] times the calculated interval to avoid unintended
+ synchronization of all participants [20]. The first RTCP packet
+ sent after joining a session is also delayed by a random variation
+ of half the minimum RTCP interval.
+
+ o A dynamic estimate of the average compound RTCP packet size is
+ calculated, including all those packets received and sent, to
+ automatically adapt to changes in the amount of control
+ information carried.
+
+ o Since the calculated interval is dependent on the number of
+ observed group members, there may be undesirable startup effects
+ when a new user joins an existing session, or many users
+ simultaneously join a new session. These new users will initially
+ have incorrect estimates of the group membership, and thus their
+ RTCP transmission interval will be too short. This problem can be
+ significant if many users join the session simultaneously. To
+ deal with this, an algorithm called "timer reconsideration" is
+ employed. This algorithm implements a simple back-off mechanism
+ which causes users to hold back RTCP packet transmission if the
+ group sizes are increasing.
+
+ o When users leave a session, either with a BYE or by timeout, the
+ group membership decreases, and thus the calculated interval
+ should decrease. A "reverse reconsideration" algorithm is used to
+ allow members to more quickly reduce their intervals in response
+ to group membership decreases.
+
+ o BYE packets are given different treatment than other RTCP packets.
+ When a user leaves a group, and wishes to send a BYE packet, it
+ may do so before its next scheduled RTCP packet. However,
+ transmission of BYEs follows a back-off algorithm which avoids
+ floods of BYE packets should a large number of members
+ simultaneously leave the session.
+
+ This algorithm may be used for sessions in which all participants are
+ allowed to send. In that case, the session bandwidth parameter is
+ the product of the individual sender's bandwidth times the number of
+ participants, and the RTCP bandwidth is 5% of that.
+
+ Details of the algorithm's operation are given in the sections that
+ follow. Appendix A.7 gives an example implementation.
+
+6.2.1 Maintaining the Number of Session Members
+
+ Calculation of the RTCP packet interval depends upon an estimate of
+ the number of sites participating in the session. New sites are
+ added to the count when they are heard, and an entry for each SHOULD
+ be created in a table indexed by the SSRC or CSRC identifier (see
+ Section 8.2) to keep track of them. New entries MAY be considered
+ not valid until multiple packets carrying the new SSRC have been
+ received (see Appendix A.1), or until an SDES RTCP packet containing
+ a CNAME for that SSRC has been received. Entries MAY be deleted from
+ the table when an RTCP BYE packet with the corresponding SSRC
+ identifier is received, except that some straggler data packets might
+ arrive after the BYE and cause the entry to be recreated. Instead,
+ the entry SHOULD be marked as having received a BYE and then deleted
+ after an appropriate delay.
+
+ A participant MAY mark another site inactive, or delete it if not yet
+ valid, if no RTP or RTCP packet has been received for a small number
+ of RTCP report intervals (5 is RECOMMENDED). This provides some
+ robustness against packet loss. All sites must have the same value
+ for this multiplier and must calculate roughly the same value for the
+ RTCP report interval in order for this timeout to work properly.
+ Therefore, this multiplier SHOULD be fixed for a particular profile.
+
+ For sessions with a very large number of participants, it may be
+ impractical to maintain a table to store the SSRC identifier and
+ state information for all of them. An implementation MAY use SSRC
+ sampling, as described in [21], to reduce the storage requirements.
+ An implementation MAY use any other algorithm with similar
+ performance. A key requirement is that any algorithm considered
+ SHOULD NOT substantially underestimate the group size, although it
+ MAY overestimate.
+
+6.3 RTCP Packet Send and Receive Rules
+
+ The rules for how to send, and what to do when receiving an RTCP
+ packet are outlined here. An implementation that allows operation in
+ a multicast environment or a multipoint unicast environment MUST meet
+ the requirements in Section 6.2. Such an implementation MAY use the
+ algorithm defined in this section to meet those requirements, or MAY
+ use some other algorithm so long as it provides equivalent or better
+ performance. An implementation which is constrained to two-party
+ unicast operation SHOULD still use randomization of the RTCP
+ transmission interval to avoid unintended synchronization of multiple
+ instances operating in the same environment, but MAY omit the "timer
+ reconsideration" and "reverse reconsideration" algorithms in Sections
+ 6.3.3, 6.3.6 and 6.3.7.
+
+ To execute these rules, a session participant must maintain several
+ pieces of state:
+
+ tp: the last time an RTCP packet was transmitted;
+
+ tc: the current time;
+
+ tn: the next scheduled transmission time of an RTCP packet;
+
+ pmembers: the estimated number of session members at the time tn
+ was last recomputed;
+
+ members: the most current estimate for the number of session
+ members;
+
+ senders: the most current estimate for the number of senders in
+ the session;
+
+ rtcp_bw: The target RTCP bandwidth, i.e., the total bandwidth
+ that will be used for RTCP packets by all members of this session,
+ in octets per second. This will be a specified fraction of the
+ "session bandwidth" parameter supplied to the application at
+ startup.
+
+ we_sent: Flag that is true if the application has sent data
+ since the 2nd previous RTCP report was transmitted.
+
+ avg_rtcp_size: The average compound RTCP packet size, in octets,
+ over all RTCP packets sent and received by this participant. The
+ size includes lower-layer transport and network protocol headers
+ (e.g., UDP and IP) as explained in Section 6.2.
+
+ initial: Flag that is true if the application has not yet sent
+ an RTCP packet.
+
+ Many of these rules make use of the "calculated interval" between
+ packet transmissions. This interval is described in the following
+ section.
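+
+   As a purely illustrative, non-normative aid, this state maps onto a
+   small structure such as the following C sketch.  The field names
+   follow the variables above; representing times as seconds in
+   doubles is an assumption of the sketch, not a requirement.
+
+     struct rtcp_state {
+         double tp;            /* time of last RTCP transmission      */
+         double tc;            /* current time                        */
+         double tn;            /* next scheduled transmission time    */
+         int    pmembers;      /* members when tn was last recomputed */
+         int    members;       /* current estimate of session members */
+         int    senders;       /* current estimate of senders         */
+         double rtcp_bw;       /* target RTCP bandwidth, octets/sec   */
+         int    we_sent;       /* true if we sent data recently       */
+         double avg_rtcp_size; /* avg compound RTCP packet size       */
+         int    initial;       /* true until first RTCP packet sent   */
+     };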
+
+6.3.1 Computing the RTCP Transmission Interval
+
+ To maintain scalability, the average interval between packets from a
+ session participant should scale with the group size. This interval
+ is called the calculated interval. It is obtained by combining a
+ number of the pieces of state described above. The calculated
+ interval T is then determined as follows:
+
+ 1. If the number of senders is less than or equal to 25% of the
+ membership (members), the interval depends on whether the
+ participant is a sender or not (based on the value of we_sent).
+ If the participant is a sender (we_sent true), the constant C is
+ set to the average RTCP packet size (avg_rtcp_size) divided by 25%
+ of the RTCP bandwidth (rtcp_bw), and the constant n is set to the
+ number of senders. If we_sent is not true, the constant C is set
+ to the average RTCP packet size divided by 75% of the RTCP
+ bandwidth. The constant n is set to the number of receivers
+ (members - senders). If the number of senders is greater than
+ 25%, senders and receivers are treated together. The constant C
+ is set to the average RTCP packet size divided by the total RTCP
+ bandwidth and n is set to the total number of members. As stated
+ in Section 6.2, an RTP profile MAY specify that the RTCP bandwidth
+ may be explicitly defined by two separate parameters (call them S
+ and R) for those participants which are senders and those which
+ are not. In that case, the 25% fraction becomes S/(S+R) and the
+ 75% fraction becomes R/(S+R). Note that if R is zero, the
+ percentage of senders is never greater than S/(S+R), and the
+ implementation must avoid division by zero.
+
+ 2. If the participant has not yet sent an RTCP packet (the variable
+ initial is true), the constant Tmin is set to 2.5 seconds, else it
+ is set to 5 seconds.
+
+ 3. The deterministic calculated interval Td is set to max(Tmin, n*C).
+
+ 4. The calculated interval T is set to a number uniformly distributed
+ between 0.5 and 1.5 times the deterministic calculated interval.
+
+   5. The resulting value of T is divided by (e - 3/2) = 1.21828 to
+      compensate for the fact that the timer reconsideration algorithm
+      converges to a value of the RTCP bandwidth below the intended
+      average.
+
+ This procedure results in an interval which is random, but which, on
+ average, gives at least 25% of the RTCP bandwidth to senders and the
+ rest to receivers. If the senders constitute more than one quarter
+ of the membership, this procedure splits the bandwidth equally among
+ all participants, on average.
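+
+   The five steps above condense into a short routine.  The following
+   C sketch is illustrative only; Appendix A.7 contains the reference
+   implementation, and the use of drand48() as the uniform random
+   source is an assumption of the sketch.
+
+     #include <stdlib.h>
+
+     double rtcp_interval(int members, int senders, double rtcp_bw,
+                          int we_sent, double avg_rtcp_size,
+                          int initial)
+     {
+         const double SENDER_FRACTION = 0.25;           /* step 1 */
+         const double RCVR_FRACTION   = 0.75;
+         const double COMPENSATION    = 2.71828 - 1.5;  /* e - 3/2 */
+         double t, rtcp_min_time = initial ? 2.5 : 5.0; /* step 2 */
+         int n = members;
+
+         if (senders <= members * SENDER_FRACTION) {
+             if (we_sent) {
+                 rtcp_bw *= SENDER_FRACTION;
+                 n = senders;
+             } else {
+                 rtcp_bw *= RCVR_FRACTION;
+                 n -= senders;
+             }
+         }
+
+         t = avg_rtcp_size * n / rtcp_bw;        /* n*C           */
+         if (t < rtcp_min_time)
+             t = rtcp_min_time;                  /* step 3        */
+         t = t * (drand48() + 0.5);              /* step 4        */
+         return t / COMPENSATION;                /* step 5        */
+     }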
+
+6.3.2 Initialization
+
+ Upon joining the session, the participant initializes tp to 0, tc to
+ 0, senders to 0, pmembers to 1, members to 1, we_sent to false,
+ rtcp_bw to the specified fraction of the session bandwidth, initial
+ to true, and avg_rtcp_size to the probable size of the first RTCP
+ packet that the application will later construct. The calculated
+ interval T is then computed, and the first packet is scheduled for
+ time tn = T. This means that a transmission timer is set which
+ expires at time T. Note that an application MAY use any desired
+ approach for implementing this timer.
+
+ The participant adds its own SSRC to the member table.
+
+6.3.3 Receiving an RTP or Non-BYE RTCP Packet
+
+ When an RTP or RTCP packet is received from a participant whose SSRC
+ is not in the member table, the SSRC is added to the table, and the
+ value for members is updated once the participant has been validated
+ as described in Section 6.2.1. The same processing occurs for each
+ CSRC in a validated RTP packet.
+
+ When an RTP packet is received from a participant whose SSRC is not
+ in the sender table, the SSRC is added to the table, and the value
+ for senders is updated.
+
+ For each compound RTCP packet received, the value of avg_rtcp_size is
+ updated:
+
+ avg_rtcp_size = (1/16) * packet_size + (15/16) * avg_rtcp_size
+
+ where packet_size is the size of the RTCP packet just received.
+
+6.3.4 Receiving an RTCP BYE Packet
+
+ Except as described in Section 6.3.7 for the case when an RTCP BYE is
+ to be transmitted, if the received packet is an RTCP BYE packet, the
+ SSRC is checked against the member table. If present, the entry is
+ removed from the table, and the value for members is updated. The
+ SSRC is then checked against the sender table. If present, the entry
+ is removed from the table, and the value for senders is updated.
+
+ Furthermore, to make the transmission rate of RTCP packets more
+ adaptive to changes in group membership, the following "reverse
+ reconsideration" algorithm SHOULD be executed when a BYE packet is
+ received that reduces members to a value less than pmembers:
+
+ o The value for tn is updated according to the following formula:
+
+ tn = tc + (members/pmembers) * (tn - tc)
+
+   o  The value for tp is updated according to the following formula:
+
+ tp = tc - (members/pmembers) * (tc - tp).
+
+ o The next RTCP packet is rescheduled for transmission at time tn,
+ which is now earlier.
+
+ o The value of pmembers is set equal to members.
+
+ This algorithm does not prevent the group size estimate from
+ incorrectly dropping to zero for a short time due to premature
+ timeouts when most participants of a large session leave at once but
+ some remain. The algorithm does make the estimate return to the
+ correct value more rapidly. This situation is unusual enough and the
+ consequences are sufficiently harmless that this problem is deemed
+ only a secondary concern.
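+
+   For illustration only, the rescheduling arithmetic above can be
+   written as the following C sketch.  The arguments are the Section
+   6.3 state variables, passed here as plain values and pointers; the
+   function name is hypothetical.
+
+     /* Run after `members' has dropped below `pmembers', e.g. on
+      * receipt of a BYE.  tc is the current time. */
+     void reverse_reconsideration(double tc, double *tn, double *tp,
+                                  int members, int *pmembers)
+     {
+         double ratio = (double) members / *pmembers;
+
+         *tn = tc + ratio * (*tn - tc);
+         *tp = tc - ratio * (tc - *tp);
+         /* The caller reschedules the next RTCP packet for the new,
+          * earlier time *tn. */
+         *pmembers = members;
+     }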
+
+6.3.5 Timing Out an SSRC
+
+ At occasional intervals, the participant MUST check to see if any of
+ the other participants time out. To do this, the participant
+ computes the deterministic (without the randomization factor)
+ calculated interval Td for a receiver, that is, with we_sent false.
+ Any other session member who has not sent an RTP or RTCP packet since
+ time tc - MTd (M is the timeout multiplier, and defaults to 5) is
+ timed out. This means that its SSRC is removed from the member list,
+ and members is updated. A similar check is performed on the sender
+ list. Any member on the sender list who has not sent an RTP packet
+ since time tc - 2T (within the last two RTCP report intervals) is
+ removed from the sender list, and senders is updated.
+
+ If any members time out, the reverse reconsideration algorithm
+ described in Section 6.3.4 SHOULD be performed.
+
+ The participant MUST perform this check at least once per RTCP
+ transmission interval.
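+
+   As an illustration, the timeout check amounts to one pass over the
+   member table.  The structure and function below are hypothetical;
+   td is the deterministic interval of Section 6.3.1 computed with
+   we_sent false, t is the randomized interval, and M is the timeout
+   multiplier (default 5).
+
+     struct member {
+         unsigned ssrc;
+         double   last_heard;  /* last RTP or RTCP packet from it */
+         double   last_rtp;    /* last RTP data packet from it    */
+         int      is_sender;
+     };
+
+     void check_timeouts(struct member *tab, int *members,
+                         int *senders, double tc, double td,
+                         double t, int M)
+     {
+         int i = 0;
+
+         while (i < *members) {
+             if (tab[i].is_sender && tab[i].last_rtp < tc - 2 * t) {
+                 tab[i].is_sender = 0;        /* leaves sender list   */
+                 (*senders)--;
+             }
+             if (tab[i].last_heard < tc - M * td) {
+                 tab[i] = tab[--(*members)];  /* remove the member    */
+                 continue;                    /* re-check moved entry */
+             }
+             i++;
+         }
+         /* If anything was removed, the reverse reconsideration step
+          * of Section 6.3.4 SHOULD then be run by the caller. */
+     }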
+
+6.3.6 Expiration of Transmission Timer
+
+ When the packet transmission timer expires, the participant performs
+ the following operations:
+
+ o The transmission interval T is computed as described in Section
+ 6.3.1, including the randomization factor.
+
+ o If tp + T is less than or equal to tc, an RTCP packet is
+ transmitted. tp is set to tc, then another value for T is
+ calculated as in the previous step and tn is set to tc + T. The
+ transmission timer is set to expire again at time tn. If tp + T
+ is greater than tc, tn is set to tp + T. No RTCP packet is
+ transmitted. The transmission timer is set to expire at time tn.
+
+ o pmembers is set to members.
+
+ If an RTCP packet is transmitted, the value of initial is set to
+ FALSE. Furthermore, the value of avg_rtcp_size is updated:
+
+ avg_rtcp_size = (1/16) * packet_size + (15/16) * avg_rtcp_size
+
+ where packet_size is the size of the RTCP packet just transmitted.
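+
+   The decision made at timer expiry can be sketched as below.  This
+   is illustrative only; interval() stands for the calculation of
+   Section 6.3.1 and is a hypothetical callback, and actually building
+   and sending the compound packet is left to the caller.
+
+     /* Returns nonzero if a compound RTCP packet should be sent now.
+      * On return, *tn holds the time at which the timer should next
+      * expire.  The caller also sets pmembers = members and, if it
+      * sends, clears `initial' and updates avg_rtcp_size. */
+     int on_timer_expired(double tc, double *tp, double *tn,
+                          double (*interval)(void))
+     {
+         double t = interval();        /* randomized interval        */
+
+         if (*tp + t <= tc) {
+             *tp = tc;                 /* transmit an RTCP packet    */
+             *tn = tc + interval();    /* recompute for next expiry  */
+             return 1;
+         }
+         *tn = *tp + t;                /* too early: just reschedule */
+         return 0;
+     }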
+
+6.3.7 Transmitting a BYE Packet
+
+ When a participant wishes to leave a session, a BYE packet is
+ transmitted to inform the other participants of the event. In order
+ to avoid a flood of BYE packets when many participants leave the
+ system, a participant MUST execute the following algorithm if the
+ number of members is more than 50 when the participant chooses to
+ leave. This algorithm usurps the normal role of the members variable
+ to count BYE packets instead:
+
+ o When the participant decides to leave the system, tp is reset to
+ tc, the current time, members and pmembers are initialized to 1,
+ initial is set to 1, we_sent is set to false, senders is set to 0,
+ and avg_rtcp_size is set to the size of the compound BYE packet.
+ The calculated interval T is computed. The BYE packet is then
+ scheduled for time tn = tc + T.
+
+ o Every time a BYE packet from another participant is received,
+ members is incremented by 1 regardless of whether that participant
+ exists in the member table or not, and when SSRC sampling is in
+ use, regardless of whether or not the BYE SSRC would be included
+ in the sample. members is NOT incremented when other RTCP packets
+ or RTP packets are received, but only for BYE packets. Similarly,
+ avg_rtcp_size is updated only for received BYE packets. senders
+ is NOT updated when RTP packets arrive; it remains 0.
+
+ o Transmission of the BYE packet then follows the rules for
+ transmitting a regular RTCP packet, as above.
+
+ This allows BYE packets to be sent right away, yet controls their
+ total bandwidth usage. In the worst case, this could cause RTCP
+ control packets to use twice the bandwidth as normal (10%) -- 5% for
+ non-BYE RTCP packets and 5% for BYE.
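+
+   For illustration, the state reset that starts the BYE backoff can
+   be sketched as follows.  The function is hypothetical; interval()
+   again stands for the Section 6.3.1 calculation, and bye_size is the
+   size of the compound BYE packet about to be scheduled.
+
+     void schedule_bye(double tc, double bye_size, double *tp,
+                       double *tn, int *members, int *pmembers,
+                       int *initial, int *we_sent, int *senders,
+                       double *avg_rtcp_size,
+                       double (*interval)(void))
+     {
+         *tp = tc;
+         *members = *pmembers = 1; /* members now counts BYE packets */
+         *initial = 1;
+         *we_sent = 0;
+         *senders = 0;
+         *avg_rtcp_size = bye_size;
+         *tn = tc + interval();    /* the BYE is scheduled for tn    */
+     }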
+
+ A participant that does not want to wait for the above mechanism to
+ allow transmission of a BYE packet MAY leave the group without
+ sending a BYE at all. That participant will eventually be timed out
+ by the other group members.
+
+ If the group size estimate members is less than 50 when the
+ participant decides to leave, the participant MAY send a BYE packet
+ immediately. Alternatively, the participant MAY choose to execute
+ the above BYE backoff algorithm.
+
+ In either case, a participant which never sent an RTP or RTCP packet
+ MUST NOT send a BYE packet when they leave the group.
+
+6.3.8 Updating we_sent
+
+ The variable we_sent contains true if the participant has sent an RTP
+ packet recently, false otherwise. This determination is made by
+ using the same mechanisms as for managing the set of other
+ participants listed in the senders table. If the participant sends
+ an RTP packet when we_sent is false, it adds itself to the sender
+ table and sets we_sent to true. The reverse reconsideration
+ algorithm described in Section 6.3.4 SHOULD be performed to possibly
+ reduce the delay before sending an SR packet. Every time another RTP
+ packet is sent, the time of transmission of that packet is maintained
+ in the table. The normal sender timeout algorithm is then applied to
+ the participant -- if an RTP packet has not been transmitted since
+ time tc - 2T, the participant removes itself from the sender table,
+ decrements the sender count, and sets we_sent to false.
+
+6.3.9 Allocation of Source Description Bandwidth
+
+ This specification defines several source description (SDES) items in
+ addition to the mandatory CNAME item, such as NAME (personal name)
+ and EMAIL (email address). It also provides a means to define new
+ application-specific RTCP packet types. Applications should exercise
+ caution in allocating control bandwidth to this additional
+ information because it will slow down the rate at which reception
+ reports and CNAME are sent, thus impairing the performance of the
+ protocol. It is RECOMMENDED that no more than 20% of the RTCP
+ bandwidth allocated to a single participant be used to carry the
+ additional information. Furthermore, it is not intended that all
+ SDES items will be included in every application. Those that are
+ included SHOULD be assigned a fraction of the bandwidth according to
+ their utility. Rather than estimate these fractions dynamically, it
+ is recommended that the percentages be translated statically into
+ report interval counts based on the typical length of an item.
+
+ For example, an application may be designed to send only CNAME, NAME
+ and EMAIL and not any others. NAME might be given much higher
+ priority than EMAIL because the NAME would be displayed continuously
+ in the application's user interface, whereas EMAIL would be displayed
+ only when requested. At every RTCP interval, an RR packet and an
+ SDES packet with the CNAME item would be sent. For a small session
+ operating at the minimum interval, that would be every 5 seconds on
+ the average. Every third interval (15 seconds), one extra item would
+ be included in the SDES packet. Seven out of eight times this would
+ be the NAME item, and every eighth time (2 minutes) it would be the
+ EMAIL item.
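+
+   This reporting pattern reduces to a very small scheduling rule.
+   The C sketch below is only one way to realize the example; the item
+   constants are the SDES types of Section 6.5, and `count' is assumed
+   to be the number of RTCP intervals completed so far.
+
+     enum { SDES_CNAME = 1, SDES_NAME = 2, SDES_EMAIL = 3 };
+
+     /* CNAME is sent every interval.  Every third interval one extra
+      * item is added: NAME seven times out of eight, EMAIL the eighth
+      * time. */
+     int extra_sdes_item(unsigned count)
+     {
+         if (count % 3 != 0)
+             return 0;                    /* no extra item this time */
+         return (count % 24 == 0) ? SDES_EMAIL : SDES_NAME;
+     }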
+
+ When multiple applications operate in concert using cross-application
+ binding through a common CNAME for each participant, for example in a
+ multimedia conference composed of an RTP session for each medium, the
+ additional SDES information MAY be sent in only one RTP session. The
+ other sessions would carry only the CNAME item. In particular, this
+ approach should be applied to the multiple sessions of a layered
+ encoding scheme (see Section 2.4).
+
+6.4 Sender and Receiver Reports
+
+ RTP receivers provide reception quality feedback using RTCP report
+ packets which may take one of two forms depending upon whether or not
+ the receiver is also a sender. The only difference between the
+ sender report (SR) and receiver report (RR) forms, besides the packet
+ type code, is that the sender report includes a 20-byte sender
+ information section for use by active senders. The SR is issued if a
+ site has sent any data packets during the interval since issuing the
+ last report or the previous one, otherwise the RR is issued.
+
+ Both the SR and RR forms include zero or more reception report
+ blocks, one for each of the synchronization sources from which this
+ receiver has received RTP data packets since the last report.
+ Reports are not issued for contributing sources listed in the CSRC
+ list. Each reception report block provides statistics about the data
+ received from the particular source indicated in that block. Since a
+ maximum of 31 reception report blocks will fit in an SR or RR packet,
+ additional RR packets SHOULD be stacked after the initial SR or RR
+ packet as needed to contain the reception reports for all sources
+ heard during the interval since the last report. If there are too
+ many sources to fit all the necessary RR packets into one compound
+ RTCP packet without exceeding the MTU of the network path, then only
+ the subset that will fit into one MTU SHOULD be included in each
+ interval. The subsets SHOULD be selected round-robin across multiple
+ intervals so that all sources are reported.
+
+ The next sections define the formats of the two reports, how they may
+ be extended in a profile-specific manner if an application requires
+ additional feedback information, and how the reports may be used.
+ Details of reception reporting by translators and mixers is given in
+ Section 7.
+
+6.4.1 SR: Sender Report RTCP Packet
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+header |V=2|P| RC | PT=SR=200 | length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | SSRC of sender |
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+sender | NTP timestamp, most significant word |
+info +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | NTP timestamp, least significant word |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | RTP timestamp |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | sender's packet count |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | sender's octet count |
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+report | SSRC_1 (SSRC of first source) |
+block +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ 1 | fraction lost | cumulative number of packets lost |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | extended highest sequence number received |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | interarrival jitter |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | last SR (LSR) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | delay since last SR (DLSR) |
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+report | SSRC_2 (SSRC of second source) |
+block +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ 2 : ... :
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+ | profile-specific extensions |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ The sender report packet consists of three sections, possibly
+ followed by a fourth profile-specific extension section if defined.
+ The first section, the header, is 8 octets long. The fields have the
+ following meaning:
+
+ version (V): 2 bits
+ Identifies the version of RTP, which is the same in RTCP packets
+ as in RTP data packets. The version defined by this specification
+ is two (2).
+
+ padding (P): 1 bit
+ If the padding bit is set, this individual RTCP packet contains
+ some additional padding octets at the end which are not part of
+ the control information but are included in the length field. The
+ last octet of the padding is a count of how many padding octets
+ should be ignored, including itself (it will be a multiple of
+ four). Padding may be needed by some encryption algorithms with
+ fixed block sizes. In a compound RTCP packet, padding is only
+ required on one individual packet because the compound packet is
+ encrypted as a whole for the method in Section 9.1. Thus, padding
+ MUST only be added to the last individual packet, and if padding
+ is added to that packet, the padding bit MUST be set only on that
+ packet. This convention aids the header validity checks described
+ in Appendix A.2 and allows detection of packets from some early
+ implementations that incorrectly set the padding bit on the first
+ individual packet and add padding to the last individual packet.
+
+ reception report count (RC): 5 bits
+ The number of reception report blocks contained in this packet. A
+ value of zero is valid.
+
+ packet type (PT): 8 bits
+ Contains the constant 200 to identify this as an RTCP SR packet.
+
+ length: 16 bits
+ The length of this RTCP packet in 32-bit words minus one,
+ including the header and any padding. (The offset of one makes
+ zero a valid length and avoids a possible infinite loop in
+ scanning a compound RTCP packet, while counting 32-bit words
+ avoids a validity check for a multiple of 4.)
+
+ SSRC: 32 bits
+ The synchronization source identifier for the originator of this
+ SR packet.
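+
+   As a non-normative illustration, the fixed 8-octet header described
+   above can be decoded with a few shifts and masks.  The structure
+   and function names below are hypothetical; buf is assumed to point
+   at an already length-checked packet in network byte order.
+
+     #include <stdint.h>
+     #include <string.h>
+     #include <arpa/inet.h>
+
+     struct rtcp_common {
+         unsigned version, padding, count, packet_type, length_words;
+         uint32_t ssrc;
+     };
+
+     void rtcp_parse_common(const uint8_t *buf, struct rtcp_common *h)
+     {
+         h->version      = buf[0] >> 6;          /* V: two (2)       */
+         h->padding      = (buf[0] >> 5) & 0x1;  /* P                */
+         h->count        = buf[0] & 0x1f;        /* RC               */
+         h->packet_type  = buf[1];               /* PT: 200 for SR   */
+         h->length_words = (buf[2] << 8) | buf[3]; /* length minus 1 */
+         memcpy(&h->ssrc, buf + 4, 4);
+         h->ssrc = ntohl(h->ssrc);
+     }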
+
+ The second section, the sender information, is 20 octets long and is
+ present in every sender report packet. It summarizes the data
+ transmissions from this sender. The fields have the following
+ meaning:
+
+ NTP timestamp: 64 bits
+ Indicates the wallclock time (see Section 4) when this report was
+ sent so that it may be used in combination with timestamps
+ returned in reception reports from other receivers to measure
+ round-trip propagation to those receivers. Receivers should
+ expect that the measurement accuracy of the timestamp may be
+ limited to far less than the resolution of the NTP timestamp. The
+ measurement uncertainty of the timestamp is not indicated as it
+ may not be known. On a system that has no notion of wallclock
+ time but does have some system-specific clock such as "system
+ uptime", a sender MAY use that clock as a reference to calculate
+ relative NTP timestamps. It is important to choose a commonly
+ used clock so that if separate implementations are used to produce
+ the individual streams of a multimedia session, all
+ implementations will use the same clock. Until the year 2036,
+ relative and absolute timestamps will differ in the high bit so
+ (invalid) comparisons will show a large difference; by then one
+ hopes relative timestamps will no longer be needed. A sender that
+ has no notion of wallclock or elapsed time MAY set the NTP
+ timestamp to zero.
+
+ RTP timestamp: 32 bits
+ Corresponds to the same time as the NTP timestamp (above), but in
+ the same units and with the same random offset as the RTP
+ timestamps in data packets. This correspondence may be used for
+ intra- and inter-media synchronization for sources whose NTP
+ timestamps are synchronized, and may be used by media-independent
+ receivers to estimate the nominal RTP clock frequency. Note that
+ in most cases this timestamp will not be equal to the RTP
+ timestamp in any adjacent data packet. Rather, it MUST be
+ calculated from the corresponding NTP timestamp using the
+ relationship between the RTP timestamp counter and real time as
+ maintained by periodically checking the wallclock time at a
+ sampling instant.
+
+ sender's packet count: 32 bits
+ The total number of RTP data packets transmitted by the sender
+ since starting transmission up until the time this SR packet was
+ generated. The count SHOULD be reset if the sender changes its
+ SSRC identifier.
+
+ sender's octet count: 32 bits
+ The total number of payload octets (i.e., not including header or
+ padding) transmitted in RTP data packets by the sender since
+ starting transmission up until the time this SR packet was
+ generated. The count SHOULD be reset if the sender changes its
+ SSRC identifier. This field can be used to estimate the average
+ payload data rate.
+
+ The third section contains zero or more reception report blocks
+ depending on the number of other sources heard by this sender since
+ the last report. Each reception report block conveys statistics on
+ the reception of RTP packets from a single synchronization source.
+ Receivers SHOULD NOT carry over statistics when a source changes its
+ SSRC identifier due to a collision. These statistics are:
+
+ SSRC_n (source identifier): 32 bits
+ The SSRC identifier of the source to which the information in this
+ reception report block pertains.
+
+ fraction lost: 8 bits
+ The fraction of RTP data packets from source SSRC_n lost since the
+ previous SR or RR packet was sent, expressed as a fixed point
+ number with the binary point at the left edge of the field. (That
+ is equivalent to taking the integer part after multiplying the
+ loss fraction by 256.) This fraction is defined to be the number
+ of packets lost divided by the number of packets expected, as
+ defined in the next paragraph. An implementation is shown in
+ Appendix A.3. If the loss is negative due to duplicates, the
+ fraction lost is set to zero. Note that a receiver cannot tell
+ whether any packets were lost after the last one received, and
+ that there will be no reception report block issued for a source
+ if all packets from that source sent during the last reporting
+ interval have been lost.
+
+ cumulative number of packets lost: 24 bits
+ The total number of RTP data packets from source SSRC_n that have
+ been lost since the beginning of reception. This number is
+ defined to be the number of packets expected less the number of
+ packets actually received, where the number of packets received
+ includes any which are late or duplicates. Thus, packets that
+ arrive late are not counted as lost, and the loss may be negative
+ if there are duplicates. The number of packets expected is
+ defined to be the extended last sequence number received, as
+ defined next, less the initial sequence number received. This may
+ be calculated as shown in Appendix A.3.
+
+ extended highest sequence number received: 32 bits
+ The low 16 bits contain the highest sequence number received in an
+ RTP data packet from source SSRC_n, and the most significant 16
+ bits extend that sequence number with the corresponding count of
+ sequence number cycles, which may be maintained according to the
+ algorithm in Appendix A.1. Note that different receivers within
+ the same session will generate different extensions to the
+ sequence number if their start times differ significantly.
+
+ interarrival jitter: 32 bits
+ An estimate of the statistical variance of the RTP data packet
+ interarrival time, measured in timestamp units and expressed as an
+ unsigned integer. The interarrival jitter J is defined to be the
+ mean deviation (smoothed absolute value) of the difference D in
+ packet spacing at the receiver compared to the sender for a pair
+ of packets. As shown in the equation below, this is equivalent to
+ the difference in the "relative transit time" for the two packets;
+ the relative transit time is the difference between a packet's RTP
+ timestamp and the receiver's clock at the time of arrival,
+ measured in the same units.
+
+ If Si is the RTP timestamp from packet i, and Ri is the time of
+ arrival in RTP timestamp units for packet i, then for two packets
+ i and j, D may be expressed as
+
+ D(i,j) = (Rj - Ri) - (Sj - Si) = (Rj - Sj) - (Ri - Si)
+
+ The interarrival jitter SHOULD be calculated continuously as each
+ data packet i is received from source SSRC_n, using this
+ difference D for that packet and the previous packet i-1 in order
+ of arrival (not necessarily in sequence), according to the formula
+
+ J(i) = J(i-1) + (|D(i-1,i)| - J(i-1))/16
+
+ Whenever a reception report is issued, the current value of J is
+ sampled.
+
+ The jitter calculation MUST conform to the formula specified here
+ in order to allow profile-independent monitors to make valid
+ interpretations of reports coming from different implementations.
+ This algorithm is the optimal first-order estimator and the gain
+ parameter 1/16 gives a good noise reduction ratio while
+ maintaining a reasonable rate of convergence [22]. A sample
+ implementation is shown in Appendix A.8. See Section 6.4.4 for a
+ discussion of the effects of varying packet duration and delay
+ before transmission.
+
+ last SR timestamp (LSR): 32 bits
+ The middle 32 bits out of 64 in the NTP timestamp (as explained in
+ Section 4) received as part of the most recent RTCP sender report
+ (SR) packet from source SSRC_n. If no SR has been received yet,
+ the field is set to zero.
+
+ delay since last SR (DLSR): 32 bits
+ The delay, expressed in units of 1/65536 seconds, between
+ receiving the last SR packet from source SSRC_n and sending this
+ reception report block. If no SR packet has been received yet
+ from SSRC_n, the DLSR field is set to zero.
+
+ Let SSRC_r denote the receiver issuing this receiver report.
+ Source SSRC_n can compute the round-trip propagation delay to
+ SSRC_r by recording the time A when this reception report block is
+   received. It calculates the total round-trip time A-LSR using the
+   last SR timestamp (LSR) field, and then subtracts the DLSR field to
+   leave the round-trip propagation delay as (A - LSR - DLSR). This
+ is illustrated in Fig. 2. Times are shown in both a hexadecimal
+ representation of the 32-bit fields and the equivalent floating-
+ point decimal representation. Colons indicate a 32-bit field
+ divided into a 16-bit integer part and 16-bit fraction part.
+
+ This may be used as an approximate measure of distance to cluster
+ receivers, although some links have very asymmetric delays.
+
+ [10 Nov 1995 11:33:25.125 UTC] [10 Nov 1995 11:33:36.5 UTC]
+ n SR(n) A=b710:8000 (46864.500 s)
+ ---------------------------------------------------------------->
+ v ^
+ ntp_sec =0xb44db705 v ^ dlsr=0x0005:4000 ( 5.250s)
+ ntp_frac=0x20000000 v ^ lsr =0xb705:2000 (46853.125s)
+ (3024992005.125 s) v ^
+ r v ^ RR(n)
+ ---------------------------------------------------------------->
+ |<-DLSR->|
+ (5.250 s)
+
+ A 0xb710:8000 (46864.500 s)
+ DLSR -0x0005:4000 ( 5.250 s)
+ LSR -0xb705:2000 (46853.125 s)
+ -------------------------------
+ delay 0x0006:2000 ( 6.125 s)
+
+ Figure 2: Example for round-trip time computation
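+
+   The arithmetic of Figure 2, and the interarrival jitter estimator
+   defined earlier in this section, are small enough to sketch in a
+   few lines of C.  Both functions below are illustrative only
+   (Appendix A.8 has the reference jitter code); the 32-bit arguments
+   of rtcp_rtt() are "middle 32 bits of NTP" values, i.e. 16.16
+   fixed-point seconds.
+
+     #include <stdint.h>
+
+     /* Round-trip time, in seconds, from a reception report block:
+      * a is the local arrival time of the block, lsr and dlsr are
+      * the LSR and DLSR fields.  Returns a negative value if no SR
+      * has been received yet. */
+     double rtcp_rtt(uint32_t a, uint32_t lsr, uint32_t dlsr)
+     {
+         if (lsr == 0)
+             return -1.0;
+         return (a - lsr - dlsr) / 65536.0;  /* modular 32-bit math */
+     }
+
+     /* Per-packet jitter update.  rtp_ts is the packet's RTP
+      * timestamp and arrival the arrival time in the same units;
+      * *transit and *jitter are per-source state.  The very first
+      * packet from a source only seeds *transit. */
+     void update_jitter(uint32_t rtp_ts, uint32_t arrival, int first,
+                        int *transit, double *jitter)
+     {
+         int t = (int) (arrival - rtp_ts);  /* relative transit time */
+         int d = t - *transit;              /* D(i-1,i)              */
+
+         *transit = t;
+         if (first)
+             return;
+         if (d < 0)
+             d = -d;
+         *jitter += ((double) d - *jitter) / 16.0;
+     }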
+
+6.4.2 RR: Receiver Report RTCP Packet
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+header |V=2|P| RC | PT=RR=201 | length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | SSRC of packet sender |
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+report | SSRC_1 (SSRC of first source) |
+block +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ 1 | fraction lost | cumulative number of packets lost |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | extended highest sequence number received |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | interarrival jitter |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | last SR (LSR) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | delay since last SR (DLSR) |
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+report | SSRC_2 (SSRC of second source) |
+block +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ 2 : ... :
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+ | profile-specific extensions |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ The format of the receiver report (RR) packet is the same as that of
+ the SR packet except that the packet type field contains the constant
+ 201 and the five words of sender information are omitted (these are
+ the NTP and RTP timestamps and sender's packet and octet counts).
+ The remaining fields have the same meaning as for the SR packet.
+
+ An empty RR packet (RC = 0) MUST be put at the head of a compound
+ RTCP packet when there is no data transmission or reception to
+ report.
+
+6.4.3 Extending the Sender and Receiver Reports
+
+ A profile SHOULD define profile-specific extensions to the sender
+ report and receiver report if there is additional information that
+ needs to be reported regularly about the sender or receivers. This
+ method SHOULD be used in preference to defining another RTCP packet
+ type because it requires less overhead:
+
+ o fewer octets in the packet (no RTCP header or SSRC field);
+
+ o simpler and faster parsing because applications running under that
+ profile would be programmed to always expect the extension fields
+ in the directly accessible location after the reception reports.
+
+ The extension is a fourth section in the sender- or receiver-report
+ packet which comes at the end after the reception report blocks, if
+ any. If additional sender information is required, then for sender
+ reports it would be included first in the extension section, but for
+ receiver reports it would not be present. If information about
+ receivers is to be included, that data SHOULD be structured as an
+ array of blocks parallel to the existing array of reception report
+ blocks; that is, the number of blocks would be indicated by the RC
+ field.
+
+6.4.4 Analyzing Sender and Receiver Reports
+
+ It is expected that reception quality feedback will be useful not
+ only for the sender but also for other receivers and third-party
+ monitors. The sender may modify its transmissions based on the
+ feedback; receivers can determine whether problems are local,
+ regional or global; network managers may use profile-independent
+ monitors that receive only the RTCP packets and not the corresponding
+ RTP data packets to evaluate the performance of their networks for
+ multicast distribution.
+
+ Cumulative counts are used in both the sender information and
+ receiver report blocks so that differences may be calculated between
+ any two reports to make measurements over both short and long time
+ periods, and to provide resilience against the loss of a report. The
+ difference between the last two reports received can be used to
+ estimate the recent quality of the distribution. The NTP timestamp
+ is included so that rates may be calculated from these differences
+ over the interval between two reports. Since that timestamp is
+ independent of the clock rate for the data encoding, it is possible
+ to implement encoding- and profile-independent quality monitors.
+
+ An example calculation is the packet loss rate over the interval
+ between two reception reports. The difference in the cumulative
+ number of packets lost gives the number lost during that interval.
+ The difference in the extended last sequence numbers received gives
+ the number of packets expected during the interval. The ratio of
+ these two is the packet loss fraction over the interval. This ratio
+ should equal the fraction lost field if the two reports are
+ consecutive, but otherwise it may not. The loss rate per second can
+ be obtained by dividing the loss fraction by the difference in NTP
+ timestamps, expressed in seconds. The number of packets received is
+ the number of packets expected minus the number lost. The number of
+ packets expected may also be used to judge the statistical validity
+ of any loss estimates. For example, 1 out of 5 packets lost has a
+ lower significance than 200 out of 1000.
+
+ From the sender information, a third-party monitor can calculate the
+ average payload data rate and the average packet rate over an
+ interval without receiving the data. Taking the ratio of the two
+ gives the average payload size. If it can be assumed that packet
+ loss is independent of packet size, then the number of packets
+ received by a particular receiver times the average payload size (or
+ the corresponding packet size) gives the apparent throughput
+ available to that receiver.
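+
+   The two example calculations above translate directly into code.
+   The sketch below is illustrative; the `report' structure is a
+   hypothetical container for fields a monitor would have extracted
+   from two successive reports about the same source, with the NTP
+   timestamps already converted to seconds.
+
+     #include <stdint.h>
+
+     struct report {
+         double   ntp_time;      /* report time, in seconds          */
+         int32_t  cum_lost;      /* cumulative packets lost          */
+         uint32_t ext_high_seq;  /* extended highest sequence number */
+     };
+
+     /* Loss over the interval between two reports for one source.
+      * Following the text above, *per_second is the loss fraction
+      * divided by the elapsed time. */
+     void interval_loss(const struct report *prev,
+                        const struct report *cur,
+                        double *fraction, double *per_second)
+     {
+         double lost     = cur->cum_lost - prev->cum_lost;
+         double expected = cur->ext_high_seq - prev->ext_high_seq;
+         double secs     = cur->ntp_time - prev->ntp_time;
+
+         *fraction   = expected > 0 ? lost / expected : 0.0;
+         *per_second = secs > 0 ? *fraction / secs : 0.0;
+     }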
+
+ In addition to the cumulative counts which allow long-term packet
+ loss measurements using differences between reports, the fraction
+ lost field provides a short-term measurement from a single report.
+ This becomes more important as the size of a session scales up enough
+ that reception state information might not be kept for all receivers
+ or the interval between reports becomes long enough that only one
+ report might have been received from a particular receiver.
+
+ The interarrival jitter field provides a second short-term measure of
+ network congestion. Packet loss tracks persistent congestion while
+ the jitter measure tracks transient congestion. The jitter measure
+ may indicate congestion before it leads to packet loss. The
+ interarrival jitter field is only a snapshot of the jitter at the
+ time of a report and is not intended to be taken quantitatively.
+ Rather, it is intended for comparison across a number of reports from
+ one receiver over time or from multiple receivers, e.g., within a
+ single network, at the same time. To allow comparison across
+   receivers, it is important that the jitter be calculated according to
+ the same formula by all receivers.
+
+ Because the jitter calculation is based on the RTP timestamp which
+ represents the instant when the first data in the packet was sampled,
+ any variation in the delay between that sampling instant and the time
+ the packet is transmitted will affect the resulting jitter that is
+ calculated. Such a variation in delay would occur for audio packets
+ of varying duration. It will also occur for video encodings because
+ the timestamp is the same for all the packets of one frame but those
+ packets are not all transmitted at the same time. The variation in
+ delay until transmission does reduce the accuracy of the jitter
+ calculation as a measure of the behavior of the network by itself,
+ but it is appropriate to include considering that the receiver buffer
+ must accommodate it. When the jitter calculation is used as a
+ comparative measure, the (constant) component due to variation in
+ delay until transmission subtracts out so that a change in the
+ network jitter component can then be observed unless it is relatively
+ small. If the change is small, then it is likely to be
+ inconsequential.
+
+6.5 SDES: Source Description RTCP Packet
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+header |V=2|P| SC | PT=SDES=202 | length |
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+chunk | SSRC/CSRC_1 |
+ 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | SDES items |
+ | ... |
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+chunk | SSRC/CSRC_2 |
+ 2 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | SDES items |
+ | ... |
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+
+ The SDES packet is a three-level structure composed of a header and
+ zero or more chunks, each of which is composed of items describing
+ the source identified in that chunk. The items are described
+ individually in subsequent sections.
+
+ version (V), padding (P), length:
+ As described for the SR packet (see Section 6.4.1).
+
+ packet type (PT): 8 bits
+ Contains the constant 202 to identify this as an RTCP SDES packet.
+
+ source count (SC): 5 bits
+ The number of SSRC/CSRC chunks contained in this SDES packet. A
+ value of zero is valid but useless.
+
+ Each chunk consists of an SSRC/CSRC identifier followed by a list of
+ zero or more items, which carry information about the SSRC/CSRC.
+ Each chunk starts on a 32-bit boundary. Each item consists of an 8-
+ bit type field, an 8-bit octet count describing the length of the
+ text (thus, not including this two-octet header), and the text
+ itself. Note that the text can be no longer than 255 octets, but
+ this is consistent with the need to limit RTCP bandwidth consumption.
+
+
+ The text is encoded according to the UTF-8 encoding specified in RFC
+ 2279 [5]. US-ASCII is a subset of this encoding and requires no
+ additional encoding. The presence of multi-octet encodings is
+ indicated by setting the most significant bit of a character to a
+ value of one.
+
+ Items are contiguous, i.e., items are not individually padded to a
+ 32-bit boundary. Text is not null terminated because some multi-
+ octet encodings include null octets. The list of items in each chunk
+ MUST be terminated by one or more null octets, the first of which is
+ interpreted as an item type of zero to denote the end of the list.
+ No length octet follows the null item type octet, but additional null
+ octets MUST be included if needed to pad until the next 32-bit
+ boundary. Note that this padding is separate from that indicated by
+ the P bit in the RTCP header. A chunk with zero items (four null
+ octets) is valid but useless.
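+
+   Walking the item list of one chunk can be sketched as follows.  The
+   function is purely illustrative and only prints what it finds; item
+   is assumed to point just past the chunk's 32-bit SSRC/CSRC
+   identifier, and end to point at the end of the chunk's data.
+
+     #include <stdint.h>
+     #include <stdio.h>
+
+     void walk_sdes_items(const uint8_t *item, const uint8_t *end)
+     {
+         while (item < end && item[0] != 0) {  /* type 0 ends list */
+             unsigned type = item[0];
+             unsigned len  = item[1];          /* text octet count */
+
+             if (item + 2 + len > end)
+                 break;                        /* malformed: stop  */
+             printf("SDES item %u: %u octet(s) of UTF-8 text\n",
+                    type, len);
+             item += 2 + len;                  /* items contiguous */
+         }
+         /* Null octets then pad the chunk to a 32-bit boundary. */
+     }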
+
+ End systems send one SDES packet containing their own source
+ identifier (the same as the SSRC in the fixed RTP header). A mixer
+ sends one SDES packet containing a chunk for each contributing source
+ from which it is receiving SDES information, or multiple complete
+ SDES packets in the format above if there are more than 31 such
+ sources (see Section 7).
+
+ The SDES items currently defined are described in the next sections.
+ Only the CNAME item is mandatory. Some items shown here may be
+ useful only for particular profiles, but the item types are all
+ assigned from one common space to promote shared use and to simplify
+ profile-independent applications. Additional items may be defined in
+ a profile by registering the type numbers with IANA as described in
+ Section 15.
+
+6.5.1 CNAME: Canonical End-Point Identifier SDES Item
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | CNAME=1 | length | user and domain name ...
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ The CNAME identifier has the following properties:
+
+ o Because the randomly allocated SSRC identifier may change if a
+ conflict is discovered or if a program is restarted, the CNAME
+ item MUST be included to provide the binding from the SSRC
+ identifier to an identifier for the source (sender or receiver)
+ that remains constant.
+
+ o Like the SSRC identifier, the CNAME identifier SHOULD also be
+ unique among all participants within one RTP session.
+
+ o To provide a binding across multiple media tools used by one
+ participant in a set of related RTP sessions, the CNAME SHOULD be
+ fixed for that participant.
+
+ o To facilitate third-party monitoring, the CNAME SHOULD be suitable
+ for either a program or a person to locate the source.
+
+ Therefore, the CNAME SHOULD be derived algorithmically and not
+ entered manually, when possible. To meet these requirements, the
+ following format SHOULD be used unless a profile specifies an
+ alternate syntax or semantics. The CNAME item SHOULD have the format
+ "user@host", or "host" if a user name is not available as on single-
+ user systems. For both formats, "host" is either the fully qualified
+ domain name of the host from which the real-time data originates,
+ formatted according to the rules specified in RFC 1034 [6], RFC 1035
+ [7] and Section 2.1 of RFC 1123 [8]; or the standard ASCII
+ representation of the host's numeric address on the interface used
+ for the RTP communication. For example, the standard ASCII
+ representation of an IP Version 4 address is "dotted decimal", also
+ known as dotted quad, and for IP Version 6, addresses are textually
+ represented as groups of hexadecimal digits separated by colons (with
+ variations as detailed in RFC 3513 [23]). Other address types are
+ expected to have ASCII representations that are mutually unique. The
+ fully qualified domain name is more convenient for a human observer
+ and may avoid the need to send a NAME item in addition, but it may be
+ difficult or impossible to obtain reliably in some operating
+ environments. Applications that may be run in such environments
+ SHOULD use the ASCII representation of the address instead.
+
+ Examples are "doe@sleepy.example.com", "doe@192.0.2.89" or
+ "doe@2201:056D::112E:144A:1E24" for a multi-user system. On a system
+ with no user name, examples would be "sleepy.example.com",
+ "192.0.2.89" or "2201:056D::112E:144A:1E24".
+
+ The user name SHOULD be in a form that a program such as "finger" or
+ "talk" could use, i.e., it typically is the login name rather than
+ the personal name. The host name is not necessarily identical to the
+ one in the participant's electronic mail address.
+
+ This syntax will not provide unique identifiers for each source if an
+ application permits a user to generate multiple sources from one
+ host. Such an application would have to rely on the SSRC to further
+ identify the source, or the profile for that application would have
+ to specify additional syntax for the CNAME identifier.
+
+ If each application creates its CNAME independently, the resulting
+ CNAMEs may not be identical as would be required to provide a binding
+ across multiple media tools belonging to one participant in a set of
+ related RTP sessions. If cross-media binding is required, it may be
+ necessary for the CNAME of each tool to be externally configured with
+ the same value by a coordination tool.
+
+ Application writers should be aware that private network address
+ assignments such as the Net-10 assignment proposed in RFC 1918 [24]
+ may create network addresses that are not globally unique. This
+ would lead to non-unique CNAMEs if hosts with private addresses and
+ no direct IP connectivity to the public Internet have their RTP
+ packets forwarded to the public Internet through an RTP-level
+ translator. (See also RFC 1627 [25].) To handle this case,
+ applications MAY provide a means to configure a unique CNAME, but the
+ burden is on the translator to translate CNAMEs from private
+ addresses to public addresses if necessary to keep private addresses
+ from being exposed.
+
+6.5.2 NAME: User Name SDES Item
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | NAME=2 | length | common name of source ...
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ This is the real name used to describe the source, e.g., "John Doe,
+ Bit Recycler". It may be in any form desired by the user. For
+ applications such as conferencing, this form of name may be the most
+ desirable for display in participant lists, and therefore might be
+ sent most frequently of those items other than CNAME. Profiles MAY
+ establish such priorities. The NAME value is expected to remain
+ constant at least for the duration of a session. It SHOULD NOT be
+ relied upon to be unique among all participants in the session.
+
+6.5.3 EMAIL: Electronic Mail Address SDES Item
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | EMAIL=3 | length | email address of source ...
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ The email address is formatted according to RFC 2822 [9], for
+ example, "John.Doe@example.com". The EMAIL value is expected to
+ remain constant for the duration of a session.
+
+6.5.4 PHONE: Phone Number SDES Item
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | PHONE=4 | length | phone number of source ...
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ The phone number SHOULD be formatted with the plus sign replacing the
+ international access code. For example, "+1 908 555 1212" for a
+ number in the United States.
+
+6.5.5 LOC: Geographic User Location SDES Item
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | LOC=5 | length | geographic location of site ...
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Depending on the application, different degrees of detail are
+ appropriate for this item. For conference applications, a string
+ like "Murray Hill, New Jersey" may be sufficient, while, for an
+ active badge system, strings like "Room 2A244, AT&T BL MH" might be
+ appropriate. The degree of detail is left to the implementation
+ and/or user, but format and content MAY be prescribed by a profile.
+ The LOC value is expected to remain constant for the duration of a
+ session, except for mobile hosts.
+
+6.5.6 TOOL: Application or Tool Name SDES Item
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | TOOL=6 | length |name/version of source appl. ...
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ A string giving the name and possibly version of the application
+ generating the stream, e.g., "videotool 1.2". This information may
+ be useful for debugging purposes and is similar to the Mailer or
+ Mail-System-Version SMTP headers. The TOOL value is expected to
+ remain constant for the duration of the session.
+
+6.5.7 NOTE: Notice/Status SDES Item
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | NOTE=7 | length | note about the source ...
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ The following semantics are suggested for this item, but these or
+ other semantics MAY be explicitly defined by a profile. The NOTE
+ item is intended for transient messages describing the current state
+ of the source, e.g., "on the phone, can't talk". Or, during a
+ seminar, this item might be used to convey the title of the talk. It
+ should be used only to carry exceptional information and SHOULD NOT
+ be included routinely by all participants because this would slow
+ down the rate at which reception reports and CNAME are sent, thus
+ impairing the performance of the protocol. In particular, it SHOULD
+ NOT be included as an item in a user's configuration file nor
+ automatically generated as in a quote-of-the-day.
+
+ Since the NOTE item may be important to display while it is active,
+ the rate at which other non-CNAME items such as NAME are transmitted
+ might be reduced so that the NOTE item can take that part of the RTCP
+ bandwidth. When the transient message becomes inactive, the NOTE
+ item SHOULD continue to be transmitted a few times at the same
+ repetition rate but with a string of length zero to signal the
+ receivers. However, receivers SHOULD also consider the NOTE item
+ inactive if it is not received for a small multiple of the repetition
+ rate, or perhaps 20-30 RTCP intervals.
+
+6.5.8 PRIV: Private Extensions SDES Item
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | PRIV=8 | length | prefix length |prefix string...
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ ... | value string ...
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ This item is used to define experimental or application-specific SDES
+ extensions. The item contains a prefix consisting of a length-string
+ pair, followed by the value string filling the remainder of the item
+ and carrying the desired information. The prefix length field is 8
+ bits long. The prefix string is a name chosen by the person defining
+ the PRIV item to be unique with respect to other PRIV items this
+ application might receive. The application creator might choose to
+ use the application name plus an additional subtype identification if
+ needed. Alternatively, it is RECOMMENDED that others choose a name
+ based on the entity they represent, then coordinate the use of the
+ name within that entity.
+
+ Note that the prefix consumes some space within the item's total
+ length of 255 octets, so the prefix should be kept as short as
+ possible. This facility and the constrained RTCP bandwidth SHOULD
+ NOT be overloaded; it is not intended to satisfy all the control
+ communication requirements of all applications.
+
+ SDES PRIV prefixes will not be registered by IANA. If some form of
+ the PRIV item proves to be of general utility, it SHOULD instead be
+ assigned a regular SDES item type registered with IANA so that no
+ prefix is required. This simplifies use and increases transmission
+ efficiency.
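+
+   As a non-normative illustration, the PRIV layout above might be
+   decomposed in C roughly as follows; the struct and function names
+   are hypothetical and error handling is kept minimal.
+
+     #include <stddef.h>
+     #include <stdint.h>
+
+     /* Hypothetical view of one SDES PRIV item body, i.e., the octets
+        following the 8-bit item type and 8-bit length fields. */
+     struct sdes_priv {
+         const uint8_t *prefix;  /* prefix string, not NUL-terminated */
+         size_t prefix_len;
+         const uint8_t *value;   /* value string, not NUL-terminated */
+         size_t value_len;
+     };
+
+     /* 'data'/'len' cover the item body; returns 0 on success or -1
+        if the prefix length field exceeds the item. */
+     static int sdes_priv_parse(const uint8_t *data, size_t len,
+                                struct sdes_priv *out) {
+         if (len < 1 || data[0] > len - 1)
+             return -1;
+         out->prefix_len = data[0];
+         out->prefix = data + 1;
+         out->value = data + 1 + out->prefix_len;
+         out->value_len = len - 1 - out->prefix_len;
+         return 0;
+     }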
+
+6.6 BYE: Goodbye RTCP Packet
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |V=2|P| SC | PT=BYE=203 | length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | SSRC/CSRC |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ : ... :
+ +=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+=+
+(opt) | length | reason for leaving ...
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ The BYE packet indicates that one or more sources are no longer
+ active.
+
+ version (V), padding (P), length:
+ As described for the SR packet (see Section 6.4.1).
+
+ packet type (PT): 8 bits
+ Contains the constant 203 to identify this as an RTCP BYE packet.
+
+ source count (SC): 5 bits
+ The number of SSRC/CSRC identifiers included in this BYE packet.
+ A count value of zero is valid, but useless.
+
+ The rules for when a BYE packet should be sent are specified in
+ Sections 6.3.7 and 8.2.
+
+ If a BYE packet is received by a mixer, the mixer SHOULD forward the
+ BYE packet with the SSRC/CSRC identifier(s) unchanged. If a mixer
+ shuts down, it SHOULD send a BYE packet listing all contributing
+ sources it handles, as well as its own SSRC identifier. Optionally,
+ the BYE packet MAY include an 8-bit octet count followed by that many
+ octets of text indicating the reason for leaving, e.g., "camera
+ malfunction" or "RTP loop detected". The string has the same
+ encoding as that described for SDES. If the string fills the packet
+ to the next 32-bit boundary, the string is not null terminated. If
+ not, the BYE packet MUST be padded with null octets to the next 32-
+ bit boundary. This padding is separate from that indicated by the P
+ bit in the RTCP header.
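+
+   As a non-normative sketch, the optional reason string and its
+   padding rule might be serialized as follows; the helper name is
+   hypothetical, and the fixed BYE header, SSRC/CSRC list and final
+   RTCP length field are assumed to be written by the caller.
+
+     #include <stdint.h>
+     #include <string.h>
+
+     /* Append the optional "reason for leaving" part to 'buf', which
+        already holds the BYE header and SSRC/CSRC list.  Returns the
+        number of octets appended, always a multiple of four. */
+     static size_t bye_append_reason(uint8_t *buf, const char *reason) {
+         size_t n = strlen(reason);
+         size_t padded;
+
+         if (n > 255)
+             n = 255;                    /* the length field is 8 bits */
+         buf[0] = (uint8_t) n;
+         memcpy(buf + 1, reason, n);
+         padded = ((1 + n) + 3) & ~(size_t) 3;  /* round to 32 bits */
+         memset(buf + 1 + n, 0, padded - (1 + n));  /* null padding */
+         return padded;
+     }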
+
+6.7 APP: Application-Defined RTCP Packet
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |V=2|P| subtype | PT=APP=204 | length |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | SSRC/CSRC |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | name (ASCII) |
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | application-dependent data ...
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ The APP packet is intended for experimental use as new applications
+ and new features are developed, without requiring packet type value
+ registration. APP packets with unrecognized names SHOULD be ignored.
+ After testing and if wider use is justified, it is RECOMMENDED that
+ each APP packet be redefined without the subtype and name fields and
+ registered with IANA using an RTCP packet type.
+
+ version (V), padding (P), length:
+ As described for the SR packet (see Section 6.4.1).
+
+ subtype: 5 bits
+ May be used as a subtype to allow a set of APP packets to be
+ defined under one unique name, or for any application-dependent
+ data.
+
+ packet type (PT): 8 bits
+ Contains the constant 204 to identify this as an RTCP APP packet.
+
+ name: 4 octets
+ A name chosen by the person defining the set of APP packets to be
+ unique with respect to other APP packets this application might
+ receive. The application creator might choose to use the
+ application name, and then coordinate the allocation of subtype
+ values to others who want to define new packet types for the
+ application. Alternatively, it is RECOMMENDED that others choose
+ a name based on the entity they represent, then coordinate the use
+ of the name within that entity. The name is interpreted as a
+ sequence of four ASCII characters, with uppercase and lowercase
+ characters treated as distinct.
+
+ application-dependent data: variable length
+ Application-dependent data may or may not appear in an APP packet.
+ It is interpreted by the application and not RTP itself. It MUST
+ be a multiple of 32 bits long.
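+
+   Purely as an illustration (the names are hypothetical), the fixed
+   twelve octets of an APP packet could be filled in like this, with
+   the application-dependent data appended afterwards by the caller:
+
+     #include <stdint.h>
+     #include <string.h>
+
+     /* 'length' is the RTCP length field, i.e., the packet length in
+        32-bit words minus one; 'name' is exactly four ASCII chars. */
+     static void app_write_fixed(uint8_t out[12], unsigned subtype,
+                                 uint16_t length, uint32_t ssrc,
+                                 const char name[4]) {
+         out[0] = (uint8_t) ((2 << 6) | (subtype & 0x1f)); /* V=2, P=0 */
+         out[1] = 204;                                     /* PT=APP   */
+         out[2] = (uint8_t) (length >> 8);                 /* length   */
+         out[3] = (uint8_t) length;
+         out[4] = (uint8_t) (ssrc >> 24);                  /* SSRC/CSRC */
+         out[5] = (uint8_t) (ssrc >> 16);
+         out[6] = (uint8_t) (ssrc >> 8);
+         out[7] = (uint8_t) ssrc;
+         memcpy(out + 8, name, 4);                         /* name     */
+     }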
+
+7. RTP Translators and Mixers
+
+ In addition to end systems, RTP supports the notion of "translators"
+ and "mixers", which could be considered as "intermediate systems" at
+ the RTP level. Although this support adds some complexity to the
+ protocol, the need for these functions has been clearly established
+ by experiments with multicast audio and video applications in the
+ Internet. Example uses of translators and mixers given in Section
+ 2.3 stem from the presence of firewalls and low bandwidth
+ connections, both of which are likely to remain.
+
+7.1 General Description
+
+ An RTP translator/mixer connects two or more transport-level
+ "clouds". Typically, each cloud is defined by a common network and
+ transport protocol (e.g., IP/UDP) plus a multicast address and
+ transport level destination port or a pair of unicast addresses and
+ ports. (Network-level protocol translators, such as IP version 4 to
+ IP version 6, may be present within a cloud invisibly to RTP.) One
+ system may serve as a translator or mixer for a number of RTP
+ sessions, but each is considered a logically separate entity.
+
+ In order to avoid creating a loop when a translator or mixer is
+ installed, the following rules MUST be observed:
+
+ o Each of the clouds connected by translators and mixers
+ participating in one RTP session either MUST be distinct from all
+ the others in at least one of these parameters (protocol, address,
+ port), or MUST be isolated at the network level from the others.
+
+ o A derivative of the first rule is that there MUST NOT be multiple
+ translators or mixers connected in parallel unless by some
+ arrangement they partition the set of sources to be forwarded.
+
+ Similarly, all RTP end systems that can communicate through one or
+ more RTP translators or mixers share the same SSRC space, that is,
+ the SSRC identifiers MUST be unique among all these end systems.
+ Section 8.2 describes the collision resolution algorithm by which
+ SSRC identifiers are kept unique and loops are detected.
+
+ There may be many varieties of translators and mixers designed for
+ different purposes and applications. Some examples are to add or
+ remove encryption, change the encoding of the data or the underlying
+ protocols, or replicate between a multicast address and one or more
+ unicast addresses. The distinction between translators and mixers is
+ that a translator passes through the data streams from different
+ sources separately, whereas a mixer combines them to form one new
+ stream:
+
+ Translator: Forwards RTP packets with their SSRC identifier
+ intact; this makes it possible for receivers to identify
+ individual sources even though packets from all the sources pass
+ through the same translator and carry the translator's network
+ source address. Some kinds of translators will pass through the
+ data untouched, but others MAY change the encoding of the data and
+ thus the RTP data payload type and timestamp. If multiple data
+ packets are re-encoded into one, or vice versa, a translator MUST
+ assign new sequence numbers to the outgoing packets. Losses in
+ the incoming packet stream may induce corresponding gaps in the
+ outgoing sequence numbers. Receivers cannot detect the presence
+ of a translator unless they know by some other means what payload
+ type or transport address was used by the original source.
+
+ Mixer: Receives streams of RTP data packets from one or more
+ sources, possibly changes the data format, combines the streams in
+ some manner and then forwards the combined stream. Since the
+ timing among multiple input sources will not generally be
+ synchronized, the mixer will make timing adjustments among the
+ streams and generate its own timing for the combined stream, so it
+ is the synchronization source. Thus, all data packets forwarded
+ by a mixer MUST be marked with the mixer's own SSRC identifier.
+ In order to preserve the identity of the original sources
+ contributing to the mixed packet, the mixer SHOULD insert their
+ SSRC identifiers into the CSRC identifier list following the fixed
+ RTP header of the packet. A mixer that is also itself a
+ contributing source for some packet SHOULD explicitly include its
+ own SSRC identifier in the CSRC list for that packet.
+
+ For some applications, it MAY be acceptable for a mixer not to
+ identify sources in the CSRC list. However, this introduces the
+ danger that loops involving those sources could not be detected.
+
+ The advantage of a mixer over a translator for applications like
+ audio is that the output bandwidth is limited to that of one source
+ even when multiple sources are active on the input side. This may be
+ important for low-bandwidth links. The disadvantage is that
+ receivers on the output side don't have any control over which
+ sources are passed through or muted, unless some mechanism is
+ implemented for remote control of the mixer. The regeneration of
+ synchronization information by mixers also means that receivers can't
+ do inter-media synchronization of the original streams. A multi-
+ media mixer could do it.
+
+ [E1] [E6]
+ | |
+ E1:17 | E6:15 |
+ | | E6:15
+ V M1:48 (1,17) M1:48 (1,17) V M1:48 (1,17)
+ (M1)-------------><T1>-----------------><T2>-------------->[E7]
+ ^ ^ E4:47 ^ E4:47
+ E2:1 | E4:47 | | M3:89 (64,45)
+ | | |
+ [E2] [E4] M3:89 (64,45) |
+ | legend:
+ [E3] --------->(M2)----------->(M3)------------| [End system]
+ E3:64 M2:12 (64) ^ (Mixer)
+ | E5:45 <Translator>
+ |
+ [E5] source: SSRC (CSRCs)
+ ------------------->
+
+ Figure 3: Sample RTP network with end systems, mixers and translators
+
+ A collection of mixers and translators is shown in Fig. 3 to
+ illustrate their effect on SSRC and CSRC identifiers. In the figure,
+ end systems are shown as rectangles (named E), translators as
+ triangles (named T) and mixers as ovals (named M). The notation "M1:
+   48(1,17)" designates a packet originating from mixer M1, identified by
+ M1's (random) SSRC value of 48 and two CSRC identifiers, 1 and 17,
+ copied from the SSRC identifiers of packets from E1 and E2.
+
+7.2 RTCP Processing in Translators
+
+ In addition to forwarding data packets, perhaps modified, translators
+ and mixers MUST also process RTCP packets. In many cases, they will
+ take apart the compound RTCP packets received from end systems to
+
+ aggregate SDES information and to modify the SR or RR packets.
+ Retransmission of this information may be triggered by the packet
+ arrival or by the RTCP interval timer of the translator or mixer
+ itself.
+
+ A translator that does not modify the data packets, for example one
+ that just replicates between a multicast address and a unicast
+ address, MAY simply forward RTCP packets unmodified as well. A
+ translator that transforms the payload in some way MUST make
+ corresponding transformations in the SR and RR information so that it
+ still reflects the characteristics of the data and the reception
+ quality. These translators MUST NOT simply forward RTCP packets. In
+ general, a translator SHOULD NOT aggregate SR and RR packets from
+ different sources into one packet since that would reduce the
+ accuracy of the propagation delay measurements based on the LSR and
+ DLSR fields.
+
+ SR sender information: A translator does not generate its own
+ sender information, but forwards the SR packets received from one
+ cloud to the others. The SSRC is left intact but the sender
+ information MUST be modified if required by the translation. If a
+ translator changes the data encoding, it MUST change the "sender's
+ byte count" field. If it also combines several data packets into
+ one output packet, it MUST change the "sender's packet count"
+ field. If it changes the timestamp frequency, it MUST change the
+ "RTP timestamp" field in the SR packet.
+
+ SR/RR reception report blocks: A translator forwards reception
+ reports received from one cloud to the others. Note that these
+ flow in the direction opposite to the data. The SSRC is left
+ intact. If a translator combines several data packets into one
+ output packet, and therefore changes the sequence numbers, it MUST
+ make the inverse manipulation for the packet loss fields and the
+ "extended last sequence number" field. This may be complex. In
+ the extreme case, there may be no meaningful way to translate the
+ reception reports, so the translator MAY pass on no reception
+ report at all or a synthetic report based on its own reception.
+ The general rule is to do what makes sense for a particular
+ translation.
+
+ A translator does not require an SSRC identifier of its own, but
+ MAY choose to allocate one for the purpose of sending reports
+ about what it has received. These would be sent to all the
+ connected clouds, each corresponding to the translation of the
+ data stream as sent to that cloud, since reception reports are
+ normally multicast to all participants.
+
+ SDES: Translators typically forward without change the SDES
+ information they receive from one cloud to the others, but MAY,
+ for example, decide to filter non-CNAME SDES information if
+ bandwidth is limited. The CNAMEs MUST be forwarded to allow SSRC
+ identifier collision detection to work. A translator that
+ generates its own RR packets MUST send SDES CNAME information
+ about itself to the same clouds that it sends those RR packets.
+
+ BYE: Translators forward BYE packets unchanged. A translator
+ that is about to cease forwarding packets SHOULD send a BYE packet
+ to each connected cloud containing all the SSRC identifiers that
+ were previously being forwarded to that cloud, including the
+ translator's own SSRC identifier if it sent reports of its own.
+
+ APP: Translators forward APP packets unchanged.
+
+7.3 RTCP Processing in Mixers
+
+ Since a mixer generates a new data stream of its own, it does not
+ pass through SR or RR packets at all and instead generates new
+ information for both sides.
+
+ SR sender information: A mixer does not pass through sender
+ information from the sources it mixes because the characteristics
+ of the source streams are lost in the mix. As a synchronization
+ source, the mixer SHOULD generate its own SR packets with sender
+ information about the mixed data stream and send them in the same
+ direction as the mixed stream.
+
+ SR/RR reception report blocks: A mixer generates its own
+ reception reports for sources in each cloud and sends them out
+ only to the same cloud. It MUST NOT send these reception reports
+ to the other clouds and MUST NOT forward reception reports from
+ one cloud to the others because the sources would not be SSRCs
+ there (only CSRCs).
+
+ SDES: Mixers typically forward without change the SDES
+ information they receive from one cloud to the others, but MAY,
+ for example, decide to filter non-CNAME SDES information if
+ bandwidth is limited. The CNAMEs MUST be forwarded to allow SSRC
+ identifier collision detection to work. (An identifier in a CSRC
+ list generated by a mixer might collide with an SSRC identifier
+ generated by an end system.) A mixer MUST send SDES CNAME
+ information about itself to the same clouds that it sends SR or RR
+ packets.
+
+ Since mixers do not forward SR or RR packets, they will typically
+ be extracting SDES packets from a compound RTCP packet. To
+ minimize overhead, chunks from the SDES packets MAY be aggregated
+ into a single SDES packet which is then stacked on an SR or RR
+ packet originating from the mixer. A mixer which aggregates SDES
+ packets will use more RTCP bandwidth than an individual source
+ because the compound packets will be longer, but that is
+ appropriate since the mixer represents multiple sources.
+ Similarly, a mixer which passes through SDES packets as they are
+ received will be transmitting RTCP packets at higher than the
+ single source rate, but again that is correct since the packets
+ come from multiple sources. The RTCP packet rate may be different
+ on each side of the mixer.
+
+ A mixer that does not insert CSRC identifiers MAY also refrain
+ from forwarding SDES CNAMEs. In this case, the SSRC identifier
+ spaces in the two clouds are independent. As mentioned earlier,
+ this mode of operation creates a danger that loops can't be
+ detected.
+
+ BYE: Mixers MUST forward BYE packets. A mixer that is about to
+ cease forwarding packets SHOULD send a BYE packet to each
+ connected cloud containing all the SSRC identifiers that were
+ previously being forwarded to that cloud, including the mixer's
+ own SSRC identifier if it sent reports of its own.
+
+ APP: The treatment of APP packets by mixers is application-specific.
+
+7.4 Cascaded Mixers
+
+ An RTP session may involve a collection of mixers and translators as
+ shown in Fig. 3. If two mixers are cascaded, such as M2 and M3 in
+ the figure, packets received by a mixer may already have been mixed
+ and may include a CSRC list with multiple identifiers. The second
+ mixer SHOULD build the CSRC list for the outgoing packet using the
+ CSRC identifiers from already-mixed input packets and the SSRC
+ identifiers from unmixed input packets. This is shown in the output
+ arc from mixer M3 labeled M3:89(64,45) in the figure. As in the case
+ of mixers that are not cascaded, if the resulting CSRC list has more
+ than 15 identifiers, the remainder cannot be included.
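+
+   A minimal sketch (with a hypothetical helper name) of how a mixer,
+   cascaded or not, might accumulate the outgoing CSRC list while
+   observing the fifteen-identifier limit:
+
+     #include <stdint.h>
+
+     #define RTP_MAX_CSRC 15        /* the CC field is only 4 bits */
+
+     /* Append one contributing identifier -- the SSRC of an unmixed
+        input, or a CSRC copied from an already-mixed input -- to the
+        outgoing list; identifiers past the fifteenth are dropped. */
+     static void csrc_list_add(uint32_t list[RTP_MAX_CSRC],
+                               unsigned *count, uint32_t id) {
+         if (*count < RTP_MAX_CSRC)
+             list[(*count)++] = id;
+     }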
+
+8. SSRC Identifier Allocation and Use
+
+ The SSRC identifier carried in the RTP header and in various fields
+ of RTCP packets is a random 32-bit number that is required to be
+ globally unique within an RTP session. It is crucial that the number
+ be chosen with care in order that participants on the same network or
+ starting at the same time are not likely to choose the same number.
+
+ It is not sufficient to use the local network address (such as an
+ IPv4 address) for the identifier because the address may not be
+ unique. Since RTP translators and mixers enable interoperation among
+ multiple networks with different address spaces, the allocation
+ patterns for addresses within two spaces might result in a much
+ higher rate of collision than would occur with random allocation.
+
+ Multiple sources running on one host would also conflict.
+
+ It is also not sufficient to obtain an SSRC identifier simply by
+ calling random() without carefully initializing the state. An
+ example of how to generate a random identifier is presented in
+ Appendix A.6.
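+
+   As one hedged illustration of the point above (Appendix A.6 gives
+   an MD5-based procedure), an implementation on a system that offers
+   a kernel entropy source could simply draw the identifier from it:
+
+     #include <stdint.h>
+     #include <stdio.h>
+
+     /* Returns 0 and a random 32-bit SSRC candidate in '*out', or -1
+        if the entropy source is unavailable. */
+     static int ssrc_random(uint32_t *out) {
+         FILE *f = fopen("/dev/urandom", "rb");
+
+         if (!f)
+             return -1;
+         if (fread(out, sizeof(*out), 1, f) != 1) {
+             fclose(f);
+             return -1;
+         }
+         fclose(f);
+         return 0;
+     }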
+
+8.1 Probability of Collision
+
+ Since the identifiers are chosen randomly, it is possible that two or
+ more sources will choose the same number. Collision occurs with the
+ highest probability when all sources are started simultaneously, for
+ example when triggered automatically by some session management
+ event. If N is the number of sources and L the length of the
+ identifier (here, 32 bits), the probability that two sources
+ independently pick the same value can be approximated for large N
+ [26] as 1 - exp(-N**2 / 2**(L+1)). For N=1000, the probability is
+ roughly 10**-4.
+
+ The typical collision probability is much lower than the worst-case
+ above. When one new source joins an RTP session in which all the
+ other sources already have unique identifiers, the probability of
+ collision is just the fraction of numbers used out of the space.
+ Again, if N is the number of sources and L the length of the
+ identifier, the probability of collision is N / 2**L. For N=1000,
+ the probability is roughly 2*10**-7.
+
+ The probability of collision is further reduced by the opportunity
+ for a new source to receive packets from other participants before
+ sending its first packet (either data or control). If the new source
+ keeps track of the other participants (by SSRC identifier), then
+ before transmitting its first packet the new source can verify that
+ its identifier does not conflict with any that have been received, or
+ else choose again.
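+
+   For concreteness, the two approximations above can be evaluated
+   with a short program such as the following (link with -lm):
+
+     #include <math.h>
+     #include <stdio.h>
+
+     int main(void) {
+         double N = 1000.0, L = 32.0;
+
+         /* all N sources start simultaneously */
+         double p_all = 1.0 - exp(-(N * N) / pow(2.0, L + 1.0));
+         /* one new source joins N sources with unique identifiers */
+         double p_one = N / pow(2.0, L);
+
+         printf("simultaneous start: %g\n", p_all);   /* ~1.2e-4 */
+         printf("single new source:  %g\n", p_one);   /* ~2.3e-7 */
+         return 0;
+     }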
+
+8.2 Collision Resolution and Loop Detection
+
+ Although the probability of SSRC identifier collision is low, all RTP
+ implementations MUST be prepared to detect collisions and take the
+ appropriate actions to resolve them. If a source discovers at any
+ time that another source is using the same SSRC identifier as its
+ own, it MUST send an RTCP BYE packet for the old identifier and
+ choose another random one. (As explained below, this step is taken
+ only once in case of a loop.) If a receiver discovers that two other
+ sources are colliding, it MAY keep the packets from one and discard
+ the packets from the other when this can be detected by different
+ source transport addresses or CNAMEs. The two sources are expected
+ to resolve the collision so that the situation doesn't last.
+
+ Because the random SSRC identifiers are kept globally unique for each
+ RTP session, they can also be used to detect loops that may be
+ introduced by mixers or translators. A loop causes duplication of
+ data and control information, either unmodified or possibly mixed, as
+ in the following examples:
+
+ o A translator may incorrectly forward a packet to the same
+ multicast group from which it has received the packet, either
+ directly or through a chain of translators. In that case, the
+ same packet appears several times, originating from different
+ network sources.
+
+ o Two translators incorrectly set up in parallel, i.e., with the
+ same multicast groups on both sides, would both forward packets
+ from one multicast group to the other. Unidirectional translators
+ would produce two copies; bidirectional translators would form a
+ loop.
+
+ o A mixer can close a loop by sending to the same transport
+ destination upon which it receives packets, either directly or
+ through another mixer or translator. In this case a source might
+ show up both as an SSRC on a data packet and a CSRC in a mixed
+ data packet.
+
+ A source may discover that its own packets are being looped, or that
+ packets from another source are being looped (a third-party loop).
+ Both loops and collisions in the random selection of a source
+ identifier result in packets arriving with the same SSRC identifier
+ but a different source transport address, which may be that of the
+ end system originating the packet or an intermediate system.
+
+ Therefore, if a source changes its source transport address, it MAY
+ also choose a new SSRC identifier to avoid being interpreted as a
+ looped source. (This is not MUST because in some applications of RTP
+ sources may be expected to change addresses during a session.) Note
+ that if a translator restarts and consequently changes the source
+ transport address (e.g., changes the UDP source port number) on which
+ it forwards packets, then all those packets will appear to receivers
+ to be looped because the SSRC identifiers are applied by the original
+ source and will not change. This problem can be avoided by keeping
+ the source transport address fixed across restarts, but in any case
+ will be resolved after a timeout at the receivers.
+
+ Loops or collisions occurring on the far side of a translator or
+ mixer cannot be detected using the source transport address if all
+   copies of the packets go through the translator or mixer; however,
+ collisions may still be detected when chunks from two RTCP SDES
+ packets contain the same SSRC identifier but different CNAMEs.
+
+ To detect and resolve these conflicts, an RTP implementation MUST
+ include an algorithm similar to the one described below, though the
+ implementation MAY choose a different policy for which packets from
+ colliding third-party sources are kept. The algorithm described
+ below ignores packets from a new source or loop that collide with an
+ established source. It resolves collisions with the participant's
+ own SSRC identifier by sending an RTCP BYE for the old identifier and
+ choosing a new one. However, when the collision was induced by a
+ loop of the participant's own packets, the algorithm will choose a
+ new identifier only once and thereafter ignore packets from the
+ looping source transport address. This is required to avoid a flood
+ of BYE packets.
+
+ This algorithm requires keeping a table indexed by the source
+ identifier and containing the source transport addresses from the
+ first RTP packet and first RTCP packet received with that identifier,
+ along with other state for that source. Two source transport
+ addresses are required since, for example, the UDP source port
+ numbers may be different on RTP and RTCP packets. However, it may be
+ assumed that the network address is the same in both source transport
+ addresses.
+
+ Each SSRC or CSRC identifier received in an RTP or RTCP packet is
+ looked up in the source identifier table in order to process that
+ data or control information. The source transport address from the
+ packet is compared to the corresponding source transport address in
+ the table to detect a loop or collision if they don't match. For
+ control packets, each element with its own SSRC identifier, for
+ example an SDES chunk, requires a separate lookup. (The SSRC
+ identifier in a reception report block is an exception because it
+ identifies a source heard by the reporter, and that SSRC identifier
+ is unrelated to the source transport address of the RTCP packet sent
+ by the reporter.) If the SSRC or CSRC is not found, a new entry is
+ created. These table entries are removed when an RTCP BYE packet is
+ received with the corresponding SSRC identifier and validated by a
+ matching source transport address, or after no packets have arrived
+ for a relatively long time (see Section 6.2.1).
+
+ Note that if two sources on the same host are transmitting with the
+ same source identifier at the time a receiver begins operation, it
+ would be possible that the first RTP packet received came from one of
+ the sources while the first RTCP packet received came from the other.
+ This would cause the wrong RTCP information to be associated with the
+ RTP data, but this situation should be sufficiently rare and harmless
+ that it may be disregarded.
+
+ In order to track loops of the participant's own data packets, the
+ implementation MUST also keep a separate list of source transport
+ addresses (not identifiers) that have been found to be conflicting.
+ As in the source identifier table, two source transport addresses
+ MUST be kept to separately track conflicting RTP and RTCP packets.
+ Note that the conflicting address list should be short, usually
+ empty. Each element in this list stores the source addresses plus
+ the time when the most recent conflicting packet was received. An
+ element MAY be removed from the list when no conflicting packet has
+ arrived from that source for a time on the order of 10 RTCP report
+ intervals (see Section 6.2).
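+
+   The two tables described above might look roughly like this in C;
+   the names and the use of sockaddr_storage are illustrative
+   assumptions, not requirements of this specification.
+
+     #include <stdint.h>
+     #include <time.h>
+     #include <sys/socket.h>
+
+     /* One entry per known SSRC/CSRC; the transport addresses are
+        kept separately for RTP and RTCP since the source ports may
+        differ between the two. */
+     struct source_entry {
+         uint32_t ssrc;
+         struct sockaddr_storage rtp_addr;   /* first RTP packet  */
+         struct sockaddr_storage rtcp_addr;  /* first RTCP packet */
+         char cname[256];                    /* SDES CNAME, if known */
+         time_t last_heard;                  /* Section 6.2.1 timeout */
+         /* ... other per-source state ... */
+     };
+
+     /* Source transport addresses found to conflict with the
+        participant's own packets; usually empty. */
+     struct conflict_entry {
+         struct sockaddr_storage rtp_addr;
+         struct sockaddr_storage rtcp_addr;
+         time_t last_conflict;   /* expire after ~10 report intervals */
+     };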
+
+ For the algorithm as shown, it is assumed that the participant's own
+ source identifier and state are included in the source identifier
+ table. The algorithm could be restructured to first make a separate
+ comparison against the participant's own source identifier.
+
+ if (SSRC or CSRC identifier is not found in the source
+ identifier table) {
+ create a new entry storing the data or control source
+ transport address, the SSRC or CSRC and other state;
+ }
+
+ /* Identifier is found in the table */
+
+ else if (table entry was created on receipt of a control packet
+ and this is the first data packet or vice versa) {
+ store the source transport address from this packet;
+ }
+ else if (source transport address from the packet does not match
+ the one saved in the table entry for this identifier) {
+
+ /* An identifier collision or a loop is indicated */
+
+ if (source identifier is not the participant's own) {
+ /* OPTIONAL error counter step */
+ if (source identifier is from an RTCP SDES chunk
+ containing a CNAME item that differs from the CNAME
+ in the table entry) {
+ count a third-party collision;
+ } else {
+ count a third-party loop;
+ }
+ abort processing of data packet or control element;
+ /* MAY choose a different policy to keep new source */
+ }
+
+ /* A collision or loop of the participant's own packets */
+
+ else if (source transport address is found in the list of
+ conflicting data or control source transport
+ addresses) {
+ /* OPTIONAL error counter step */
+ if (source identifier is not from an RTCP SDES chunk
+ containing a CNAME item or CNAME is the
+ participant's own) {
+ count occurrence of own traffic looped;
+ }
+ mark current time in conflicting address list entry;
+ abort processing of data packet or control element;
+ }
+
+ /* New collision, change SSRC identifier */
+
+ else {
+ log occurrence of a collision;
+ create a new entry in the conflicting data or control
+ source transport address list and mark current time;
+ send an RTCP BYE packet with the old SSRC identifier;
+ choose a new SSRC identifier;
+ create a new entry in the source identifier table with
+ the old SSRC plus the source transport address from
+ the data or control packet being processed;
+ }
+ }
+
+ In this algorithm, packets from a newly conflicting source address
+ will be ignored and packets from the original source address will be
+ kept. If no packets arrive from the original source for an extended
+ period, the table entry will be timed out and the new source will be
+
+ able to take over. This might occur if the original source detects
+ the collision and moves to a new source identifier, but in the usual
+ case an RTCP BYE packet will be received from the original source to
+ delete the state without having to wait for a timeout.
+
+ If the original source address was received through a mixer (i.e.,
+ learned as a CSRC) and later the same source is received directly,
+ the receiver may be well advised to switch to the new source address
+ unless other sources in the mix would be lost. Furthermore, for
+ applications such as telephony in which some sources such as mobile
+ entities may change addresses during the course of an RTP session,
+ the RTP implementation SHOULD modify the collision detection
+ algorithm to accept packets from the new source transport address.
+ To guard against flip-flopping between addresses if a genuine
+ collision does occur, the algorithm SHOULD include some means to
+ detect this case and avoid switching.
+
+ When a new SSRC identifier is chosen due to a collision, the
+ candidate identifier SHOULD first be looked up in the source
+ identifier table to see if it was already in use by some other
+ source. If so, another candidate MUST be generated and the process
+ repeated.
+
+ A loop of data packets to a multicast destination can cause severe
+ network flooding. All mixers and translators MUST implement a loop
+ detection algorithm like the one here so that they can break loops.
+ This should limit the excess traffic to no more than one duplicate
+ copy of the original traffic, which may allow the session to continue
+ so that the cause of the loop can be found and fixed. However, in
+ extreme cases where a mixer or translator does not properly break the
+ loop and high traffic levels result, it may be necessary for end
+ systems to cease transmitting data or control packets entirely. This
+ decision may depend upon the application. An error condition SHOULD
+ be indicated as appropriate. Transmission MAY be attempted again
+ periodically after a long, random time (on the order of minutes).
+
+8.3 Use with Layered Encodings
+
+ For layered encodings transmitted on separate RTP sessions (see
+ Section 2.4), a single SSRC identifier space SHOULD be used across
+ the sessions of all layers and the core (base) layer SHOULD be used
+ for SSRC identifier allocation and collision resolution. When a
+ source discovers that it has collided, it transmits an RTCP BYE
+ packet on only the base layer but changes the SSRC identifier to the
+ new value in all layers.
+
+9. Security
+
+ Lower layer protocols may eventually provide all the security
+ services that may be desired for applications of RTP, including
+ authentication, integrity, and confidentiality. These services have
+ been specified for IP in [27]. Since the initial audio and video
+ applications using RTP needed a confidentiality service before such
+ services were available for the IP layer, the confidentiality service
+ described in the next section was defined for use with RTP and RTCP.
+ That description is included here to codify existing practice. New
+ applications of RTP MAY implement this RTP-specific confidentiality
+ service for backward compatibility, and/or they MAY implement
+ alternative security services. The overhead on the RTP protocol for
+ this confidentiality service is low, so the penalty will be minimal
+ if this service is obsoleted by other services in the future.
+
+ Alternatively, other services, other implementations of services and
+ other algorithms may be defined for RTP in the future. In
+ particular, an RTP profile called Secure Real-time Transport Protocol
+ (SRTP) [28] is being developed to provide confidentiality of the RTP
+ payload while leaving the RTP header in the clear so that link-level
+ header compression algorithms can still operate. It is expected that
+ SRTP will be the correct choice for many applications. SRTP is based
+ on the Advanced Encryption Standard (AES) and provides stronger
+ security than the service described here. No claim is made that the
+ methods presented here are appropriate for a particular security
+ need. A profile may specify which services and algorithms should be
+ offered by applications, and may provide guidance as to their
+ appropriate use.
+
+ Key distribution and certificates are outside the scope of this
+ document.
+
+9.1 Confidentiality
+
+ Confidentiality means that only the intended receiver(s) can decode
+ the received packets; for others, the packet contains no useful
+ information. Confidentiality of the content is achieved by
+ encryption.
+
+ When it is desired to encrypt RTP or RTCP according to the method
+ specified in this section, all the octets that will be encapsulated
+ for transmission in a single lower-layer packet are encrypted as a
+ unit. For RTCP, a 32-bit random number redrawn for each unit MUST be
+ prepended to the unit before encryption. For RTP, no prefix is
+ prepended; instead, the sequence number and timestamp fields are
+ initialized with random offsets. This is considered to be a weak
+ initialization vector (IV) because of poor randomness properties. In
+ addition, if the subsequent field, the SSRC, can be manipulated by an
+ enemy, there is further weakness of the encryption method.
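+
+   A minimal sketch of the framing described above for RTCP; the
+   cipher is left as an abstract callback (DES-CBC by default, see
+   below) and the names are hypothetical.  Padding to the cipher's
+   block size is assumed to be handled via the P bit as in Section
+   5.1.
+
+     #include <stdint.h>
+     #include <string.h>
+
+     /* Encrypt one compound RTCP packet as a unit: prepend a freshly
+        drawn 32-bit random number, then hand the whole buffer to the
+        cipher.  Returns the number of octets placed in 'out'. */
+     static size_t rtcp_encrypt_unit(const uint8_t *pkt, size_t len,
+                                     uint32_t random_prefix,
+                                     void (*encrypt)(uint8_t *, size_t),
+                                     uint8_t *out) {
+         out[0] = (uint8_t) (random_prefix >> 24);
+         out[1] = (uint8_t) (random_prefix >> 16);
+         out[2] = (uint8_t) (random_prefix >> 8);
+         out[3] = (uint8_t) random_prefix;
+         memcpy(out + 4, pkt, len);
+         encrypt(out, len + 4);    /* prefix redrawn for each packet */
+         return len + 4;
+     }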
+
+ For RTCP, an implementation MAY segregate the individual RTCP packets
+ in a compound RTCP packet into two separate compound RTCP packets,
+ one to be encrypted and one to be sent in the clear. For example,
+ SDES information might be encrypted while reception reports were sent
+ in the clear to accommodate third-party monitors that are not privy
+ to the encryption key. In this example, depicted in Fig. 4, the SDES
+ information MUST be appended to an RR packet with no reports (and the
+ random number) to satisfy the requirement that all compound RTCP
+ packets begin with an SR or RR packet. The SDES CNAME item is
+ required in either the encrypted or unencrypted packet, but not both.
+ The same SDES information SHOULD NOT be carried in both packets as
+ this may compromise the encryption.
+
+ UDP packet UDP packet
+ ----------------------------- ------------------------------
+ [random][RR][SDES #CNAME ...] [SR #senderinfo #site1 #site2]
+ ----------------------------- ------------------------------
+ encrypted not encrypted
+
+ #: SSRC identifier
+
+ Figure 4: Encrypted and non-encrypted RTCP packets
+
+ The presence of encryption and the use of the correct key are
+ confirmed by the receiver through header or payload validity checks.
+ Examples of such validity checks for RTP and RTCP headers are given
+ in Appendices A.1 and A.2.
+
+ To be consistent with existing implementations of the initial
+ specification of RTP in RFC 1889, the default encryption algorithm is
+ the Data Encryption Standard (DES) algorithm in cipher block chaining
+ (CBC) mode, as described in Section 1.1 of RFC 1423 [29], except that
+ padding to a multiple of 8 octets is indicated as described for the P
+ bit in Section 5.1. The initialization vector is zero because random
+ values are supplied in the RTP header or by the random prefix for
+ compound RTCP packets. For details on the use of CBC initialization
+ vectors, see [30].
+
+ Implementations that support the encryption method specified here
+ SHOULD always support the DES algorithm in CBC mode as the default
+ cipher for this method to maximize interoperability. This method was
+ chosen because it has been demonstrated to be easy and practical to
+ use in experimental audio and video tools in operation on the
+ Internet. However, DES has since been found to be too easily broken.
+ It is RECOMMENDED that stronger encryption algorithms such as
+ Triple-DES be used in place of the default algorithm. Furthermore,
+ secure CBC mode requires that the first block of each packet be XORed
+ with a random, independent IV of the same size as the cipher's block
+ size. For RTCP, this is (partially) achieved by prepending each
+ packet with a 32-bit random number, independently chosen for each
+ packet. For RTP, the timestamp and sequence number start from random
+ values, but consecutive packets will not be independently randomized.
+ It should be noted that the randomness in both cases (RTP and RTCP)
+ is limited. High-security applications SHOULD consider other, more
+ conventional, protection means. Other encryption algorithms MAY be
+ specified dynamically for a session by non-RTP means. In particular,
+ the SRTP profile [28] based on AES is being developed to take into
+ account known plaintext and CBC plaintext manipulation concerns, and
+ will be the correct choice in the future.
+
+ As an alternative to encryption at the IP level or at the RTP level
+ as described above, profiles MAY define additional payload types for
+ encrypted encodings. Those encodings MUST specify how padding and
+ other aspects of the encryption are to be handled. This method
+ allows encrypting only the data while leaving the headers in the
+ clear for applications where that is desired. It may be particularly
+ useful for hardware devices that will handle both decryption and
+ decoding. It is also valuable for applications where link-level
+ compression of RTP and lower-layer headers is desired and
+ confidentiality of the payload (but not addresses) is sufficient
+ since encryption of the headers precludes compression.
+
+9.2 Authentication and Message Integrity
+
+ Authentication and message integrity services are not defined at the
+ RTP level since these services would not be directly feasible without
+ a key management infrastructure. It is expected that authentication
+ and integrity services will be provided by lower layer protocols.
+
+10. Congestion Control
+
+ All transport protocols used on the Internet need to address
+ congestion control in some way [31]. RTP is not an exception, but
+ because the data transported over RTP is often inelastic (generated
+ at a fixed or controlled rate), the means to control congestion in
+ RTP may be quite different from those for other transport protocols
+ such as TCP. In one sense, inelasticity reduces the risk of
+ congestion because the RTP stream will not expand to consume all
+ available bandwidth as a TCP stream can. However, inelasticity also
+ means that the RTP stream cannot arbitrarily reduce its load on the
+ network to eliminate congestion when it occurs.
+
+ Since RTP may be used for a wide variety of applications in many
+ different contexts, there is no single congestion control mechanism
+ that will work for all. Therefore, congestion control SHOULD be
+ defined in each RTP profile as appropriate. For some profiles, it
+ may be sufficient to include an applicability statement restricting
+ the use of that profile to environments where congestion is avoided
+ by engineering. For other profiles, specific methods such as data
+ rate adaptation based on RTCP feedback may be required.
+
+11. RTP over Network and Transport Protocols
+
+ This section describes issues specific to carrying RTP packets within
+ particular network and transport protocols. The following rules
+ apply unless superseded by protocol-specific definitions outside this
+ specification.
+
+ RTP relies on the underlying protocol(s) to provide demultiplexing of
+ RTP data and RTCP control streams. For UDP and similar protocols,
+ RTP SHOULD use an even destination port number and the corresponding
+ RTCP stream SHOULD use the next higher (odd) destination port number.
+ For applications that take a single port number as a parameter and
+ derive the RTP and RTCP port pair from that number, if an odd number
+ is supplied then the application SHOULD replace that number with the
+ next lower (even) number to use as the base of the port pair. For
+ applications in which the RTP and RTCP destination port numbers are
+ specified via explicit, separate parameters (using a signaling
+ protocol or other means), the application MAY disregard the
+ restrictions that the port numbers be even/odd and consecutive
+ although the use of an even/odd port pair is still encouraged. The
+ RTP and RTCP port numbers MUST NOT be the same since RTP relies on
+ the port numbers to demultiplex the RTP data and RTCP control
+ streams.
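+
+   As a non-normative illustration, the even/odd rule for deriving
+   the port pair from a single configured port number:
+
+     #include <stdint.h>
+
+     /* RTP uses the even port, RTCP the next higher (odd) port; an
+        odd input is first replaced by the next lower even number. */
+     static void derive_port_pair(uint16_t port,
+                                  uint16_t *rtp, uint16_t *rtcp) {
+         *rtp = (uint16_t) (port & ~1u);
+         *rtcp = (uint16_t) (*rtp + 1);
+     }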
+
+ In a unicast session, both participants need to identify a port pair
+ for receiving RTP and RTCP packets. Both participants MAY use the
+ same port pair. A participant MUST NOT assume that the source port
+ of the incoming RTP or RTCP packet can be used as the destination
+ port for outgoing RTP or RTCP packets. When RTP data packets are
+ being sent in both directions, each participant's RTCP SR packets
+ MUST be sent to the port that the other participant has specified for
+ reception of RTCP. The RTCP SR packets combine sender information
+ for the outgoing data plus reception report information for the
+ incoming data. If a side is not actively sending data (see Section
+ 6.4), an RTCP RR packet is sent instead.
+
+ It is RECOMMENDED that layered encoding applications (see Section
+ 2.4) use a set of contiguous port numbers. The port numbers MUST be
+ distinct because of a widespread deficiency in existing operating
+ systems that prevents use of the same port with multiple multicast
+ addresses, and for unicast, there is only one permissible address.
+ Thus for layer n, the data port is P + 2n, and the control port is P
+ + 2n + 1. When IP multicast is used, the addresses MUST also be
+ distinct because multicast routing and group membership are managed
+ on an address granularity. However, allocation of contiguous IP
+ multicast addresses cannot be assumed because some groups may require
+ different scopes and may therefore be allocated from different
+ address ranges.
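+
+   Continuing the sketch above, the contiguous port numbering for a
+   layered encoding (the helper is again hypothetical):
+
+     #include <stdint.h>
+
+     /* Ports for layer n, given an even base port P: data on P + 2n,
+        control on P + 2n + 1. */
+     static void layer_ports(uint16_t P, unsigned n,
+                             uint16_t *data, uint16_t *ctrl) {
+         *data = (uint16_t) (P + 2 * n);
+         *ctrl = (uint16_t) (*data + 1);
+     }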
+
+ The previous paragraph conflicts with the SDP specification, RFC 2327
+ [15], which says that it is illegal for both multiple addresses and
+ multiple ports to be specified in the same session description
+ because the association of addresses with ports could be ambiguous.
+ It is intended that this restriction will be relaxed in a revision of
+ RFC 2327 to allow an equal number of addresses and ports to be
+ specified with a one-to-one mapping implied.
+
+ RTP data packets contain no length field or other delineation,
+ therefore RTP relies on the underlying protocol(s) to provide a
+ length indication. The maximum length of RTP packets is limited only
+ by the underlying protocols.
+
+ If RTP packets are to be carried in an underlying protocol that
+ provides the abstraction of a continuous octet stream rather than
+ messages (packets), an encapsulation of the RTP packets MUST be
+ defined to provide a framing mechanism. Framing is also needed if
+ the underlying protocol may contain padding so that the extent of the
+ RTP payload cannot be determined. The framing mechanism is not
+ defined here.
+
+ A profile MAY specify a framing method to be used even when RTP is
+ carried in protocols that do provide framing in order to allow
+ carrying several RTP packets in one lower-layer protocol data unit,
+ such as a UDP packet. Carrying several RTP packets in one network or
+ transport packet reduces header overhead and may simplify
+ synchronization between different streams.
+
+12. Summary of Protocol Constants
+
+ This section contains a summary listing of the constants defined in
+ this specification.
+
+ The RTP payload type (PT) constants are defined in profiles rather
+ than this document. However, the octet of the RTP header which
+ contains the marker bit(s) and payload type MUST avoid the reserved
+ values 200 and 201 (decimal) to distinguish RTP packets from the RTCP
+ SR and RR packet types for the header validation procedure described
+ in Appendix A.1. For the standard definition of one marker bit and a
+ 7-bit payload type field as shown in this specification, this
+ restriction means that payload types 72 and 73 are reserved.
+
+12.1 RTCP Packet Types
+
+ abbrev. name value
+ SR sender report 200
+ RR receiver report 201
+ SDES source description 202
+ BYE goodbye 203
+ APP application-defined 204
+
+ These type values were chosen in the range 200-204 for improved
+ header validity checking of RTCP packets compared to RTP packets or
+ other unrelated packets. When the RTCP packet type field is compared
+ to the corresponding octet of the RTP header, this range corresponds
+ to the marker bit being 1 (which it usually is not in data packets)
+ and to the high bit of the standard payload type field being 1 (since
+ the static payload types are typically defined in the low half).
+ This range was also chosen to be some distance numerically from 0 and
+ 255 since all-zeros and all-ones are common data patterns.
+
+ Since all compound RTCP packets MUST begin with SR or RR, these codes
+ were chosen as an even/odd pair to allow the RTCP validity check to
+ test the maximum number of bits with mask and value.
+
+ Additional RTCP packet types may be registered through IANA (see
+ Section 15).
+
+12.2 SDES Types
+
+ abbrev. name value
+ END end of SDES list 0
+ CNAME canonical name 1
+ NAME user name 2
+ EMAIL user's electronic mail address 3
+ PHONE user's phone number 4
+ LOC geographic user location 5
+ TOOL name of application or tool 6
+ NOTE notice about the source 7
+ PRIV private extensions 8
+
+ Additional SDES types may be registered through IANA (see Section
+ 15).
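+
+   The constants in Sections 12.1 and 12.2 map naturally onto C
+   enumerations; one possible (non-normative) rendering, with
+   illustrative identifier names:
+
+     /* RTCP packet types (Section 12.1) */
+     enum rtcp_type {
+         RTCP_SR   = 200,    /* sender report       */
+         RTCP_RR   = 201,    /* receiver report     */
+         RTCP_SDES = 202,    /* source description  */
+         RTCP_BYE  = 203,    /* goodbye             */
+         RTCP_APP  = 204     /* application-defined */
+     };
+
+     /* SDES item types (Section 12.2) */
+     enum sdes_type {
+         SDES_END   = 0,     /* end of SDES list         */
+         SDES_CNAME = 1,     /* canonical name           */
+         SDES_NAME  = 2,     /* user name                */
+         SDES_EMAIL = 3,     /* electronic mail address  */
+         SDES_PHONE = 4,     /* phone number             */
+         SDES_LOC   = 5,     /* geographic user location */
+         SDES_TOOL  = 6,     /* application or tool name */
+         SDES_NOTE  = 7,     /* notice/status            */
+         SDES_PRIV  = 8      /* private extensions       */
+     };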
+
+13. RTP Profiles and Payload Format Specifications
+
+ A complete specification of RTP for a particular application will
+ require one or more companion documents of two types described here:
+ profiles, and payload format specifications.
+
+ RTP may be used for a variety of applications with somewhat differing
+ requirements. The flexibility to adapt to those requirements is
+ provided by allowing multiple choices in the main protocol
+ specification, then selecting the appropriate choices or defining
+ extensions for a particular environment and class of applications in
+ a separate profile document. Typically an application will operate
+ under only one profile in a particular RTP session, so there is no
+ explicit indication within the RTP protocol itself as to which
+ profile is in use. A profile for audio and video applications may be
+ found in the companion RFC 3551. Profiles are typically titled "RTP
+ Profile for ...".
+
+ The second type of companion document is a payload format
+ specification, which defines how a particular kind of payload data,
+ such as H.261 encoded video, should be carried in RTP. These
+ documents are typically titled "RTP Payload Format for XYZ
+ Audio/Video Encoding". Payload formats may be useful under multiple
+ profiles and may therefore be defined independently of any particular
+ profile. The profile documents are then responsible for assigning a
+ default mapping of that format to a payload type value if needed.
+
+ Within this specification, the following items have been identified
+ for possible definition within a profile, but this list is not meant
+ to be exhaustive:
+
+ RTP data header: The octet in the RTP data header that contains
+ the marker bit and payload type field MAY be redefined by a
+ profile to suit different requirements, for example with more or
+ fewer marker bits (Section 5.3, p. 18).
+
+ Payload types: Assuming that a payload type field is included,
+ the profile will usually define a set of payload formats (e.g.,
+ media encodings) and a default static mapping of those formats to
+ payload type values. Some of the payload formats may be defined
+ by reference to separate payload format specifications. For each
+ payload type defined, the profile MUST specify the RTP timestamp
+ clock rate to be used (Section 5.1, p. 14).
+
+ RTP data header additions: Additional fields MAY be appended to
+ the fixed RTP data header if some additional functionality is
+ required across the profile's class of applications independent of
+ payload type (Section 5.3, p. 18).
+
+ RTP data header extensions: The contents of the first 16 bits of
+ the RTP data header extension structure MUST be defined if use of
+ that mechanism is to be allowed under the profile for
+ implementation-specific extensions (Section 5.3.1, p. 18).
+
+ RTCP packet types: New application-class-specific RTCP packet
+ types MAY be defined and registered with IANA.
+
+ RTCP report interval: A profile SHOULD specify that the values
+ suggested in Section 6.2 for the constants employed in the
+ calculation of the RTCP report interval will be used. Those are
+ the RTCP fraction of session bandwidth, the minimum report
+ interval, and the bandwidth split between senders and receivers.
+ A profile MAY specify alternate values if they have been
+ demonstrated to work in a scalable manner.
+
+ SR/RR extension: An extension section MAY be defined for the
+ RTCP SR and RR packets if there is additional information that
+ should be reported regularly about the sender or receivers
+ (Section 6.4.3, p. 42 and 43).
+
+ SDES use: The profile MAY specify the relative priorities for
+ RTCP SDES items to be transmitted or excluded entirely (Section
+ 6.3.9); an alternate syntax or semantics for the CNAME item
+ (Section 6.5.1); the format of the LOC item (Section 6.5.5); the
+ semantics and use of the NOTE item (Section 6.5.7); or new SDES
+ item types to be registered with IANA.
+
+ Security: A profile MAY specify which security services and
+ algorithms should be offered by applications, and MAY provide
+ guidance as to their appropriate use (Section 9, p. 65).
+
+ String-to-key mapping: A profile MAY specify how a user-provided
+ password or pass phrase is mapped into an encryption key.
+
+ Congestion: A profile SHOULD specify the congestion control
+ behavior appropriate for that profile.
+
+ Underlying protocol: Use of a particular underlying network or
+ transport layer protocol to carry RTP packets MAY be required.
+
+ Transport mapping: A mapping of RTP and RTCP to transport-level
+ addresses, e.g., UDP ports, other than the standard mapping
+ defined in Section 11, p. 68 may be specified.
+
+
+ Encapsulation: An encapsulation of RTP packets may be defined to
+ allow multiple RTP data packets to be carried in one lower-layer
+ packet or to provide framing over underlying protocols that do not
+ already do so (Section 11, p. 69).
+
+ It is not expected that a new profile will be required for every
+ application. Within one application class, it would be better to
+ extend an existing profile rather than make a new one in order to
+ facilitate interoperation among the applications since each will
+ typically run under only one profile. Simple extensions such as the
+ definition of additional payload type values or RTCP packet types may
+ be accomplished by registering them through IANA and publishing their
+ descriptions in an addendum to the profile or in a payload format
+ specification.
+
+14. Security Considerations
+
+ RTP suffers from the same security liabilities as the underlying
+ protocols. For example, an impostor can fake source or destination
+ network addresses, or change the header or payload. Within RTCP, the
+ CNAME and NAME information may be used to impersonate another
+ participant. In addition, RTP may be sent via IP multicast, which
+ provides no direct means for a sender to know all the receivers of
+ the data sent and therefore no measure of privacy. Rightly or not,
+ users may be more sensitive to privacy concerns with audio and video
+ communication than they have been with more traditional forms of
+ network communication [33]. Therefore, the use of security
+ mechanisms with RTP is important. These mechanisms are discussed in
+ Section 9.
+
+ RTP-level translators or mixers may be used to allow RTP traffic to
+ reach hosts behind firewalls. Appropriate firewall security
+ principles and practices, which are beyond the scope of this
+ document, should be followed in the design and installation of these
+ devices and in the admission of RTP applications for use behind the
+ firewall.
+
+15. IANA Considerations
+
+ Additional RTCP packet types and SDES item types may be registered
+ through the Internet Assigned Numbers Authority (IANA). Since these
+ number spaces are small, allowing unconstrained registration of new
+ values would not be prudent. To facilitate review of requests and to
+ promote shared use of new types among multiple applications, requests
+ for registration of new values must be documented in an RFC or other
+ permanent and readily available reference such as the product of
+ another cooperative standards body (e.g., ITU-T). Other requests may
+ also be accepted, under the advice of a "designated expert."
+
+ (Contact the IANA for the contact information of the current expert.)
+
+ RTP profile specifications SHOULD register with IANA a name for the
+ profile in the form "RTP/xxx", where xxx is a short abbreviation of
+ the profile title. These names are for use by higher-level control
+ protocols, such as the Session Description Protocol (SDP), RFC 2327
+ [15], to refer to transport methods.
+
+16. Intellectual Property Rights Statement
+
+ The IETF takes no position regarding the validity or scope of any
+ intellectual property or other rights that might be claimed to
+ pertain to the implementation or use of the technology described in
+ this document or the extent to which any license under such rights
+ might or might not be available; neither does it represent that it
+ has made any effort to identify any such rights. Information on the
+ IETF's procedures with respect to rights in standards-track and
+ standards-related documentation can be found in BCP-11. Copies of
+ claims of rights made available for publication and any assurances of
+ licenses to be made available, or the result of an attempt made to
+ obtain a general license or permission for the use of such
+ proprietary rights by implementors or users of this specification can
+ be obtained from the IETF Secretariat.
+
+ The IETF invites any interested party to bring to its attention any
+ copyrights, patents or patent applications, or other proprietary
+ rights which may cover technology that may be required to practice
+ this standard. Please address the information to the IETF Executive
+ Director.
+
+17. Acknowledgments
+
+ This memorandum is based on discussions within the IETF Audio/Video
+ Transport working group chaired by Stephen Casner and Colin Perkins.
+ The current protocol has its origins in the Network Voice Protocol
+ and the Packet Video Protocol (Danny Cohen and Randy Cole) and the
+ protocol implemented by the vat application (Van Jacobson and Steve
+ McCanne). Christian Huitema provided ideas for the random identifier
+ generator. Extensive analysis and simulation of the timer
+ reconsideration algorithm was done by Jonathan Rosenberg. The
+ additions for layered encodings were specified by Michael Speer and
+ Steve McCanne.
+
+
+Appendix A - Algorithms
+
+ We provide examples of C code for aspects of RTP sender and receiver
+ algorithms. There may be other implementation methods that are
+ faster in particular operating environments or have other advantages.
+ These implementation notes are for informational purposes only and
+ are meant to clarify the RTP specification.
+
+ The following definitions are used for all examples; for clarity and
+ brevity, the structure definitions are only valid for 32-bit big-
+ endian (most significant octet first) architectures. Bit fields are
+ assumed to be packed tightly in big-endian bit order, with no
+ additional padding. Modifications would be required to construct a
+ portable implementation.
+
+ /*
+ * rtp.h -- RTP header file
+ */
+ #include <sys/types.h>
+
+ /*
+ * The type definitions below are valid for 32-bit architectures and
+ * may have to be adjusted for 16- or 64-bit architectures.
+ */
+ typedef unsigned char u_int8;
+ typedef unsigned short u_int16;
+ typedef unsigned int u_int32;
+ typedef short int16;
+
+ /*
+ * Current protocol version.
+ */
+ #define RTP_VERSION 2
+
+ #define RTP_SEQ_MOD (1<<16)
+ #define RTP_MAX_SDES 255 /* maximum text length for SDES */
+
+ typedef enum {
+ RTCP_SR = 200,
+ RTCP_RR = 201,
+ RTCP_SDES = 202,
+ RTCP_BYE = 203,
+ RTCP_APP = 204
+ } rtcp_type_t;
+
+ typedef enum {
+ RTCP_SDES_END = 0,
+ RTCP_SDES_CNAME = 1,
+ RTCP_SDES_NAME = 2,
+ RTCP_SDES_EMAIL = 3,
+ RTCP_SDES_PHONE = 4,
+ RTCP_SDES_LOC = 5,
+ RTCP_SDES_TOOL = 6,
+ RTCP_SDES_NOTE = 7,
+ RTCP_SDES_PRIV = 8
+ } rtcp_sdes_type_t;
+
+ /*
+ * RTP data header
+ */
+ typedef struct {
+ unsigned int version:2; /* protocol version */
+ unsigned int p:1; /* padding flag */
+ unsigned int x:1; /* header extension flag */
+ unsigned int cc:4; /* CSRC count */
+ unsigned int m:1; /* marker bit */
+ unsigned int pt:7; /* payload type */
+ unsigned int seq:16; /* sequence number */
+ u_int32 ts; /* timestamp */
+ u_int32 ssrc; /* synchronization source */
+ u_int32 csrc[1]; /* optional CSRC list */
+ } rtp_hdr_t;
+
+ /*
+ * RTCP common header word
+ */
+ typedef struct {
+ unsigned int version:2; /* protocol version */
+ unsigned int p:1; /* padding flag */
+ unsigned int count:5; /* varies by packet type */
+ unsigned int pt:8; /* RTCP packet type */
+ u_int16 length; /* pkt len in words, w/o this word */
+ } rtcp_common_t;
+
+ /*
+ * Big-endian mask for version, padding bit and packet type pair
+ */
+ #define RTCP_VALID_MASK (0xc000 | 0x2000 | 0xfe)
+ #define RTCP_VALID_VALUE ((RTP_VERSION << 14) | RTCP_SR)
+
+ /*
+ * Reception report block
+ */
+ typedef struct {
+ u_int32 ssrc; /* data source being reported */
+ unsigned int fraction:8; /* fraction lost since last SR/RR */
+ int lost:24; /* cumul. no. pkts lost (signed!) */
+ u_int32 last_seq; /* extended last seq. no. received */
+ u_int32 jitter; /* interarrival jitter */
+ u_int32 lsr; /* last SR packet from this source */
+ u_int32 dlsr; /* delay since last SR packet */
+ } rtcp_rr_t;
+
+ /*
+ * SDES item
+ */
+ typedef struct {
+ u_int8 type; /* type of item (rtcp_sdes_type_t) */
+ u_int8 length; /* length of item (in octets) */
+ char data[1]; /* text, not null-terminated */
+ } rtcp_sdes_item_t;
+
+ /*
+ * One RTCP packet
+ */
+ typedef struct {
+ rtcp_common_t common; /* common header */
+ union {
+ /* sender report (SR) */
+ struct {
+ u_int32 ssrc; /* sender generating this report */
+ u_int32 ntp_sec; /* NTP timestamp */
+ u_int32 ntp_frac;
+ u_int32 rtp_ts; /* RTP timestamp */
+ u_int32 psent; /* packets sent */
+ u_int32 osent; /* octets sent */
+ rtcp_rr_t rr[1]; /* variable-length list */
+ } sr;
+
+ /* reception report (RR) */
+ struct {
+ u_int32 ssrc; /* receiver generating this report */
+ rtcp_rr_t rr[1]; /* variable-length list */
+ } rr;
+
+ /* source description (SDES) */
+ struct rtcp_sdes {
+ u_int32 src; /* first SSRC/CSRC */
+ rtcp_sdes_item_t item[1]; /* list of SDES items */
+ } sdes;
+
+ /* BYE */
+ struct {
+ u_int32 src[1]; /* list of sources */
+ /* can't express trailing text for reason */
+ } bye;
+ } r;
+ } rtcp_t;
+
+ typedef struct rtcp_sdes rtcp_sdes_t;
+
+ /*
+ * Per-source state information
+ */
+ typedef struct {
+ u_int16 max_seq; /* highest seq. number seen */
+ u_int32 cycles; /* shifted count of seq. number cycles */
+ u_int32 base_seq; /* base seq number */
+ u_int32 bad_seq; /* last 'bad' seq number + 1 */
+ u_int32 probation; /* sequ. packets till source is valid */
+ u_int32 received; /* packets received */
+ u_int32 expected_prior; /* packet expected at last interval */
+ u_int32 received_prior; /* packet received at last interval */
+ u_int32 transit; /* relative trans time for prev pkt */
+ u_int32 jitter; /* estimated jitter */
+ /* ... */
+ } source;
+
+A.1 RTP Data Header Validity Checks
+
+ An RTP receiver should check the validity of the RTP header on
+ incoming packets since they might be encrypted or might be from a
+ different application that happens to be misaddressed. Similarly, if
+ encryption according to the method described in Section 9 is enabled,
+ the header validity check is needed to verify that incoming packets
+ have been correctly decrypted, although a failure of the header
+ validity check (e.g., unknown payload type) may not necessarily
+ indicate decryption failure.
+
+ Only weak validity checks are possible on an RTP data packet from a
+ source that has not been heard before:
+
+ o RTP version field must equal 2.
+
+ o The payload type must be known, and in particular it must not be
+ equal to SR or RR.
+
+ o If the P bit is set, then the last octet of the packet must
+ contain a valid octet count, in particular, less than the total
+ packet length minus the header size.
+
+
+ o The X bit must be zero if the profile does not specify that the
+ header extension mechanism may be used. Otherwise, the extension
+ length field must be less than the total packet size minus the
+ fixed header length and padding.
+
+ o The length of the packet must be consistent with CC and payload
+ type (if payloads have a known length).
+
+ The last three checks are somewhat complex and not always possible,
+ leaving only the first two which total just a few bits. If the SSRC
+ identifier in the packet is one that has been received before, then
+ the packet is probably valid and checking if the sequence number is
+ in the expected range provides further validation. If the SSRC
+ identifier has not been seen before, then data packets carrying that
+ identifier may be considered invalid until a small number of them
+ arrive with consecutive sequence numbers. Those invalid packets MAY
+ be discarded or they MAY be stored and delivered once validation has
+ been achieved if the resulting delay is acceptable.
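+
+   As a non-normative illustration, the weak checks above might be
+   collected into a single helper such as the following sketch.  It
+   reuses the rtp_hdr_t and RTCP type definitions given earlier (and so
+   shares their big-endian assumption) and, for simplicity, assumes a
+   profile that defines no header extension; the function name is
+   arbitrary.
+
+   int rtp_weak_header_check(u_int8 *buf, int len)
+   {
+       rtp_hdr_t *h = (rtp_hdr_t *) buf;
+       int hlen;
+
+       if (len < 12)
+           return 0;                /* shorter than the fixed header */
+       if (h->version != RTP_VERSION)
+           return 0;                /* version field must equal 2 */
+       if (h->pt == (RTCP_SR & 0x7f) || h->pt == (RTCP_RR & 0x7f))
+           return 0;                /* PT 72/73 would be RTCP SR/RR */
+       hlen = 12 + 4 * h->cc;       /* fixed header plus CSRC list */
+       if (len < hlen)
+           return 0;
+       if (h->p) {                  /* padding count is the last octet */
+           int pad = buf[len - 1];
+           if (pad == 0 || pad >= len - hlen)
+               return 0;            /* invalid padding octet count */
+       }
+       if (h->x)
+           return 0;                /* no header extension permitted */
+       return 1;
+   }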
+
+ The routine update_seq shown below ensures that a source is declared
+ valid only after MIN_SEQUENTIAL packets have been received in
+ sequence. It also validates the sequence number seq of a newly
+ received packet and updates the sequence state for the packet's
+ source in the structure to which s points.
+
+ When a new source is heard for the first time, that is, its SSRC
+ identifier is not in the table (see Section 8.2), and the per-source
+ state is allocated for it, s->probation is set to the number of
+ sequential packets required before declaring a source valid
+ (parameter MIN_SEQUENTIAL) and other variables are initialized:
+
+ init_seq(s, seq);
+ s->max_seq = seq - 1;
+ s->probation = MIN_SEQUENTIAL;
+
+ A non-zero s->probation marks the source as not yet valid so the
+ state may be discarded after a short timeout rather than a long one,
+ as discussed in Section 6.2.1.
+
+ After a source is considered valid, the sequence number is considered
+ valid if it is no more than MAX_DROPOUT ahead of s->max_seq nor more
+ than MAX_MISORDER behind. If the new sequence number is ahead of
+ max_seq modulo the RTP sequence number range (16 bits), but is
+ smaller than max_seq, it has wrapped around and the (shifted) count
+ of sequence number cycles is incremented. A value of one is returned
+ to indicate a valid sequence number.
+
+
+ Otherwise, the value zero is returned to indicate that the validation
+ failed, and the bad sequence number plus 1 is stored. If the next
+ packet received carries the next higher sequence number, it is
+ considered the valid start of a new packet sequence presumably caused
+ by an extended dropout or a source restart. Since multiple complete
+ sequence number cycles may have been missed, the packet loss
+ statistics are reset.
+
+ Typical values for the parameters are shown, based on a maximum
+ misordering time of 2 seconds at 50 packets/second and a maximum
+ dropout of 1 minute. The dropout parameter MAX_DROPOUT should be a
+ small fraction of the 16-bit sequence number space to give a
+ reasonable probability that new sequence numbers after a restart will
+ not fall in the acceptable range for sequence numbers from before the
+ restart.
+
+ void init_seq(source *s, u_int16 seq)
+ {
+ s->base_seq = seq;
+ s->max_seq = seq;
+ s->bad_seq = RTP_SEQ_MOD + 1; /* so seq == bad_seq is false */
+ s->cycles = 0;
+ s->received = 0;
+ s->received_prior = 0;
+ s->expected_prior = 0;
+ /* other initialization */
+ }
+
+ int update_seq(source *s, u_int16 seq)
+ {
+ u_int16 udelta = seq - s->max_seq;
+ const int MAX_DROPOUT = 3000;
+ const int MAX_MISORDER = 100;
+ const int MIN_SEQUENTIAL = 2;
+
+ /*
+ * Source is not valid until MIN_SEQUENTIAL packets with
+ * sequential sequence numbers have been received.
+ */
+ if (s->probation) {
+ /* packet is in sequence */
+ if (seq == s->max_seq + 1) {
+ s->probation--;
+ s->max_seq = seq;
+ if (s->probation == 0) {
+ init_seq(s, seq);
+ s->received++;
+ return 1;
+ }
+ } else {
+ s->probation = MIN_SEQUENTIAL - 1;
+ s->max_seq = seq;
+ }
+ return 0;
+ } else if (udelta < MAX_DROPOUT) {
+ /* in order, with permissible gap */
+ if (seq < s->max_seq) {
+ /*
+ * Sequence number wrapped - count another 64K cycle.
+ */
+ s->cycles += RTP_SEQ_MOD;
+ }
+ s->max_seq = seq;
+ } else if (udelta <= RTP_SEQ_MOD - MAX_MISORDER) {
+ /* the sequence number made a very large jump */
+ if (seq == s->bad_seq) {
+ /*
+ * Two sequential packets -- assume that the other side
+ * restarted without telling us so just re-sync
+ * (i.e., pretend this was the first packet).
+ */
+ init_seq(s, seq);
+ }
+ else {
+ s->bad_seq = (seq + 1) & (RTP_SEQ_MOD-1);
+ return 0;
+ }
+ } else {
+ /* duplicate or reordered packet */
+ }
+ s->received++;
+ return 1;
+ }
+
+   The validity check can be made stronger by requiring more than two
+ packets in sequence. The disadvantages are that a larger number of
+ initial packets will be discarded (or delayed in a queue) and that
+ high packet loss rates could prevent validation. However, because
+ the RTCP header validation is relatively strong, if an RTCP packet is
+ received from a source before the data packets, the count could be
+ adjusted so that only two packets are required in sequence. If
+ initial data loss for a few seconds can be tolerated, an application
+ MAY choose to discard all data packets from a source until a valid
+ RTCP packet has been received from that source.
+
+
+ Depending on the application and encoding, algorithms may exploit
+ additional knowledge about the payload format for further validation.
+ For payload types where the timestamp increment is the same for all
+ packets, the timestamp values can be predicted from the previous
+ packet received from the same source using the sequence number
+ difference (assuming no change in payload type).
+
+ A strong "fast-path" check is possible since with high probability
+ the first four octets in the header of a newly received RTP data
+ packet will be just the same as that of the previous packet from the
+ same SSRC except that the sequence number will have increased by one.
+ Similarly, a single-entry cache may be used for faster SSRC lookups
+ in applications where data is typically received from one source at a
+ time.
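+
+   As a non-normative sketch (the names and the use of static variables
+   are illustrative only), such a fast path with a single-entry cache
+   might look as follows.  Here first_word is the first 32-bit word of
+   the new packet's header in host byte order, and the full validity
+   checks are expected to refresh the cache whenever this path fails.
+
+   static u_int32 cached_ssrc;     /* SSRC of the previous packet */
+   static u_int32 cached_word;     /* its first header word */
+   static source *cached_src;      /* per-source state for that SSRC */
+
+   int rtp_fast_path(u_int32 first_word, u_int32 ssrc, source **s)
+   {
+       /*
+        * If only the 16-bit sequence number in the low half of the
+        * first word advanced by one, the header is almost certainly
+        * valid.  A wrap from 0xffff carries into the payload type
+        * octet, so that rare case simply falls back to the full
+        * checks.
+        */
+       if (ssrc == cached_ssrc && first_word == cached_word + 1) {
+           cached_word = first_word;
+           *s = cached_src;
+           return 1;
+       }
+       return 0;                    /* fall back to the full checks */
+   }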
+
+A.2 RTCP Header Validity Checks
+
+ The following checks should be applied to RTCP packets.
+
+ o RTP version field must equal 2.
+
+ o The payload type field of the first RTCP packet in a compound
+ packet must be equal to SR or RR.
+
+ o The padding bit (P) should be zero for the first packet of a
+ compound RTCP packet because padding should only be applied, if it
+ is needed, to the last packet.
+
+ o The length fields of the individual RTCP packets must add up to
+ the overall length of the compound RTCP packet as received. This
+ is a fairly strong check.
+
+ The code fragment below performs all of these checks. The packet
+ type is not checked for subsequent packets since unknown packet types
+ may be present and should be ignored.
+
+ u_int32 len; /* length of compound RTCP packet in words */
+ rtcp_t *r; /* RTCP header */
+ rtcp_t *end; /* end of compound RTCP packet */
+
+ if ((*(u_int16 *)r & RTCP_VALID_MASK) != RTCP_VALID_VALUE) {
+ /* something wrong with packet format */
+ }
+ end = (rtcp_t *)((u_int32 *)r + len);
+
+ do r = (rtcp_t *)((u_int32 *)r + r->common.length + 1);
+ while (r < end && r->common.version == 2);
+
+ if (r != end) {
+ /* something wrong with packet format */
+ }
+
+A.3 Determining Number of Packets Expected and Lost
+
+ In order to compute packet loss rates, the number of RTP packets
+ expected and actually received from each source needs to be known,
+ using per-source state information defined in struct source
+ referenced via pointer s in the code below. The number of packets
+ received is simply the count of packets as they arrive, including any
+ late or duplicate packets. The number of packets expected can be
+ computed by the receiver as the difference between the highest
+ sequence number received (s->max_seq) and the first sequence number
+ received (s->base_seq). Since the sequence number is only 16 bits
+ and will wrap around, it is necessary to extend the highest sequence
+ number with the (shifted) count of sequence number wraparounds
+ (s->cycles). Both the received packet count and the count of cycles
+   are maintained by the RTP header validity check routine in Appendix A.1.
+
+ extended_max = s->cycles + s->max_seq;
+ expected = extended_max - s->base_seq + 1;
+
+ The number of packets lost is defined to be the number of packets
+ expected less the number of packets actually received:
+
+ lost = expected - s->received;
+
+ Since this signed number is carried in 24 bits, it should be clamped
+ at 0x7fffff for positive loss or 0x800000 for negative loss rather
+ than wrapping around.
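+
+   As a non-normative sketch, assuming lost is held in an ordinary
+   signed 32-bit int before being packed into the 24-bit field, the
+   clamping can be written as:
+
+       if (lost > 0x7fffff)
+           lost = 0x7fffff;         /* positive limit */
+       else if (lost < -0x800000)
+           lost = -0x800000;        /* stored as 0x800000 in the field */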
+
+ The fraction of packets lost during the last reporting interval
+ (since the previous SR or RR packet was sent) is calculated from
+ differences in the expected and received packet counts across the
+ interval, where expected_prior and received_prior are the values
+ saved when the previous reception report was generated:
+
+ expected_interval = expected - s->expected_prior;
+ s->expected_prior = expected;
+ received_interval = s->received - s->received_prior;
+ s->received_prior = s->received;
+ lost_interval = expected_interval - received_interval;
+ if (expected_interval == 0 || lost_interval <= 0) fraction = 0;
+ else fraction = (lost_interval << 8) / expected_interval;
+
+ The resulting fraction is an 8-bit fixed point number with the binary
+ point at the left edge.
+
+A.4 Generating RTCP SDES Packets
+
+ This function builds one SDES chunk into buffer b composed of argc
+ items supplied in arrays type, value and length. It returns a
+ pointer to the next available location within b.
+
+ char *rtp_write_sdes(char *b, u_int32 src, int argc,
+ rtcp_sdes_type_t type[], char *value[],
+ int length[])
+ {
+ rtcp_sdes_t *s = (rtcp_sdes_t *)b;
+ rtcp_sdes_item_t *rsp;
+ int i;
+ int len;
+ int pad;
+
+ /* SSRC header */
+ s->src = src;
+ rsp = &s->item[0];
+
+ /* SDES items */
+ for (i = 0; i < argc; i++) {
+ rsp->type = type[i];
+ len = length[i];
+ if (len > RTP_MAX_SDES) {
+ /* invalid length, may want to take other action */
+ len = RTP_MAX_SDES;
+ }
+ rsp->length = len;
+ memcpy(rsp->data, value[i], len);
+ rsp = (rtcp_sdes_item_t *)&rsp->data[len];
+ }
+
+ /* terminate with end marker and pad to next 4-octet boundary */
+ len = ((char *) rsp) - b;
+ pad = 4 - (len & 0x3);
+ b = (char *) rsp;
+ while (pad--) *b++ = RTCP_SDES_END;
+
+ return b;
+ }
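+
+   A non-normative usage fragment follows; the buffer size, SSRC value
+   and CNAME string are arbitrary examples chosen for illustration.
+
+       char buf[64];
+       u_int32 ssrc = 0x12345678;
+       rtcp_sdes_type_t types[]   = { RTCP_SDES_CNAME };
+       char             *values[] = { "user@example.com" };
+       int               lengths[] = { 16 };  /* strlen of the value */
+       char *next;
+
+       next = rtp_write_sdes(buf, ssrc, 1, types, values, lengths);
+       /* next points just past the chunk, on a 4-octet boundary */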
+
+A.5 Parsing RTCP SDES Packets
+
+ This function parses an SDES packet, calling functions find_member()
+ to find a pointer to the information for a session member given the
+ SSRC identifier and member_sdes() to store the new SDES information
+ for that member. This function expects a pointer to the header of
+ the RTCP packet.
+
+ void rtp_read_sdes(rtcp_t *r)
+ {
+ int count = r->common.count;
+ rtcp_sdes_t *sd = &r->r.sdes;
+ rtcp_sdes_item_t *rsp, *rspn;
+ rtcp_sdes_item_t *end = (rtcp_sdes_item_t *)
+ ((u_int32 *)r + r->common.length + 1);
+ source *s;
+
+ while (--count >= 0) {
+ rsp = &sd->item[0];
+ if (rsp >= end) break;
+ s = find_member(sd->src);
+
+ for (; rsp->type; rsp = rspn ) {
+ rspn = (rtcp_sdes_item_t *)((char*)rsp+rsp->length+2);
+ if (rspn >= end) {
+ rsp = rspn;
+ break;
+ }
+ member_sdes(s, rsp->type, rsp->data, rsp->length);
+ }
+ sd = (rtcp_sdes_t *)
+ ((u_int32 *)sd + (((char *)rsp - (char *)sd) >> 2)+1);
+ }
+ if (count >= 0) {
+ /* invalid packet format */
+ }
+ }
+
+A.6 Generating a Random 32-bit Identifier
+
+ The following subroutine generates a random 32-bit identifier using
+ the MD5 routines published in RFC 1321 [32]. The system routines may
+ not be present on all operating systems, but they should serve as
+ hints as to what kinds of information may be used. Other system
+ calls that may be appropriate include
+
+
+ o getdomainname(),
+
+ o getwd(), or
+
+ o getrusage().
+
+ "Live" video or audio samples are also a good source of random
+ numbers, but care must be taken to avoid using a turned-off
+ microphone or blinded camera as a source [17].
+
+ Use of this or a similar routine is recommended to generate the
+ initial seed for the random number generator producing the RTCP
+ period (as shown in Appendix A.7), to generate the initial values for
+ the sequence number and timestamp, and to generate SSRC values.
+ Since this routine is likely to be CPU-intensive, its direct use to
+ generate RTCP periods is inappropriate because predictability is not
+ an issue. Note that this routine produces the same result on
+ repeated calls until the value of the system clock changes unless
+ different values are supplied for the type argument.
+
+ /*
+ * Generate a random 32-bit quantity.
+ */
+ #include <sys/types.h> /* u_long */
+ #include <sys/time.h> /* gettimeofday() */
+ #include <unistd.h> /* get..() */
+ #include <stdio.h> /* printf() */
+ #include <time.h> /* clock() */
+ #include <sys/utsname.h> /* uname() */
+ #include "global.h" /* from RFC 1321 */
+ #include "md5.h" /* from RFC 1321 */
+
+ #define MD_CTX MD5_CTX
+ #define MDInit MD5Init
+ #define MDUpdate MD5Update
+ #define MDFinal MD5Final
+
+ static u_long md_32(char *string, int length)
+ {
+ MD_CTX context;
+ union {
+ char c[16];
+ u_long x[4];
+ } digest;
+ u_long r;
+ int i;
+
+ MDInit (&context);
+ MDUpdate (&context, string, length);
+ MDFinal ((unsigned char *)&digest, &context);
+ r = 0;
+ for (i = 0; i < 3; i++) {
+ r ^= digest.x[i];
+ }
+ return r;
+ } /* md_32 */
+
+ /*
+ * Return random unsigned 32-bit quantity. Use 'type' argument if
+ * you need to generate several different values in close succession.
+ */
+ u_int32 random32(int type)
+ {
+ struct {
+ int type;
+ struct timeval tv;
+ clock_t cpu;
+ pid_t pid;
+ u_long hid;
+ uid_t uid;
+ gid_t gid;
+ struct utsname name;
+ } s;
+
+ gettimeofday(&s.tv, 0);
+ uname(&s.name);
+ s.type = type;
+ s.cpu = clock();
+ s.pid = getpid();
+ s.hid = gethostid();
+ s.uid = getuid();
+ s.gid = getgid();
+ /* also: system uptime */
+
+ return md_32((char *)&s, sizeof(s));
+ } /* random32 */
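+
+   A non-normative usage fragment, following the recommendations above;
+   the particular type values are arbitrary, and srand48() (declared in
+   <stdlib.h>) seeds the drand48() generator used in Appendix A.7.
+
+       u_int32 ssrc = random32(0);            /* SSRC identifier */
+       u_int16 seq  = (u_int16) random32(1);  /* initial sequence number */
+       u_int32 ts   = random32(2);            /* initial RTP timestamp */
+       srand48((long) random32(3));           /* seed for RTCP timing */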
+
+A.7 Computing the RTCP Transmission Interval
+
+ The following functions implement the RTCP transmission and reception
+ rules described in Section 6.2. These rules are coded in several
+ functions:
+
+ o rtcp_interval() computes the deterministic calculated interval,
+ measured in seconds. The parameters are defined in Section 6.3.
+
+ o OnExpire() is called when the RTCP transmission timer expires.
+
+ o OnReceive() is called whenever an RTCP packet is received.
+
+ Both OnExpire() and OnReceive() have event e as an argument. This is
+ the next scheduled event for that participant, either an RTCP report
+ or a BYE packet. It is assumed that the following functions are
+ available:
+
+ o Schedule(time t, event e) schedules an event e to occur at time t.
+ When time t arrives, the function OnExpire is called with e as an
+ argument.
+
+ o Reschedule(time t, event e) reschedules a previously scheduled
+ event e for time t.
+
+ o SendRTCPReport(event e) sends an RTCP report.
+
+ o SendBYEPacket(event e) sends a BYE packet.
+
+ o TypeOfEvent(event e) returns EVENT_BYE if the event being
+ processed is for a BYE packet to be sent, else it returns
+ EVENT_REPORT.
+
+ o PacketType(p) returns PACKET_RTCP_REPORT if packet p is an RTCP
+      report (not BYE), PACKET_BYE if it is a BYE RTCP packet, and
+      PACKET_RTP if it is a regular RTP data packet.
+
+ o ReceivedPacketSize() and SentPacketSize() return the size of the
+ referenced packet in octets.
+
+ o NewMember(p) returns a 1 if the participant who sent packet p is
+ not currently in the member list, 0 otherwise. Note this function
+ is not sufficient for a complete implementation because each CSRC
+ identifier in an RTP packet and each SSRC in a BYE packet should
+ be processed.
+
+ o NewSender(p) returns a 1 if the participant who sent packet p is
+ not currently in the sender sublist of the member list, 0
+ otherwise.
+
+ o AddMember() and RemoveMember() to add and remove participants from
+ the member list.
+
+ o AddSender() and RemoveSender() to add and remove participants from
+ the sender sublist of the member list.
+
+
+ These functions would have to be extended for an implementation that
+ allows the RTCP bandwidth fractions for senders and non-senders to be
+ specified as explicit parameters rather than fixed values of 25% and
+ 75%. The extended implementation of rtcp_interval() would need to
+ avoid division by zero if one of the parameters was zero.
+
+ double rtcp_interval(int members,
+ int senders,
+ double rtcp_bw,
+ int we_sent,
+ double avg_rtcp_size,
+ int initial)
+ {
+ /*
+ * Minimum average time between RTCP packets from this site (in
+ * seconds). This time prevents the reports from `clumping' when
+ * sessions are small and the law of large numbers isn't helping
+ * to smooth out the traffic. It also keeps the report interval
+ * from becoming ridiculously small during transient outages like
+ * a network partition.
+ */
+ double const RTCP_MIN_TIME = 5.;
+ /*
+ * Fraction of the RTCP bandwidth to be shared among active
+ * senders. (This fraction was chosen so that in a typical
+ * session with one or two active senders, the computed report
+ * time would be roughly equal to the minimum report time so that
+ * we don't unnecessarily slow down receiver reports.) The
+ * receiver fraction must be 1 - the sender fraction.
+ */
+ double const RTCP_SENDER_BW_FRACTION = 0.25;
+ double const RTCP_RCVR_BW_FRACTION = (1-RTCP_SENDER_BW_FRACTION);
+        /*
+         * To compensate for "timer reconsideration" converging to a
+         * value below the intended average.
+         */
+ double const COMPENSATION = 2.71828 - 1.5;
+
+ double t; /* interval */
+ double rtcp_min_time = RTCP_MIN_TIME;
+ int n; /* no. of members for computation */
+
+ /*
+ * Very first call at application start-up uses half the min
+ * delay for quicker notification while still allowing some time
+ * before reporting for randomization and to learn about other
+ * sources so the report interval will converge to the correct
+ * interval more quickly.
+ */
+ if (initial) {
+ rtcp_min_time /= 2;
+ }
+ /*
+ * Dedicate a fraction of the RTCP bandwidth to senders unless
+ * the number of senders is large enough that their share is
+ * more than that fraction.
+ */
+ n = members;
+ if (senders <= members * RTCP_SENDER_BW_FRACTION) {
+ if (we_sent) {
+ rtcp_bw *= RTCP_SENDER_BW_FRACTION;
+ n = senders;
+ } else {
+ rtcp_bw *= RTCP_RCVR_BW_FRACTION;
+ n -= senders;
+ }
+ }
+
+ /*
+ * The effective number of sites times the average packet size is
+ * the total number of octets sent when each site sends a report.
+ * Dividing this by the effective bandwidth gives the time
+ * interval over which those packets must be sent in order to
+ * meet the bandwidth target, with a minimum enforced. In that
+ * time interval we send one report so this time is also our
+ * average time between reports.
+ */
+ t = avg_rtcp_size * n / rtcp_bw;
+ if (t < rtcp_min_time) t = rtcp_min_time;
+
+ /*
+ * To avoid traffic bursts from unintended synchronization with
+ * other sites, we then pick our actual next report interval as a
+ * random number uniformly distributed between 0.5*t and 1.5*t.
+ */
+ t = t * (drand48() + 0.5);
+ t = t / COMPENSATION;
+ return t;
+ }
+
+ void OnExpire(event e,
+ int members,
+ int senders,
+ double rtcp_bw,
+ int we_sent,
+ double *avg_rtcp_size,
+ int *initial,
+ time_tp tc,
+ time_tp *tp,
+ int *pmembers)
+ {
+ /* This function is responsible for deciding whether to send an
+ * RTCP report or BYE packet now, or to reschedule transmission.
+ * It is also responsible for updating the pmembers, initial, tp,
+ * and avg_rtcp_size state variables. This function should be
+ * called upon expiration of the event timer used by Schedule().
+ */
+
+ double t; /* Interval */
+ double tn; /* Next transmit time */
+
+ /* In the case of a BYE, we use "timer reconsideration" to
+ * reschedule the transmission of the BYE if necessary */
+
+ if (TypeOfEvent(e) == EVENT_BYE) {
+ t = rtcp_interval(members,
+ senders,
+ rtcp_bw,
+ we_sent,
+ *avg_rtcp_size,
+ *initial);
+ tn = *tp + t;
+ if (tn <= tc) {
+ SendBYEPacket(e);
+ exit(1);
+ } else {
+ Schedule(tn, e);
+ }
+
+ } else if (TypeOfEvent(e) == EVENT_REPORT) {
+ t = rtcp_interval(members,
+ senders,
+ rtcp_bw,
+ we_sent,
+ *avg_rtcp_size,
+ *initial);
+ tn = *tp + t;
+ if (tn <= tc) {
+ SendRTCPReport(e);
+ *avg_rtcp_size = (1./16.)*SentPacketSize(e) +
+ (15./16.)*(*avg_rtcp_size);
+ *tp = tc;
+
+ /* We must redraw the interval. Don't reuse the
+              one computed above, since it is not actually
+ distributed the same, as we are conditioned
+ on it being small enough to cause a packet to
+ be sent */
+
+ t = rtcp_interval(members,
+ senders,
+ rtcp_bw,
+ we_sent,
+ *avg_rtcp_size,
+ *initial);
+
+ Schedule(t+tc,e);
+ *initial = 0;
+ } else {
+ Schedule(tn, e);
+ }
+ *pmembers = members;
+ }
+ }
+
+ void OnReceive(packet p,
+ event e,
+ int *members,
+ int *pmembers,
+ int *senders,
+ double *avg_rtcp_size,
+ double *tp,
+ double tc,
+ double tn)
+ {
+ /* What we do depends on whether we have left the group, and are
+ * waiting to send a BYE (TypeOfEvent(e) == EVENT_BYE) or an RTCP
+ * report. p represents the packet that was just received. */
+
+ if (PacketType(p) == PACKET_RTCP_REPORT) {
+ if (NewMember(p) && (TypeOfEvent(e) == EVENT_REPORT)) {
+ AddMember(p);
+ *members += 1;
+ }
+ *avg_rtcp_size = (1./16.)*ReceivedPacketSize(p) +
+ (15./16.)*(*avg_rtcp_size);
+ } else if (PacketType(p) == PACKET_RTP) {
+ if (NewMember(p) && (TypeOfEvent(e) == EVENT_REPORT)) {
+ AddMember(p);
+ *members += 1;
+ }
+ if (NewSender(p) && (TypeOfEvent(e) == EVENT_REPORT)) {
+ AddSender(p);
+ *senders += 1;
+ }
+ } else if (PacketType(p) == PACKET_BYE) {
+ *avg_rtcp_size = (1./16.)*ReceivedPacketSize(p) +
+ (15./16.)*(*avg_rtcp_size);
+
+ if (TypeOfEvent(e) == EVENT_REPORT) {
+ if (NewSender(p) == FALSE) {
+ RemoveSender(p);
+ *senders -= 1;
+ }
+
+ if (NewMember(p) == FALSE) {
+ RemoveMember(p);
+ *members -= 1;
+ }
+
+ if (*members < *pmembers) {
+ tn = tc +
+ (((double) *members)/(*pmembers))*(tn - tc);
+ *tp = tc -
+ (((double) *members)/(*pmembers))*(tc - *tp);
+
+ /* Reschedule the next report for time tn */
+
+ Reschedule(tn, e);
+ *pmembers = *members;
+ }
+
+ } else if (TypeOfEvent(e) == EVENT_BYE) {
+ *members += 1;
+ }
+ }
+ }
+
+
+A.8 Estimating the Interarrival Jitter
+
+ The code fragments below implement the algorithm given in Section
+ 6.4.1 for calculating an estimate of the statistical variance of the
+ RTP data interarrival time to be inserted in the interarrival jitter
+ field of reception reports. The inputs are r->ts, the timestamp from
+ the incoming packet, and arrival, the current time in the same units.
+ Here s points to state for the source; s->transit holds the relative
+ transit time for the previous packet, and s->jitter holds the
+ estimated jitter. The jitter field of the reception report is
+ measured in timestamp units and expressed as an unsigned integer, but
+ the jitter estimate is kept in a floating point. As each data packet
+ arrives, the jitter estimate is updated:
+
+ int transit = arrival - r->ts;
+ int d = transit - s->transit;
+ s->transit = transit;
+ if (d < 0) d = -d;
+ s->jitter += (1./16.) * ((double)d - s->jitter);
+
+ When a reception report block (to which rr points) is generated for
+ this member, the current jitter estimate is returned:
+
+ rr->jitter = (u_int32) s->jitter;
+
+ Alternatively, the jitter estimate can be kept as an integer, but
+ scaled to reduce round-off error. The calculation is the same except
+ for the last line:
+
+ s->jitter += d - ((s->jitter + 8) >> 4);
+
+ In this case, the estimate is sampled for the reception report as:
+
+ rr->jitter = s->jitter >> 4;
+
+Appendix B - Changes from RFC 1889
+
+ Most of this RFC is identical to RFC 1889. There are no changes in
+ the packet formats on the wire, only changes to the rules and
+ algorithms governing how the protocol is used. The biggest change is
+ an enhancement to the scalable timer algorithm for calculating when
+ to send RTCP packets:
+
+ o The algorithm for calculating the RTCP transmission interval
+ specified in Sections 6.2 and 6.3 and illustrated in Appendix A.7
+ is augmented to include "reconsideration" to minimize transmission
+ in excess of the intended rate when many participants join a
+ session simultaneously, and "reverse reconsideration" to reduce
+ the incidence and duration of false participant timeouts when the
+ number of participants drops rapidly. Reverse reconsideration is
+ also used to possibly shorten the delay before sending RTCP SR
+ when transitioning from passive receiver to active sender mode.
+
+ o Section 6.3.7 specifies new rules controlling when an RTCP BYE
+ packet should be sent in order to avoid a flood of packets when
+ many participants leave a session simultaneously.
+
+ o The requirement to retain state for inactive participants for a
+ period long enough to span typical network partitions was removed
+ from Section 6.2.1. In a session where many participants join for
+ a brief time and fail to send BYE, this requirement would cause a
+ significant overestimate of the number of participants. The
+ reconsideration algorithm added in this revision compensates for
+ the large number of new participants joining simultaneously when a
+ partition heals.
+
+ It should be noted that these enhancements only have a significant
+ effect when the number of session participants is large (thousands)
+ and most of the participants join or leave at the same time. This
+ makes testing in a live network difficult. However, the algorithm
+ was subjected to a thorough analysis and simulation to verify its
+ performance. Furthermore, the enhanced algorithm was designed to
+ interoperate with the algorithm in RFC 1889 such that the degree of
+ reduction in excess RTCP bandwidth during a step join is proportional
+ to the fraction of participants that implement the enhanced
+ algorithm. Interoperation of the two algorithms has been verified
+ experimentally on live networks.
+
+ Other functional changes were:
+
+ o Section 6.2.1 specifies that implementations may store only a
+ sampling of the participants' SSRC identifiers to allow scaling to
+ very large sessions. Algorithms are specified in RFC 2762 [21].
+
+ o In Section 6.2 it is specified that RTCP sender and non-sender
+ bandwidths may be set as separate parameters of the session rather
+ than a strict percentage of the session bandwidth, and may be set
+ to zero. The requirement that RTCP was mandatory for RTP sessions
+ using IP multicast was relaxed. However, a clarification was also
+ added that turning off RTCP is NOT RECOMMENDED.
+
+ o In Sections 6.2, 6.3.1 and Appendix A.7, it is specified that the
+ fraction of participants below which senders get dedicated RTCP
+ bandwidth changes from the fixed 1/4 to a ratio based on the RTCP
+ sender and non-sender bandwidth parameters when those are given.
+ The condition that no bandwidth is dedicated to senders when there
+ are no senders was removed since that is expected to be a
+ transitory state. It also keeps non-senders from using sender
+ RTCP bandwidth when that is not intended.
+
+ o Also in Section 6.2 it is specified that the minimum RTCP interval
+ may be scaled to smaller values for high bandwidth sessions, and
+ that the initial RTCP delay may be set to zero for unicast
+ sessions.
+
+ o Timing out a participant is to be based on inactivity for a number
+ of RTCP report intervals calculated using the receiver RTCP
+ bandwidth fraction even for active senders.
+
+ o Sections 7.2 and 7.3 specify that translators and mixers should
+ send BYE packets for the sources they are no longer forwarding.
+
+ o Rule changes for layered encodings are defined in Sections 2.4,
+ 6.3.9, 8.3 and 11. In the last of these, it is noted that the
+ address and port assignment rule conflicts with the SDP
+ specification, RFC 2327 [15], but it is intended that this
+ restriction will be relaxed in a revision of RFC 2327.
+
+ o The convention for using even/odd port pairs for RTP and RTCP in
+ Section 11 was clarified to refer to destination ports. The
+ requirement to use an even/odd port pair was removed if the two
+ ports are specified explicitly. For unicast RTP sessions,
+ distinct port pairs may be used for the two ends (Sections 3, 7.1
+ and 11).
+
+ o A new Section 10 was added to explain the requirement for
+ congestion control in applications using RTP.
+
+ o In Section 8.2, the requirement that a new SSRC identifier MUST be
+ chosen whenever the source transport address is changed has been
+ relaxed to say that a new SSRC identifier MAY be chosen.
+ Correspondingly, it was clarified that an implementation MAY
+ choose to keep packets from the new source address rather than the
+ existing source address when an SSRC collision occurs between two
+ other participants, and SHOULD do so for applications such as
+ telephony in which some sources such as mobile entities may change
+ addresses during the course of an RTP session.
+
+ o An indentation bug in the RFC 1889 printing of the pseudo-code for
+ the collision detection and resolution algorithm in Section 8.2
+ has been corrected by translating the syntax to pseudo C language,
+ and the algorithm has been modified to remove the restriction that
+ both RTP and RTCP must be sent from the same source port number.
+
+ o The description of the padding mechanism for RTCP packets was
+ clarified and it is specified that padding MUST only be applied to
+ the last packet of a compound RTCP packet.
+
+ o In Section A.1, initialization of base_seq was corrected to be seq
+ rather than seq - 1, and the text was corrected to say the bad
+ sequence number plus 1 is stored. The initialization of max_seq
+ and other variables for the algorithm was separated from the text
+ to make clear that this initialization must be done in addition to
+ calling the init_seq() function (and a few words lost in RFC 1889
+ when processing the document from source to output form were
+ restored).
+
+ o Clamping of number of packets lost in Section A.3 was corrected to
+ use both positive and negative limits.
+
+ o The specification of "relative" NTP timestamp in the RTCP SR
+ section now defines these timestamps to be based on the most
+ common system-specific clock, such as system uptime, rather than
+ on session elapsed time which would not be the same for multiple
+ applications started on the same machine at different times.
+
+ Non-functional changes:
+
+ o It is specified that a receiver MUST ignore packets with payload
+ types it does not understand.
+
+ o In Fig. 2, the floating point NTP timestamp value was corrected,
+ some missing leading zeros were added in a hex number, and the UTC
+ timezone was specified.
+
+ o The inconsequence of NTP timestamps wrapping around in the year
+ 2036 is explained.
+
+
+ o The policy for registration of RTCP packet types and SDES types
+ was clarified in a new Section 15, IANA Considerations. The
+ suggestion that experimenters register the numbers they need and
+ then unregister those which prove to be unneeded has been removed
+ in favor of using APP and PRIV. Registration of profile names was
+ also specified.
+
+ o The reference for the UTF-8 character set was changed from an
+ X/Open Preliminary Specification to be RFC 2279.
+
+ o The reference for RFC 1597 was updated to RFC 1918 and the
+ reference for RFC 2543 was updated to RFC 3261.
+
+ o The last paragraph of the introduction in RFC 1889, which
+ cautioned implementors to limit deployment in the Internet, was
+ removed because it was deemed no longer relevant.
+
+ o A non-normative note regarding the use of RTP with Source-Specific
+ Multicast (SSM) was added in Section 6.
+
+ o The definition of "RTP session" in Section 3 was expanded to
+ acknowledge that a single session may use multiple destination
+ transport addresses (as was always the case for a translator or
+ mixer) and to explain that the distinguishing feature of an RTP
+ session is that each corresponds to a separate SSRC identifier
+ space. A new definition of "multimedia session" was added to
+ reduce confusion about the word "session".
+
+ o The meaning of "sampling instant" was explained in more detail as
+ part of the definition of the timestamp field of the RTP header in
+ Section 5.1.
+
+ o Small clarifications of the text have been made in several places,
+ some in response to questions from readers. In particular:
+
+ - In RFC 1889, the first five words of the second sentence of
+ Section 2.2 were lost in processing the document from source to
+ output form, but are now restored.
+
+ - A definition for "RTP media type" was added in Section 3 to
+ allow the explanation of multiplexing RTP sessions in Section
+ 5.2 to be more clear regarding the multiplexing of multiple
+ media. That section also now explains that multiplexing
+ multiple sources of the same medium based on SSRC identifiers
+ may be appropriate and is the norm for multicast sessions.
+
+ - The definition for "non-RTP means" was expanded to include
+ examples of other protocols constituting non-RTP means.
+
+
+ - The description of the session bandwidth parameter is expanded
+ in Section 6.2, including a clarification that the control
+ traffic bandwidth is in addition to the session bandwidth for
+ the data traffic.
+
+ - The effect of varying packet duration on the jitter calculation
+ was explained in Section 6.4.4.
+
+ - The method for terminating and padding a sequence of SDES items
+ was clarified in Section 6.5.
+
+ - IPv6 address examples were added in the description of SDES
+ CNAME in Section 6.5.1, and "example.com" was used in place of
+ other example domain names.
+
+ - The Security section added a formal reference to IPSEC now that
+ it is available, and says that the confidentiality method
+ defined in this specification is primarily to codify existing
+ practice. It is RECOMMENDED that stronger encryption
+ algorithms such as Triple-DES be used in place of the default
+ algorithm, and noted that the SRTP profile based on AES will be
+ the correct choice in the future. A caution about the weakness
+ of the RTP header as an initialization vector was added. It
+ was also noted that payload-only encryption is necessary to
+ allow for header compression.
+
+ - The method for partial encryption of RTCP was clarified; in
+ particular, SDES CNAME is carried in only one part when the
+ compound RTCP packet is split.
+
+ - It is clarified that only one compound RTCP packet should be
+ sent per reporting interval and that if there are too many
+ active sources for the reports to fit in the MTU, then a subset
+ of the sources should be selected round-robin over multiple
+ intervals.
+
+ - A note was added in Appendix A.1 that packets may be saved
+ during RTP header validation and delivered upon success.
+
+ - Section 7.3 now explains that a mixer aggregating SDES packets
+ uses more RTCP bandwidth due to longer packets, and a mixer
+ passing through RTCP naturally sends packets at higher than the
+ single source rate, but both behaviors are valid.
+
+ - Section 13 clarifies that an RTP application may use multiple
+ profiles but typically only one in a given session.
+
+
+ - The terms MUST, SHOULD, MAY, etc. are used as defined in RFC
+ 2119.
+
+ - The bibliography was divided into normative and informative
+ references.
+
+References
+
+Normative References
+
+ [1] Schulzrinne, H. and S. Casner, "RTP Profile for Audio and Video
+ Conferences with Minimal Control", RFC 3551, July 2003.
+
+ [2] Bradner, S., "Key Words for Use in RFCs to Indicate Requirement
+ Levels", BCP 14, RFC 2119, March 1997.
+
+ [3] Postel, J., "Internet Protocol", STD 5, RFC 791, September 1981.
+
+ [4] Mills, D., "Network Time Protocol (Version 3) Specification,
+ Implementation and Analysis", RFC 1305, March 1992.
+
+ [5] Yergeau, F., "UTF-8, a Transformation Format of ISO 10646", RFC
+ 2279, January 1998.
+
+ [6] Mockapetris, P., "Domain Names - Concepts and Facilities", STD
+ 13, RFC 1034, November 1987.
+
+ [7] Mockapetris, P., "Domain Names - Implementation and
+ Specification", STD 13, RFC 1035, November 1987.
+
+ [8] Braden, R., "Requirements for Internet Hosts - Application and
+ Support", STD 3, RFC 1123, October 1989.
+
+ [9] Resnick, P., "Internet Message Format", RFC 2822, April 2001.
+
+Informative References
+
+ [10] Clark, D. and D. Tennenhouse, "Architectural Considerations for
+ a New Generation of Protocols," in SIGCOMM Symposium on
+ Communications Architectures and Protocols , (Philadelphia,
+ Pennsylvania), pp. 200--208, IEEE Computer Communications
+ Review, Vol. 20(4), September 1990.
+
+ [11] Schulzrinne, H., "Issues in designing a transport protocol for
+ audio and video conferences and other multiparticipant real-time
+ applications." expired Internet Draft, October 1993.
+
+ [12] Comer, D., Internetworking with TCP/IP , vol. 1. Englewood
+ Cliffs, New Jersey: Prentice Hall, 1991.
+
+ [13] Rosenberg, J., Schulzrinne, H., Camarillo, G., Johnston, A.,
+ Peterson, J., Sparks, R., Handley, M. and E. Schooler, "SIP:
+ Session Initiation Protocol", RFC 3261, June 2002.
+
+ [14] International Telecommunication Union, "Visual telephone systems
+ and equipment for local area networks which provide a non-
+ guaranteed quality of service", Recommendation H.323,
+ Telecommunication Standardization Sector of ITU, Geneva,
+ Switzerland, July 2003.
+
+ [15] Handley, M. and V. Jacobson, "SDP: Session Description
+ Protocol", RFC 2327, April 1998.
+
+ [16] Schulzrinne, H., Rao, A. and R. Lanphier, "Real Time Streaming
+ Protocol (RTSP)", RFC 2326, April 1998.
+
+ [17] Eastlake 3rd, D., Crocker, S. and J. Schiller, "Randomness
+ Recommendations for Security", RFC 1750, December 1994.
+
+ [18] Bolot, J.-C., Turletti, T. and I. Wakeman, "Scalable Feedback
+ Control for Multicast Video Distribution in the Internet", in
+ SIGCOMM Symposium on Communications Architectures and Protocols,
+ (London, England), pp. 58--67, ACM, August 1994.
+
+ [19] Busse, I., Deffner, B. and H. Schulzrinne, "Dynamic QoS Control
+ of Multimedia Applications Based on RTP", Computer
+ Communications , vol. 19, pp. 49--58, January 1996.
+
+ [20] Floyd, S. and V. Jacobson, "The Synchronization of Periodic
+ Routing Messages", in SIGCOMM Symposium on Communications
+ Architectures and Protocols (D. P. Sidhu, ed.), (San Francisco,
+ California), pp. 33--44, ACM, September 1993. Also in [34].
+
+ [21] Rosenberg, J. and H. Schulzrinne, "Sampling of the Group
+ Membership in RTP", RFC 2762, February 2000.
+
+ [22] Cadzow, J., Foundations of Digital Signal Processing and Data
+ Analysis New York, New York: Macmillan, 1987.
+
+ [23] Hinden, R. and S. Deering, "Internet Protocol Version 6 (IPv6)
+ Addressing Architecture", RFC 3513, April 2003.
+
+ [24] Rekhter, Y., Moskowitz, B., Karrenberg, D., de Groot, G. and E.
+ Lear, "Address Allocation for Private Internets", RFC 1918,
+ February 1996.
+
+ [25] Lear, E., Fair, E., Crocker, D. and T. Kessler, "Network 10
+ Considered Harmful (Some Practices Shouldn't be Codified)", RFC
+ 1627, July 1994.
+
+ [26] Feller, W., An Introduction to Probability Theory and its
+ Applications, vol. 1. New York, New York: John Wiley and Sons,
+ third ed., 1968.
+
+ [27] Kent, S. and R. Atkinson, "Security Architecture for the
+ Internet Protocol", RFC 2401, November 1998.
+
+ [28] Baugher, M., Blom, R., Carrara, E., McGrew, D., Naslund, M.,
+ Norrman, K. and D. Oran, "Secure Real-time Transport Protocol",
+ Work in Progress, April 2003.
+
+ [29] Balenson, D., "Privacy Enhancement for Internet Electronic Mail:
+ Part III", RFC 1423, February 1993.
+
+ [30] Voydock, V. and S. Kent, "Security Mechanisms in High-Level
+ Network Protocols", ACM Computing Surveys, vol. 15, pp. 135-171,
+ June 1983.
+
+ [31] Floyd, S., "Congestion Control Principles", BCP 41, RFC 2914,
+ September 2000.
+
+ [32] Rivest, R., "The MD5 Message-Digest Algorithm", RFC 1321, April
+ 1992.
+
+ [33] Stubblebine, S., "Security Services for Multimedia
+ Conferencing", in 16th National Computer Security Conference,
+ (Baltimore, Maryland), pp. 391--395, September 1993.
+
+ [34] Floyd, S. and V. Jacobson, "The Synchronization of Periodic
+ Routing Messages", IEEE/ACM Transactions on Networking, vol. 2,
+ pp. 122--136, April 1994.
+
+
+Authors' Addresses
+
+ Henning Schulzrinne
+ Department of Computer Science
+ Columbia University
+ 1214 Amsterdam Avenue
+ New York, NY 10027
+ United States
+
+ EMail: schulzrinne@cs.columbia.edu
+
+
+ Stephen L. Casner
+ Packet Design
+ 3400 Hillview Avenue, Building 3
+ Palo Alto, CA 94304
+ United States
+
+ EMail: casner@acm.org
+
+
+ Ron Frederick
+ Blue Coat Systems Inc.
+ 650 Almanor Avenue
+ Sunnyvale, CA 94085
+ United States
+
+ EMail: ronf@bluecoat.com
+
+
+ Van Jacobson
+ Packet Design
+ 3400 Hillview Avenue, Building 3
+ Palo Alto, CA 94304
+ United States
+
+ EMail: van@packetdesign.com
+
+Full Copyright Statement
+
+ Copyright (C) The Internet Society (2003). All Rights Reserved.
+
+ This document and translations of it may be copied and furnished to
+ others, and derivative works that comment on or otherwise explain it
+ or assist in its implementation may be prepared, copied, published
+ and distributed, in whole or in part, without restriction of any
+ kind, provided that the above copyright notice and this paragraph are
+ included on all such copies and derivative works. However, this
+ document itself may not be modified in any way, such as by removing
+ the copyright notice or references to the Internet Society or other
+ Internet organizations, except as needed for the purpose of
+ developing Internet standards in which case the procedures for
+ copyrights defined in the Internet Standards process must be
+ followed, or as required to translate it into languages other than
+ English.
+
+ The limited permissions granted above are perpetual and will not be
+ revoked by the Internet Society or its successors or assigns.
+
+ This document and the information contained herein is provided on an
+ "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING
+ TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING
+ BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION
+ HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+
+Acknowledgement
+
+ Funding for the RFC Editor function is currently provided by the
+ Internet Society.
+
diff --git a/src/modules/rtp/rfc3551.txt b/src/modules/rtp/rfc3551.txt
new file mode 100644
index 00000000..c43ff34d
--- /dev/null
+++ b/src/modules/rtp/rfc3551.txt
@@ -0,0 +1,2467 @@
+
+
+
+
+
+
+Network Working Group H. Schulzrinne
+Request for Comments: 3551 Columbia University
+Obsoletes: 1890 S. Casner
+Category: Standards Track Packet Design
+ July 2003
+
+
+ RTP Profile for Audio and Video Conferences
+ with Minimal Control
+
+Status of this Memo
+
+ This document specifies an Internet standards track protocol for the
+ Internet community, and requests discussion and suggestions for
+ improvements. Please refer to the current edition of the "Internet
+ Official Protocol Standards" (STD 1) for the standardization state
+ and status of this protocol. Distribution of this memo is unlimited.
+
+Copyright Notice
+
+ Copyright (C) The Internet Society (2003). All Rights Reserved.
+
+Abstract
+
+ This document describes a profile called "RTP/AVP" for the use of the
+ real-time transport protocol (RTP), version 2, and the associated
+ control protocol, RTCP, within audio and video multiparticipant
+ conferences with minimal control. It provides interpretations of
+ generic fields within the RTP specification suitable for audio and
+ video conferences. In particular, this document defines a set of
+ default mappings from payload type numbers to encodings.
+
+ This document also describes how audio and video data may be carried
+ within RTP. It defines a set of standard encodings and their names
+ when used within RTP. The descriptions provide pointers to reference
+ implementations and the detailed standards. This document is meant
+ as an aid for implementors of audio, video and other real-time
+ multimedia applications.
+
+ This memorandum obsoletes RFC 1890. It is mostly backwards-
+ compatible except for functions removed because two interoperable
+ implementations were not found. The additions to RFC 1890 codify
+ existing practice in the use of payload formats under this profile
+ and include new payload formats defined since RFC 1890 was published.
+
+
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 1]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+Table of Contents
+
+ 1. Introduction ................................................. 3
+ 1.1 Terminology ............................................. 3
+ 2. RTP and RTCP Packet Forms and Protocol Behavior .............. 4
+ 3. Registering Additional Encodings ............................. 6
+ 4. Audio ........................................................ 8
+ 4.1 Encoding-Independent Rules .............................. 8
+ 4.2 Operating Recommendations ............................... 9
+ 4.3 Guidelines for Sample-Based Audio Encodings ............. 10
+ 4.4 Guidelines for Frame-Based Audio Encodings .............. 11
+ 4.5 Audio Encodings ......................................... 12
+ 4.5.1 DVI4 ............................................ 13
+ 4.5.2 G722 ............................................ 14
+ 4.5.3 G723 ............................................ 14
+ 4.5.4 G726-40, G726-32, G726-24, and G726-16 .......... 18
+ 4.5.5 G728 ............................................ 19
+ 4.5.6 G729 ............................................ 20
+ 4.5.7 G729D and G729E ................................. 22
+ 4.5.8 GSM ............................................. 24
+ 4.5.9 GSM-EFR ......................................... 27
+ 4.5.10 L8 .............................................. 27
+ 4.5.11 L16 ............................................. 27
+ 4.5.12 LPC ............................................. 27
+ 4.5.13 MPA ............................................. 28
+ 4.5.14 PCMA and PCMU ................................... 28
+ 4.5.15 QCELP ........................................... 28
+ 4.5.16 RED ............................................. 29
+ 4.5.17 VDVI ............................................ 29
+ 5. Video ........................................................ 30
+ 5.1 CelB .................................................... 30
+ 5.2 JPEG .................................................... 30
+ 5.3 H261 .................................................... 30
+ 5.4 H263 .................................................... 31
+ 5.5 H263-1998 ............................................... 31
+ 5.6 MPV ..................................................... 31
+ 5.7 MP2T .................................................... 31
+ 5.8 nv ...................................................... 32
+ 6. Payload Type Definitions ..................................... 32
+ 7. RTP over TCP and Similar Byte Stream Protocols ............... 34
+ 8. Port Assignment .............................................. 34
+ 9. Changes from RFC 1890 ........................................ 35
+ 10. Security Considerations ...................................... 38
+ 11. IANA Considerations .......................................... 39
+ 12. References ................................................... 39
+ 12.1 Normative References .................................... 39
+ 12.2 Informative References .................................. 39
+ 13. Current Locations of Related Resources ....................... 41
+
+
+
+Schulzrinne & Casner Standards Track [Page 2]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+ 14. Acknowledgments .............................................. 42
+ 15. Intellectual Property Rights Statement ....................... 43
+ 16. Authors' Addresses ........................................... 43
+ 17. Full Copyright Statement ..................................... 44
+
+1. Introduction
+
+ This profile defines aspects of RTP left unspecified in the RTP
+ Version 2 protocol definition (RFC 3550) [1]. This profile is
+ intended for the use within audio and video conferences with minimal
+ session control. In particular, no support for the negotiation of
+ parameters or membership control is provided. The profile is
+ expected to be useful in sessions where no negotiation or membership
+ control are used (e.g., using the static payload types and the
+ membership indications provided by RTCP), but this profile may also
+ be useful in conjunction with a higher-level control protocol.
+
+ Use of this profile may be implicit in the use of the appropriate
+ applications; there may be no explicit indication by port number,
+ protocol identifier or the like. Applications such as session
+ directories may use the name for this profile specified in Section
+ 11.
+
+ Other profiles may make different choices for the items specified
+ here.
+
+ This document also defines a set of encodings and payload formats for
+ audio and video. These payload format descriptions are included here
+ only as a matter of convenience since they are too small to warrant
+ separate documents. Use of these payload formats is NOT REQUIRED to
+ use this profile. Only the binding of some of the payload formats to
+ static payload type numbers in Tables 4 and 5 is normative.
+
+1.1 Terminology
+
+ The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
+ "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this
+ document are to be interpreted as described in RFC 2119 [2] and
+ indicate requirement levels for implementations compliant with this
+ RTP profile.
+
+ This document defines the term media type as dividing encodings of
+ audio and video content into three classes: audio, video and
+ audio/video (interleaved).
+
+
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 3]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+2. RTP and RTCP Packet Forms and Protocol Behavior
+
+ The section "RTP Profiles and Payload Format Specifications" of RFC
+ 3550 enumerates a number of items that can be specified or modified
+ in a profile. This section addresses these items. Generally, this
+ profile follows the default and/or recommended aspects of the RTP
+ specification.
+
+ RTP data header: The standard format of the fixed RTP data
+ header is used (one marker bit).
+
+ Payload types: Static payload types are defined in Section 6.
+
+ RTP data header additions: No additional fixed fields are
+ appended to the RTP data header.
+
+ RTP data header extensions: No RTP header extensions are
+ defined, but applications operating under this profile MAY use
+ such extensions. Thus, applications SHOULD NOT assume that the
+ RTP header X bit is always zero and SHOULD be prepared to ignore
+ the header extension. If a header extension is defined in the
+ future, that definition MUST specify the contents of the first 16
+ bits in such a way that multiple different extensions can be
+ identified.
+
+ RTCP packet types: No additional RTCP packet types are defined
+ by this profile specification.
+
+ RTCP report interval: The suggested constants are to be used for
+ the RTCP report interval calculation. Sessions operating under
+ this profile MAY specify a separate parameter for the RTCP traffic
+ bandwidth rather than using the default fraction of the session
+ bandwidth. The RTCP traffic bandwidth MAY be divided into two
+ separate session parameters for those participants which are
+ active data senders and those which are not. Following the
+ recommendation in the RTP specification [1] that 1/4 of the RTCP
+ bandwidth be dedicated to data senders, the RECOMMENDED default
+ values for these two parameters would be 1.25% and 3.75%,
+ respectively. For a particular session, the RTCP bandwidth for
+ non-data-senders MAY be set to zero when operating on
+ unidirectional links or for sessions that don't require feedback
+ on the quality of reception. The RTCP bandwidth for data senders
+ SHOULD be kept non-zero so that sender reports can still be sent
+ for inter-media synchronization and to identify the source by
+ CNAME. The means by which the one or two session parameters for
+ RTCP bandwidth are specified is beyond the scope of this memo.
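+
+ As a rough editorial illustration (not part of the profile text), the
+ default split described above might be computed as in the following C
+ sketch; the function and parameter names are illustrative assumptions:
+
+    /* Split a session bandwidth (bits per second) into the RECOMMENDED
+     * default RTCP shares: 1.25% for active data senders and 3.75% for
+     * the other participants (together the usual 5% RTCP fraction). */
+    static void rtcp_default_shares(unsigned long session_bw_bps,
+                                    unsigned long *sender_bps,
+                                    unsigned long *others_bps)
+    {
+        *sender_bps = session_bw_bps / 80;       /* 1.25% = 1/80 */
+        *others_bps = (session_bw_bps * 3) / 80; /* 3.75% = 3/80 */
+    }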
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 4]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+ SR/RR extension: No extension section is defined for the RTCP SR
+ or RR packet.
+
+ SDES use: Applications MAY use any of the SDES items described
+ in the RTP specification. While CNAME information MUST be sent
+ every reporting interval, other items SHOULD only be sent every
+ third reporting interval, with NAME sent seven out of eight times
+ within that slot and the remaining SDES items cyclically taking up
+ the eighth slot, as defined in Section 6.2.2 of the RTP
+ specification. In other words, NAME is sent in RTCP packets 1, 4,
+ 7, 10, 13, 16, 19, while, say, EMAIL is used in RTCP packet 22.
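+
+ As a rough editorial illustration of the schedule above (not normative;
+ the helper name and the 1-based packet count are assumptions), one way
+ to express it in C:
+
+    /* Decide the additional SDES item for the n-th RTCP packet sent
+     * (n = 1, 2, 3, ...).  CNAME is always included and is not covered
+     * here.  Returns 0 for no extra item, 1 for NAME, 2 for the next
+     * item from the cyclic set (EMAIL, PHONE, ...). */
+    static int sdes_extra_item(unsigned n)
+    {
+        unsigned slot;
+
+        if (n % 3 != 1)
+            return 0;            /* extra items only every third packet */
+        slot = (n + 2) / 3;      /* slot 1, 2, ... for packets 1, 4, ... */
+        return (slot % 8 != 0) ? 1 : 2; /* NAME seven out of every eight */
+    }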
+
+ Security: The RTP default security services are also the default
+ under this profile.
+
+ String-to-key mapping: No mapping is specified by this profile.
+
+ Congestion: RTP and this profile may be used in the context of
+ enhanced network service, for example, through Integrated Services
+ (RFC 1633) [4] or Differentiated Services (RFC 2475) [5], or they
+ may be used with best effort service.
+
+ If enhanced service is being used, RTP receivers SHOULD monitor
+ packet loss to ensure that the service that was requested is
+ actually being delivered. If it is not, then they SHOULD assume
+ that they are receiving best-effort service and behave
+ accordingly.
+
+ If best-effort service is being used, RTP receivers SHOULD monitor
+ packet loss to ensure that the packet loss rate is within
+ acceptable parameters. Packet loss is considered acceptable if a
+ TCP flow across the same network path and experiencing the same
+ network conditions would achieve an average throughput, measured
+ on a reasonable timescale, that is not less than the RTP flow is
+ achieving. This condition can be satisfied by implementing
+ congestion control mechanisms to adapt the transmission rate (or
+ the number of layers subscribed for a layered multicast session),
+ or by arranging for a receiver to leave the session if the loss
+ rate is unacceptably high.
+
+ The comparison to TCP cannot be specified exactly, but is intended
+ as an "order-of-magnitude" comparison in timescale and throughput.
+ The timescale on which TCP throughput is measured is the round-
+ trip time of the connection. In essence, this requirement states
+ that it is not acceptable to deploy an application (using RTP or
+ any other transport protocol) on the best-effort Internet which
+ consumes bandwidth arbitrarily and does not compete fairly with
+ TCP within an order of magnitude.
+
+
+
+Schulzrinne & Casner Standards Track [Page 5]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+ Underlying protocol: The profile specifies the use of RTP over
+ unicast and multicast UDP as well as TCP. (This does not preclude
+ the use of these definitions when RTP is carried by other lower-
+ layer protocols.)
+
+ Transport mapping: The standard mapping of RTP and RTCP to
+ transport-level addresses is used.
+
+ Encapsulation: This profile leaves to applications the
+ specification of RTP encapsulation in protocols other than UDP.
+
+3. Registering Additional Encodings
+
+ This profile lists a set of encodings, each of which is comprised of
+ a particular media data compression or representation plus a payload
+ format for encapsulation within RTP. Some of those payload formats
+ are specified here, while others are specified in separate RFCs. It
+ is expected that additional encodings beyond the set listed here will
+ be created in the future and specified in additional payload format
+ RFCs.
+
+ This profile also assigns to each encoding a short name which MAY be
+ used by higher-level control protocols, such as the Session
+ Description Protocol (SDP), RFC 2327 [6], to identify encodings
+ selected for a particular RTP session.
+
+ In some contexts it may be useful to refer to these encodings in the
+ form of a MIME content-type. To facilitate this, RFC 3555 [7]
+ provides registrations for all of the encoding names listed here as
+ MIME subtype names under the "audio" and "video" MIME types through
+ the MIME registration procedure as specified in RFC 2048 [8].
+
+ Any additional encodings specified for use under this profile (or
+ others) may also be assigned names registered as MIME subtypes with
+ the Internet Assigned Numbers Authority (IANA). This registry
+ provides a means to insure that the names assigned to the additional
+ encodings are kept unique. RFC 3555 specifies the information that
+ is required for the registration of RTP encodings.
+
+ In addition to assigning names to encodings, this profile also
+ assigns static RTP payload type numbers to some of them. However,
+ the payload type number space is relatively small and cannot
+ accommodate assignments for all existing and future encodings.
+ During the early stages of RTP development, it was necessary to use
+ statically assigned payload types because no other mechanism had been
+ specified to bind encodings to payload types. It was anticipated
+ that non-RTP means beyond the scope of this memo (such as directory
+ services or invitation protocols) would be specified to establish a
+
+
+
+Schulzrinne & Casner Standards Track [Page 6]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+ dynamic mapping between a payload type and an encoding. Now,
+ mechanisms for defining dynamic payload type bindings have been
+ specified in the Session Description Protocol (SDP) and in other
+ protocols such as ITU-T Recommendation H.323/H.245. These mechanisms
+ associate the registered name of the encoding/payload format, along
+ with any additional required parameters, such as the RTP timestamp
+ clock rate and number of channels, with a payload type number. This
+ association is effective only for the duration of the RTP session in
+ which the dynamic payload type binding is made. This association
+ applies only to the RTP session for which it is made, thus the
+ numbers can be re-used for different encodings in different sessions
+ so the number space limitation is avoided.
+
+ This profile reserves payload type numbers in the range 96-127
+ exclusively for dynamic assignment. Applications SHOULD first use
+ values in this range for dynamic payload types. Those applications
+ which need to define more than 32 dynamic payload types MAY bind
+ codes below 96, in which case it is RECOMMENDED that unassigned
+ payload type numbers be used first. However, the statically assigned
+ payload types are default bindings and MAY be dynamically bound to
+ new encodings if needed. Redefining payload types below 96 may cause
+ incorrect operation if an attempt is made to join a session without
+ obtaining session description information that defines the dynamic
+ payload types.
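+
+ As an editorial aside (not part of the profile), a dynamic binding is
+ commonly conveyed with an SDP "rtpmap" attribute; the following C sketch
+ merely formats such a line, and the buffer handling and the chosen
+ payload type 97 are illustrative assumptions:
+
+    #include <stdio.h>
+
+    /* Format "a=rtpmap:<pt> <encoding>/<clock rate>/<channels>". */
+    static int format_rtpmap(char *buf, size_t len, int pt,
+                             const char *encoding, unsigned clock_rate,
+                             unsigned channels)
+    {
+        return snprintf(buf, len, "a=rtpmap:%d %s/%u/%u",
+                        pt, encoding, clock_rate, channels);
+    }
+
+    /* e.g. format_rtpmap(buf, sizeof buf, 97, "L16", 44100, 2) yields
+     * "a=rtpmap:97 L16/44100/2". */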
+
+ Dynamic payload types SHOULD NOT be used without a well-defined
+ mechanism to indicate the mapping. Systems that expect to
+ interoperate with others operating under this profile SHOULD NOT make
+ their own assignments of proprietary encodings to particular, fixed
+ payload types.
+
+ This specification establishes the policy that no additional static
+ payload types will be assigned beyond the ones defined in this
+ document. Establishing this policy avoids the problem of trying to
+ create a set of criteria for accepting static assignments and
+ encourages the implementation and deployment of the dynamic payload
+ type mechanisms.
+
+ The final set of static payload type assignments is provided in
+ Tables 4 and 5.
+
+
+
+
+
+
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 7]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+4. Audio
+
+4.1 Encoding-Independent Rules
+
+ Since the ability to suppress silence is one of the primary
+ motivations for using packets to transmit voice, the RTP header
+ carries both a sequence number and a timestamp to allow a receiver to
+ distinguish between lost packets and periods of time when no data was
+ transmitted. Discontiguous transmission (silence suppression) MAY be
+ used with any audio payload format. Receivers MUST assume that
+ senders may suppress silence unless this is restricted by signaling
+ specified elsewhere. (Even if the transmitter does not suppress
+ silence, the receiver should be prepared to handle periods when no
+ data is present since packets may be lost.)
+
+ Some payload formats (see Sections 4.5.3 and 4.5.6) define a "silence
+ insertion descriptor" or "comfort noise" frame to specify parameters
+ for artificial noise that may be generated during a period of silence
+ to approximate the background noise at the source. For other payload
+ formats, a generic Comfort Noise (CN) payload format is specified in
+ RFC 3389 [9]. When the CN payload format is used with another
+ payload format, different values in the RTP payload type field
+ distinguish comfort-noise packets from those of the selected payload
+ format.
+
+ For applications which send either no packets or occasional comfort-
+ noise packets during silence, the first packet of a talkspurt, that
+ is, the first packet after a silence period during which packets have
+ not been transmitted contiguously, SHOULD be distinguished by setting
+ the marker bit in the RTP data header to one. The marker bit in all
+ other packets is zero. The beginning of a talkspurt MAY be used to
+ adjust the playout delay to reflect changing network delays.
+ Applications without silence suppression MUST set the marker bit to
+ zero.
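+
+ A minimal editorial sketch of this rule in C (not normative; the flag
+ names are illustrative):
+
+    /* Marker bit for an outgoing audio packet: with silence suppression
+     * (or occasional comfort-noise packets), only the first packet of a
+     * talkspurt carries M=1; without silence suppression M is always 0. */
+    static unsigned rtp_audio_marker(int silence_suppression,
+                                     int first_packet_of_talkspurt)
+    {
+        if (!silence_suppression)
+            return 0;
+        return first_packet_of_talkspurt ? 1 : 0;
+    }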
+
+ The RTP clock rate used for generating the RTP timestamp is
+ independent of the number of channels and the encoding; it usually
+ equals the number of sampling periods per second. For N-channel
+ encodings, each sampling period (say, 1/8,000 of a second) generates
+ N samples. (This terminology is standard, but somewhat confusing, as
+ the total number of samples generated per second is then the sampling
+ rate times the channel count.)
+
+ If multiple audio channels are used, channels are numbered left-to-
+ right, starting at one. In RTP audio packets, information from
+ lower-numbered channels precedes that from higher-numbered channels.
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 8]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+ For more than two channels, the convention followed by the AIFF-C
+ audio interchange format SHOULD be followed [3], using the following
+ notation, unless some other convention is specified for a particular
+ encoding or payload format:
+
+ l left
+ r right
+ c center
+ S surround
+ F front
+ R rear
+
+ channels description channel
+ 1 2 3 4 5 6
+ _________________________________________________
+ 2 stereo l r
+ 3 l r c
+ 4 l c r S
+ 5 Fl Fr Fc Sl Sr
+ 6 l lc c r rc S
+
+ Note: RFC 1890 defined two conventions for the ordering of four
+ audio channels. Since the ordering is indicated implicitly by
+ the number of channels, this was ambiguous. In this revision,
+ the order described as "quadrophonic" has been eliminated to
+ remove the ambiguity. This choice was based on the observation
+ that quadrophonic consumer audio format did not become popular
+ whereas surround-sound subsequently has.
+
+ Samples for all channels belonging to a single sampling instant MUST
+ be within the same packet. The interleaving of samples from
+ different channels depends on the encoding. General guidelines are
+ given in Section 4.3 and 4.4.
+
+ The sampling frequency SHOULD be drawn from the set: 8,000, 11,025,
+ 16,000, 22,050, 24,000, 32,000, 44,100 and 48,000 Hz. (Older Apple
+ Macintosh computers had a native sample rate of 22,254.54 Hz, which
+ can be converted to 22,050 with acceptable quality by dropping 4
+ samples in a 20 ms frame.) However, most audio encodings are defined
+ for a more restricted set of sampling frequencies. Receivers SHOULD
+ be prepared to accept multi-channel audio, but MAY choose to only
+ play a single channel.
+
+4.2 Operating Recommendations
+
+ The following recommendations are default operating parameters.
+ Applications SHOULD be prepared to handle other values. The ranges
+ given are meant to give guidance to application writers, allowing a
+
+
+
+Schulzrinne & Casner Standards Track [Page 9]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+ set of applications conforming to these guidelines to interoperate
+ without additional negotiation. These guidelines are not intended to
+ restrict operating parameters for applications that can negotiate a
+ set of interoperable parameters, e.g., through a conference control
+ protocol.
+
+ For packetized audio, the default packetization interval SHOULD have
+ a duration of 20 ms or one frame, whichever is longer, unless
+ otherwise noted in Table 1 (column "ms/packet"). The packetization
+ interval determines the minimum end-to-end delay; longer packets
+ introduce less header overhead but higher delay and make packet loss
+ more noticeable. For non-interactive applications such as lectures
+ or for links with severe bandwidth constraints, a higher
+ packetization delay MAY be used. A receiver SHOULD accept packets
+ representing between 0 and 200 ms of audio data. (For framed audio
+ encodings, a receiver SHOULD accept packets with a number of frames
+ equal to 200 ms divided by the frame duration, rounded up.) This
+ restriction allows reasonable buffer sizing for the receiver.
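+
+ An editorial sketch of this sizing rule in C (names are illustrative,
+ not part of the profile text):
+
+    /* Largest number of frames a receiver SHOULD accept in one packet:
+     * 200 ms of audio divided by the frame duration, rounded up.  The
+     * frame duration is expressed in samples to cover codecs such as
+     * G.728 whose frames are not a whole number of milliseconds. */
+    static unsigned max_frames_per_packet(unsigned sample_rate,
+                                          unsigned samples_per_frame)
+    {
+        unsigned samples_200ms = sample_rate / 5;  /* 200 ms of samples */
+
+        return (samples_200ms + samples_per_frame - 1) / samples_per_frame;
+    }
+
+    /* e.g. GSM at 8,000 Hz, 160-sample frames: 1600 / 160 = 10 frames. */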
+
+4.3 Guidelines for Sample-Based Audio Encodings
+
+ In sample-based encodings, each audio sample is represented by a
+ fixed number of bits. Within the compressed audio data, codes for
+ individual samples may span octet boundaries. An RTP audio packet
+ may contain any number of audio samples, subject to the constraint
+ that the number of bits per sample times the number of samples per
+ packet yields an integral octet count. Fractional encodings produce
+ less than one octet per sample.
+
+ The duration of an audio packet is determined by the number of
+ samples in the packet.
+
+ For sample-based encodings producing one or more octets per sample,
+ samples from different channels sampled at the same sampling instant
+ SHOULD be packed in consecutive octets. For example, for a two-
+ channel encoding, the octet sequence is (left channel, first sample),
+ (right channel, first sample), (left channel, second sample), (right
+ channel, second sample), .... For multi-octet encodings, octets
+ SHOULD be transmitted in network byte order (i.e., most significant
+ octet first).
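+
+ An editorial sketch of this packing for a two-channel, 16-bit linear
+ encoding (buffer layout and names are illustrative assumptions, not part
+ of the profile):
+
+    #include <stdint.h>
+    #include <stddef.h>
+
+    /* Interleave left/right samples of each sampling instant and emit
+     * every sample most significant octet first (network byte order). */
+    static void pack_stereo_16bit(const int16_t *left, const int16_t *right,
+                                  size_t nsamples, uint8_t *out)
+    {
+        size_t i;
+
+        for (i = 0; i < nsamples; i++) {
+            *out++ = (uint8_t)((uint16_t)left[i] >> 8);    /* left, MSB  */
+            *out++ = (uint8_t)((uint16_t)left[i] & 0xff);  /* left, LSB  */
+            *out++ = (uint8_t)((uint16_t)right[i] >> 8);   /* right, MSB */
+            *out++ = (uint8_t)((uint16_t)right[i] & 0xff); /* right, LSB */
+        }
+    }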
+
+ The packing of sample-based encodings producing less than one octet
+ per sample is encoding-specific.
+
+ The RTP timestamp reflects the instant at which the first sample in
+ the packet was sampled, that is, the oldest information in the
+ packet.
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 10]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+4.4 Guidelines for Frame-Based Audio Encodings
+
+ Frame-based encodings encode a fixed-length block of audio into
+ another block of compressed data, typically also of fixed length.
+ For frame-based encodings, the sender MAY choose to combine several
+ such frames into a single RTP packet. The receiver can tell the
+ number of frames contained in an RTP packet, if all the frames have
+ the same length, by dividing the RTP payload length by the audio
+ frame size which is defined as part of the encoding. This does not
+ work when carrying frames of different sizes unless the frame sizes
+ are relatively prime. If not, the frames MUST indicate their size.
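+
+ An editorial sketch of this computation in C (names are illustrative):
+
+    #include <stddef.h>
+
+    /* Number of equal-sized codec frames in an RTP payload, or 0 if the
+     * payload length is not a whole number of frames. */
+    static unsigned frames_in_payload(size_t payload_len, size_t frame_size)
+    {
+        if (frame_size == 0 || payload_len % frame_size != 0)
+            return 0;
+        return (unsigned)(payload_len / frame_size);
+    }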
+
+ For frame-based codecs, the channel order is defined for the whole
+ block. That is, for two-channel audio, right and left samples SHOULD
+ be coded independently, with the encoded frame for the left channel
+ preceding that for the right channel.
+
+ All frame-oriented audio codecs SHOULD be able to encode and decode
+ several consecutive frames within a single packet. Since the frame
+ size for the frame-oriented codecs is given, there is no need to use
+ a separate designation for the same encoding, but with different
+ number of frames per packet.
+
+ RTP packets SHALL contain a whole number of frames, with frames
+ inserted according to age within a packet, so that the oldest frame
+ (to be played first) occurs immediately after the RTP packet header.
+ The RTP timestamp reflects the instant at which the first sample in
+ the first frame was sampled, that is, the oldest information in the
+ packet.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 11]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+4.5 Audio Encodings
+
+ name of sampling default
+ encoding sample/frame bits/sample rate ms/frame ms/packet
+ __________________________________________________________________
+ DVI4 sample 4 var. 20
+ G722 sample 8 16,000 20
+ G723 frame N/A 8,000 30 30
+ G726-40 sample 5 8,000 20
+ G726-32 sample 4 8,000 20
+ G726-24 sample 3 8,000 20
+ G726-16 sample 2 8,000 20
+ G728 frame N/A 8,000 2.5 20
+ G729 frame N/A 8,000 10 20
+ G729D frame N/A 8,000 10 20
+ G729E frame N/A 8,000 10 20
+ GSM frame N/A 8,000 20 20
+ GSM-EFR frame N/A 8,000 20 20
+ L8 sample 8 var. 20
+ L16 sample 16 var. 20
+ LPC frame N/A 8,000 20 20
+ MPA frame N/A var. var.
+ PCMA sample 8 var. 20
+ PCMU sample 8 var. 20
+ QCELP frame N/A 8,000 20 20
+ VDVI sample var. var. 20
+
+ Table 1: Properties of Audio Encodings (N/A: not applicable; var.:
+ variable)
+
+ The characteristics of the audio encodings described in this document
+ are shown in Table 1; they are listed in order of their payload type
+ in Table 4. While most audio codecs are only specified for a fixed
+ sampling rate, some sample-based algorithms (indicated by an entry of
+ "var." in the sampling rate column of Table 1) may be used with
+ different sampling rates, resulting in different coded bit rates.
+ When used with a sampling rate other than that for which a static
+ payload type is defined, non-RTP means beyond the scope of this memo
+ MUST be used to define a dynamic payload type and MUST indicate the
+ selected RTP timestamp clock rate, which is usually the same as the
+ sampling rate for audio.
+
+
+
+
+
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 12]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+4.5.1 DVI4
+
+ DVI4 uses an adaptive delta pulse code modulation (ADPCM) encoding
+ scheme that was specified by the Interactive Multimedia Association
+ (IMA) as the "IMA ADPCM wave type". However, the encoding defined
+ here as DVI4 differs in three respects from the IMA specification:
+
+ o The RTP DVI4 header contains the predicted value rather than the
+ first sample value contained in the IMA ADPCM block header.
+
+ o IMA ADPCM blocks contain an odd number of samples, since the first
+ sample of a block is contained just in the header (uncompressed),
+ followed by an even number of compressed samples. DVI4 has an
+ even number of compressed samples only, using the `predict' word
+ from the header to decode the first sample.
+
+ o For DVI4, the 4-bit samples are packed with the first sample in
+ the four most significant bits and the second sample in the four
+ least significant bits. In the IMA ADPCM codec, the samples are
+ packed in the opposite order.
+
+ Each packet contains a single DVI block. This profile only defines
+ the 4-bit-per-sample version, while IMA also specified a 3-bit-per-
+ sample encoding.
+
+ The "header" word for each channel has the following structure:
+
+ int16 predict; /* predicted value of first sample
+ from the previous block (L16 format) */
+ u_int8 index; /* current index into stepsize table */
+ u_int8 reserved; /* set to zero by sender, ignored by receiver */
+
+ Each octet following the header contains two 4-bit samples, thus the
+ number of samples per packet MUST be even because there is no means
+ to indicate a partially filled last octet.
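+
+ An editorial sketch of this sample packing in C (buffer handling and
+ names are illustrative assumptions):
+
+    #include <stdint.h>
+    #include <stddef.h>
+
+    /* Pack an even number of 4-bit DVI4 codes, one code per input octet
+     * (in the low nibble), two codes per output octet with the first
+     * code in the four most significant bits. */
+    static void pack_dvi4_codes(const uint8_t *codes, size_t ncodes,
+                                uint8_t *out)
+    {
+        size_t i;
+
+        for (i = 0; i + 1 < ncodes; i += 2)
+            out[i / 2] = (uint8_t)(((codes[i] & 0x0f) << 4) |
+                                   (codes[i + 1] & 0x0f));
+    }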
+
+ Packing of samples for multiple channels is for further study.
+
+ The IMA ADPCM algorithm was described in the document IMA Recommended
+ Practices for Enhancing Digital Audio Compatibility in Multimedia
+ Systems (version 3.0). However, the Interactive Multimedia
+ Association ceased operations in 1997. Resources for an archived
+ copy of that document and a software implementation of the RTP DVI4
+ encoding are listed in Section 13.
+
+
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 13]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+4.5.2 G722
+
+ G722 is specified in ITU-T Recommendation G.722, "7 kHz audio-coding
+ within 64 kbit/s". The G.722 encoder produces a stream of octets,
+ each of which SHALL be octet-aligned in an RTP packet. The first bit
+ transmitted in the G.722 octet, which is the most significant bit of
+ the higher sub-band sample, SHALL correspond to the most significant
+ bit of the octet in the RTP packet.
+
+ Even though the actual sampling rate for G.722 audio is 16,000 Hz,
+ the RTP clock rate for the G722 payload format is 8,000 Hz because
+ that value was erroneously assigned in RFC 1890 and must remain
+ unchanged for backward compatibility. The octet rate or sample-pair
+ rate is 8,000 Hz.
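+
+ An editorial note in C form (illustrative, not normative): the RTP
+ timestamp for G722 therefore advances at the 8,000 Hz clock rate, one
+ tick per octet of G.722 data, e.g.:
+
+    /* RTP timestamp increment for a G722 packet of the given duration:
+     * 8 ticks per millisecond, so a 20 ms packet advances by 160 even
+     * though it carries 320 audio samples. */
+    static unsigned g722_timestamp_increment(unsigned packet_ms)
+    {
+        return packet_ms * 8;
+    }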
+
+4.5.3 G723
+
+ G723 is specified in ITU Recommendation G.723.1, "Dual-rate speech
+ coder for multimedia communications transmitting at 5.3 and 6.3
+ kbit/s". The G.723.1 5.3/6.3 kbit/s codec was defined by the ITU-T
+ as a mandatory codec for ITU-T H.324 GSTN videophone terminal
+ applications. The algorithm has a floating point specification in
+ Annex B to G.723.1, a silence compression algorithm in Annex A to
+ G.723.1 and a scalable channel coding scheme for wireless
+ applications in G.723.1 Annex C.
+
+ This Recommendation specifies a coded representation that can be used
+ for compressing the speech signal component of multi-media services
+ at a very low bit rate. Audio is encoded in 30 ms frames, with an
+ additional delay of 7.5 ms due to look-ahead. A G.723.1 frame can be
+ one of three sizes: 24 octets (6.3 kb/s frame), 20 octets (5.3 kb/s
+ frame), or 4 octets. These 4-octet frames are called SID frames
+ (Silence Insertion Descriptor) and are used to specify comfort noise
+ parameters. There is no restriction on how 4, 20, and 24 octet
+ frames are intermixed. The least significant two bits of the first
+ octet in the frame determine the frame size and codec type:
+
+ bits content octets/frame
+ 00 high-rate speech (6.3 kb/s) 24
+ 01 low-rate speech (5.3 kb/s) 20
+ 10 SID frame 4
+ 11 reserved
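+
+ An editorial sketch of this lookup in C (the function name is
+ illustrative):
+
+    /* Size in octets of a G.723.1 frame, from the two least significant
+     * bits of its first octet; 0 indicates the reserved pattern "11". */
+    static unsigned g723_frame_octets(unsigned char first_octet)
+    {
+        switch (first_octet & 0x03) {
+        case 0:  return 24;   /* 6.3 kb/s speech frame */
+        case 1:  return 20;   /* 5.3 kb/s speech frame */
+        case 2:  return 4;    /* SID (comfort noise) frame */
+        default: return 0;    /* reserved */
+        }
+    }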
+
+
+
+
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 14]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+ It is possible to switch between the two rates at any 30 ms frame
+ boundary. Both (5.3 kb/s and 6.3 kb/s) rates are a mandatory part of
+ the encoder and decoder. Receivers MUST accept both data rates and
+ MUST accept SID frames unless restriction of these capabilities has
+ been signaled. The MIME registration for G723 in RFC 3555 [7]
+ specifies parameters that MAY be used with MIME or SDP to restrict to
+ a single data rate or to restrict the use of SID frames. This coder
+ was optimized to represent speech with near-toll quality at the above
+ rates using a limited amount of complexity.
+
+ The packing of the encoded bit stream into octets and the
+ transmission order of the octets is specified in Rec. G.723.1 and is
+ the same as that produced by the G.723 C code reference
+ implementation. For the 6.3 kb/s data rate, this packing is
+ illustrated as follows, where the header (HDR) bits are always "0 0"
+ as shown in Fig. 1 to indicate operation at 6.3 kb/s, and the Z bit
+ is always set to zero. The diagrams show the bit packing in "network
+ byte order", also known as big-endian order. The bits of each 32-bit
+ word are numbered 0 to 31, with the most significant bit on the left
+ and numbered 0. The octets (bytes) of each word are transmitted most
+ significant octet first. The bits of each data field are numbered in
+ the order of the bit stream representation of the encoding (least
+ significant bit first). The vertical bars indicate the boundaries
+ between field fragments.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 15]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | LPC |HDR| LPC | LPC | ACL0 |LPC|
+ | | | | | | |
+ |0 0 0 0 0 0|0 0|1 1 1 1 0 0 0 0|2 2 1 1 1 1 1 1|0 0 0 0 0 0|2 2|
+ |5 4 3 2 1 0| |3 2 1 0 9 8 7 6|1 0 9 8 7 6 5 4|5 4 3 2 1 0|3 2|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | ACL2 |ACL|A| GAIN0 |ACL|ACL| GAIN0 | GAIN1 |
+ | | 1 |C| | 3 | 2 | | |
+ |0 0 0 0 0|0 0|0|0 0 0 0|0 0|0 0|1 1 0 0 0 0 0 0|0 0 0 0 0 0 0 0|
+ |4 3 2 1 0|1 0|6|3 2 1 0|1 0|6 5|1 0 9 8 7 6 5 4|7 6 5 4 3 2 1 0|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | GAIN2 | GAIN1 | GAIN2 | GAIN3 | GRID | GAIN3 |
+ | | | | | | |
+ |0 0 0 0|1 1 0 0|1 1 0 0 0 0 0 0|0 0 0 0 0 0 0 0|0 0 0 0|1 1 0 0|
+ |3 2 1 0|1 0 9 8|1 0 9 8 7 6 5 4|7 6 5 4 3 2 1 0|3 2 1 0|1 0 9 8|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | MSBPOS |Z|POS| MSBPOS | POS0 |POS| POS0 |
+ | | | 0 | | | 1 | |
+ |0 0 0 0 0 0 0|0|0 0|1 1 1 0 0 0|0 0 0 0 0 0 0 0|0 0|1 1 1 1 1 1|
+ |6 5 4 3 2 1 0| |1 0|2 1 0 9 8 7|9 8 7 6 5 4 3 2|1 0|5 4 3 2 1 0|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | POS1 | POS2 | POS1 | POS2 | POS3 | POS2 |
+ | | | | | | |
+ |0 0 0 0 0 0 0 0|0 0 0 0|1 1 1 1|1 1 0 0 0 0 0 0|0 0 0 0|1 1 1 1|
+ |9 8 7 6 5 4 3 2|3 2 1 0|3 2 1 0|1 0 9 8 7 6 5 4|3 2 1 0|5 4 3 2|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | POS3 | PSIG0 |POS|PSIG2| PSIG1 | PSIG3 |PSIG2|
+ | | | 3 | | | | |
+ |1 1 0 0 0 0 0 0|0 0 0 0 0 0|1 1|0 0 0|0 0 0 0 0|0 0 0 0 0|0 0 0|
+ |1 0 9 8 7 6 5 4|5 4 3 2 1 0|3 2|2 1 0|4 3 2 1 0|4 3 2 1 0|5 4 3|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Figure 1: G.723 (6.3 kb/s) bit packing
+
+ For the 5.3 kb/s data rate, the header (HDR) bits are always "0 1",
+ as shown in Fig. 2, to indicate operation at 5.3 kb/s.
+
+
+
+
+
+
+
+
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 16]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | LPC |HDR| LPC | LPC | ACL0 |LPC|
+ | | | | | | |
+ |0 0 0 0 0 0|0 1|1 1 1 1 0 0 0 0|2 2 1 1 1 1 1 1|0 0 0 0 0 0|2 2|
+ |5 4 3 2 1 0| |3 2 1 0 9 8 7 6|1 0 9 8 7 6 5 4|5 4 3 2 1 0|3 2|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | ACL2 |ACL|A| GAIN0 |ACL|ACL| GAIN0 | GAIN1 |
+ | | 1 |C| | 3 | 2 | | |
+ |0 0 0 0 0|0 0|0|0 0 0 0|0 0|0 0|1 1 0 0 0 0 0 0|0 0 0 0 0 0 0 0|
+ |4 3 2 1 0|1 0|6|3 2 1 0|1 0|6 5|1 0 9 8 7 6 5 4|7 6 5 4 3 2 1 0|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | GAIN2 | GAIN1 | GAIN2 | GAIN3 | GRID | GAIN3 |
+ | | | | | | |
+ |0 0 0 0|1 1 0 0|1 1 0 0 0 0 0 0|0 0 0 0 0 0 0 0|0 0 0 0|1 1 0 0|
+ |3 2 1 0|1 0 9 8|1 0 9 8 7 6 5 4|7 6 5 4 3 2 1 0|4 3 2 1|1 0 9 8|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | POS0 | POS1 | POS0 | POS1 | POS2 |
+ | | | | | |
+ |0 0 0 0 0 0 0 0|0 0 0 0|1 1 0 0|1 1 0 0 0 0 0 0|0 0 0 0 0 0 0 0|
+ |7 6 5 4 3 2 1 0|3 2 1 0|1 0 9 8|1 0 9 8 7 6 5 4|7 6 5 4 3 2 1 0|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | POS3 | POS2 | POS3 | PSIG1 | PSIG0 | PSIG3 | PSIG2 |
+ | | | | | | | |
+ |0 0 0 0|1 1 0 0|1 1 0 0 0 0 0 0|0 0 0 0|0 0 0 0|0 0 0 0|0 0 0 0|
+ |3 2 1 0|1 0 9 8|1 0 9 8 7 6 5 4|3 2 1 0|3 2 1 0|3 2 1 0|3 2 1 0|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Figure 2: G.723 (5.3 kb/s) bit packing
+
+ The packing of G.723.1 SID (silence) frames, which are indicated by
+ the header (HDR) bits having the pattern "1 0", is depicted in Fig.
+ 3.
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | LPC |HDR| LPC | LPC | GAIN |LPC|
+ | | | | | | |
+ |0 0 0 0 0 0|1 0|1 1 1 1 0 0 0 0|2 2 1 1 1 1 1 1|0 0 0 0 0 0|2 2|
+ |5 4 3 2 1 0| |3 2 1 0 9 8 7 6|1 0 9 8 7 6 5 4|5 4 3 2 1 0|3 2|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Figure 3: G.723 SID mode bit packing
+
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 17]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+4.5.4 G726-40, G726-32, G726-24, and G726-16
+
+ ITU-T Recommendation G.726 describes, among others, the algorithm
+ recommended for conversion of a single 64 kbit/s A-law or mu-law PCM
+ channel encoded at 8,000 samples/sec to and from a 40, 32, 24, or 16
+ kbit/s channel. The conversion is applied to the PCM stream using an
+ Adaptive Differential Pulse Code Modulation (ADPCM) transcoding
+ technique. The ADPCM representation consists of a series of
+ codewords with a one-to-one correspondence to the samples in the PCM
+ stream. The G726 data rates of 40, 32, 24, and 16 kbit/s have
+ codewords of 5, 4, 3, and 2 bits, respectively.
+
+ The 16 and 24 kbit/s encodings do not provide toll quality speech.
+ They are designed for use in overloaded Digital Circuit
+ Multiplication Equipment (DCME). ITU-T G.726 recommends that the 16
+ and 24 kbit/s encodings should be alternated with higher data rate
+ encodings to provide an average sample size of between 3.5 and 3.7
+ bits per sample.
+
+ The encodings of G.726 are here denoted as G726-40, G726-32, G726-24,
+ and G726-16. Prior to 1990, G721 described the 32 kbit/s ADPCM
+ encoding, and G723 described the 40, 32, and 16 kbit/s encodings.
+ Thus, G726-32 designates the same algorithm as G721 in RFC 1890.
+
+ A stream of G726 codewords contains no information on the encoding
+ being used, therefore transitions between G726 encoding types are not
+ permitted within a sequence of packed codewords. Applications MUST
+ determine the encoding type of packed codewords from the RTP payload
+ identifier.
+
+ No payload-specific header information SHALL be included as part of
+ the audio data. A stream of G726 codewords MUST be packed into
+ octets as follows: the first codeword is placed into the first octet
+ such that the least significant bit of the codeword aligns with the
+ least significant bit in the octet, the second codeword is then
+ packed so that its least significant bit coincides with the least
+ significant unoccupied bit in the octet. When a complete codeword
+ cannot be placed into an octet, the bits overlapping the octet
+ boundary are placed into the least significant bits of the next
+ octet. Packing MUST end with a completely packed final octet. The
+ number of codewords packed will therefore be a multiple of 8, 2, 8,
+ and 4 for G726-40, G726-32, G726-24, and G726-16, respectively. An
+ example of the packing scheme for G726-32 codewords is as shown,
+ where bit 7 is the least significant bit of the first octet, and bit
+ A3 is the least significant bit of the first codeword:
+
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 18]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+ 0 1
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+ |B B B B|A A A A|D D D D|C C C C| ...
+ |0 1 2 3|0 1 2 3|0 1 2 3|0 1 2 3|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+
+ An example of the packing scheme for G726-24 codewords follows, where
+ again bit 7 is the least significant bit of the first octet, and bit
+ A2 is the least significant bit of the first codeword:
+
+ 0 1 2
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+ |C C|B B B|A A A|F|E E E|D D D|C|H H H|G G G|F F| ...
+ |1 2|0 1 2|0 1 2|2|0 1 2|0 1 2|0|0 1 2|0 1 2|0 1|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+
+ Note that the "little-endian" direction in which samples are packed
+ into octets in the G726-16, -24, -32 and -40 payload formats
+ specified here is consistent with ITU-T Recommendation X.420, but is
+ the opposite of what is specified in ITU-T Recommendation I.366.2
+ Annex E for ATM AAL2 transport. A second set of RTP payload formats
+ matching the packetization of I.366.2 Annex E and identified by MIME
+ subtypes AAL2-G726-16, -24, -32 and -40 will be specified in a
+ separate document.
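+
+ An editorial sketch of this "little-endian" packing in C (buffer sizing
+ and names are illustrative assumptions; the caller supplies enough
+ codewords to fill whole octets, as required above):
+
+    #include <stdint.h>
+    #include <stddef.h>
+    #include <string.h>
+
+    /* Pack G726 codewords of 'bits_per_code' bits (5, 4, 3 or 2) starting
+     * at the least significant bit of each octet, with any remainder
+     * continuing in the least significant bits of the next octet. */
+    static size_t pack_g726(const uint8_t *codes, size_t ncodes,
+                            unsigned bits_per_code, uint8_t *out)
+    {
+        size_t i, bitpos = 0, octets = ncodes * bits_per_code / 8;
+
+        memset(out, 0, octets);
+        for (i = 0; i < ncodes; i++) {
+            unsigned code = codes[i] & ((1u << bits_per_code) - 1u);
+
+            out[bitpos / 8] |= (uint8_t)(code << (bitpos % 8));
+            if (bitpos % 8 + bits_per_code > 8)    /* crosses an octet */
+                out[bitpos / 8 + 1] |= (uint8_t)(code >> (8 - bitpos % 8));
+            bitpos += bits_per_code;
+        }
+        return octets;    /* octets produced */
+    }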
+
+4.5.5 G728
+
+ G728 is specified in ITU-T Recommendation G.728, "Coding of speech at
+ 16 kbit/s using low-delay code excited linear prediction".
+
+ A G.728 encoder translates 5 consecutive audio samples into a 10-bit
+ codebook index, resulting in a bit rate of 16 kb/s for audio sampled
+ at 8,000 samples per second. The group of five consecutive samples
+ is called a vector. Four consecutive vectors, labeled V1 to V4
+ (where V1 is to be played first by the receiver), build one G.728
+ frame. The four vectors of 40 bits are packed into 5 octets, labeled
+ B1 through B5. B1 SHALL be placed first in the RTP packet.
+
+ Referring to the figure below, the principle for bit order is
+ "maintenance of bit significance". Bits from an older vector are
+ more significant than bits from newer vectors. The MSB of the frame
+ goes to the MSB of B1 and the LSB of the frame goes to LSB of B5.
+
+
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 19]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+ 1 2 3 3
+ 0 0 0 0 9
+ ++++++++++++++++++++++++++++++++++++++++
+ <---V1---><---V2---><---V3---><---V4---> vectors
+ <--B1--><--B2--><--B3--><--B4--><--B5--> octets
+ <------------- frame 1 ---------------->
+
+ In particular, B1 contains the eight most significant bits of V1,
+ with the MSB of V1 being the MSB of B1. B2 contains the two least
+ significant bits of V1, the more significant of the two in its MSB,
+ and the six most significant bits of V2. B1 SHALL be placed first in
+ the RTP packet and B5 last.
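+
+ An editorial sketch of this packing in C (array layout and names are
+ illustrative assumptions):
+
+    #include <stdint.h>
+
+    /* Pack four 10-bit codebook indices V1..V4 (v[0] oldest) into the
+     * five octets B1..B5 (b[0] first), preserving bit significance: the
+     * MSB of V1 becomes the MSB of B1, the LSB of V4 the LSB of B5. */
+    static void pack_g728_frame(const uint16_t v[4], uint8_t b[5])
+    {
+        uint64_t frame = 0;
+        int i;
+
+        for (i = 0; i < 4; i++)
+            frame = (frame << 10) | (v[i] & 0x3ffu);
+
+        for (i = 0; i < 5; i++)
+            b[i] = (uint8_t)(frame >> (8 * (4 - i)));
+    }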
+
+4.5.6 G729
+
+ G729 is specified in ITU-T Recommendation G.729, "Coding of speech at
+ 8 kbit/s using conjugate structure-algebraic code excited linear
+ prediction (CS-ACELP)". A reduced-complexity version of the G.729
+ algorithm is specified in Annex A to Rec. G.729. The speech coding
+ algorithms in the main body of G.729 and in G.729 Annex A are fully
+ interoperable with each other, so there is no need to further
+ distinguish between them. An implementation that signals or accepts
+ use of G729 payload format may implement either G.729 or G.729A
+ unless restricted by additional signaling specified elsewhere related
+ specifically to the encoding rather than the payload format. The
+ G.729 and G.729 Annex A codecs were optimized to represent speech
+ with high quality, where G.729 Annex A trades some speech quality for
+ an approximate 50% complexity reduction [10]. See the next Section
+ (4.5.7) for other data rates added in later G.729 Annexes. For all
+ data rates, the sampling frequency (and RTP timestamp clock rate) is
+ 8,000 Hz.
+
+ A voice activity detector (VAD) and comfort noise generator (CNG)
+ algorithm in Annex B of G.729 is RECOMMENDED for digital simultaneous
+ voice and data applications and can be used in conjunction with G.729
+ or G.729 Annex A. A G.729 or G.729 Annex A frame contains 10 octets,
+ while the G.729 Annex B comfort noise frame occupies 2 octets.
+ Receivers MUST accept comfort noise frames if restriction of their
+ use has not been signaled. The MIME registration for G729 in RFC
+ 3555 [7] specifies a parameter that MAY be used with MIME or SDP to
+ restrict the use of comfort noise frames.
+
+ A G729 RTP packet may consist of zero or more G.729 or G.729 Annex A
+ frames, followed by zero or one G.729 Annex B frames. The presence
+ of a comfort noise frame can be deduced from the length of the RTP
+ payload. The default packetization interval is 20 ms (two frames),
+ but in some situations it may be desirable to send 10 ms packets. An
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 20]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+ example would be a transition from speech to comfort noise in the
+ first 10 ms of the packet. For some applications, a longer
+ packetization interval may be required to reduce the packet rate.
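+
+ An editorial sketch of this length-based deduction in C (names are
+ illustrative):
+
+    /* A G729 payload carries some number of 10-octet speech frames,
+     * optionally followed by one 2-octet Annex B comfort-noise frame.
+     * Returns 0 for a length that fits neither form. */
+    static int g729_parse_length(unsigned payload_len,
+                                 unsigned *speech_frames, int *cn_frame)
+    {
+        if (payload_len % 10 == 0) {
+            *speech_frames = payload_len / 10;
+            *cn_frame = 0;
+        } else if (payload_len % 10 == 2) {
+            *speech_frames = payload_len / 10;
+            *cn_frame = 1;
+        } else {
+            return 0;
+        }
+        return 1;
+    }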
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |L| L1 | L2 | L3 | P1 |P| C1 |
+ |0| | | | |0| |
+ | |0 1 2 3 4 5 6|0 1 2 3 4|0 1 2 3 4|0 1 2 3 4 5 6 7| |0 1 2 3 4|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | C1 | S1 | GA1 | GB1 | P2 | C2 |
+ | 1 1 1| | | | | |
+ |5 6 7 8 9 0 1 2|0 1 2 3|0 1 2|0 1 2 3|0 1 2 3 4|0 1 2 3 4 5 6 7|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | C2 | S2 | GA2 | GB2 |
+ | 1 1 1| | | |
+ |8 9 0 1 2|0 1 2 3|0 1 2|0 1 2 3|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Figure 4: G.729 and G.729A bit packing
+
+ The transmitted parameters of a G.729/G.729A 10-ms frame, consisting
+ of 80 bits, are defined in Recommendation G.729, Table 8/G.729. The
+ mapping of these parameters is given below in Fig. 4. The
+ diagrams show the bit packing in "network byte order", also known as
+ big-endian order. The bits of each 32-bit word are numbered 0 to 31,
+ with the most significant bit on the left and numbered 0. The octets
+ (bytes) of each word are transmitted most significant octet first.
+ The bits of each data field are numbered in the order as produced by
+ the G.729 C code reference implementation.
+
+ The packing of the G.729 Annex B comfort noise frame is shown in Fig.
+ 5.
+
+ 0 1
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |L| LSF1 | LSF2 | GAIN |R|
+ |S| | | |E|
+ |F| | | |S|
+ |0|0 1 2 3 4|0 1 2 3|0 1 2 3 4|V| RESV = Reserved (zero)
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Figure 5: G.729 Annex B bit packing
+
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 21]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+4.5.7 G729D and G729E
+
+ Annexes D and E to ITU-T Recommendation G.729 provide additional data
+ rates. Because the data rate is not signaled in the bitstream, the
+ different data rates are given distinct RTP encoding names which are
+ mapped to distinct payload type numbers. G729D indicates a 6.4
+ kbit/s coding mode (G.729 Annex D, for momentary reduction in channel
+ capacity), while G729E indicates an 11.8 kbit/s mode (G.729 Annex E,
+ for improved performance with a wide range of narrow-band input
+ signals, e.g., music and background noise). Annex E has two
+ operating modes, backward adaptive and forward adaptive, which are
+ signaled by the first two bits in each frame (the most significant
+ two bits of the first octet).
+
+ The voice activity detector (VAD) and comfort noise generator (CNG)
+ algorithm specified in Annex B of G.729 may be used with Annex D and
+ Annex E frames in addition to G.729 and G.729 Annex A frames. The
+ algorithm details for the operation of Annexes D and E with the Annex
+ B CNG are specified in G.729 Annexes F and G. Note that Annexes F
+ and G do not introduce any new encodings. Receivers MUST accept
+ comfort noise frames if restriction of their use has not been
+ signaled. The MIME registrations for G729D and G729E in RFC 3555 [7]
+ specify a parameter that MAY be used with MIME or SDP to restrict the
+ use of comfort noise frames.
+
+ For G729D, an RTP packet may consist of zero or more G.729 Annex D
+ frames, followed by zero or one G.729 Annex B frame. Similarly, for
+ G729E, an RTP packet may consist of zero or more G.729 Annex E
+ frames, followed by zero or one G.729 Annex B frame. The presence of
+ a comfort noise frame can be deduced from the length of the RTP
+ payload.
+
+ A single RTP packet must contain frames of only one data rate,
+ optionally followed by one comfort noise frame. The data rate may be
+ changed from packet to packet by changing the payload type number.
+ G.729 Annexes D, E and H describe what the encoding and decoding
+ algorithms must do to accommodate a change in data rate.
+
+ For G729D, the bits of a G.729 Annex D frame are formatted as shown
+ below in Fig. 6 (cf. Table D.1/G.729). The frame length is 64 bits.
+
+
+
+
+
+
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 22]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |L| L1 | L2 | L3 | P1 | C1 |
+ |0| | | | | |
+ | |0 1 2 3 4 5 6|0 1 2 3 4|0 1 2 3 4|0 1 2 3 4 5 6 7|0 1 2 3 4 5|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | C1 |S1 | GA1 | GB1 | P2 | C2 |S2 | GA2 | GB2 |
+ | | | | | | | | | |
+ |6 7 8|0 1|0 1 2|0 1 2|0 1 2 3|0 1 2 3 4 5 6 7 8|0 1|0 1 2|0 1 2|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Figure 6: G.729 Annex D bit packing
+
+ The net bit rate for the G.729 Annex E algorithm is 11.8 kbit/s and a
+ total of 118 bits are used. Two bits are appended as "don't care"
+ bits to complete an integer number of octets for the frame. For
+ G729E, the bits of a data frame are formatted as shown in the next
+ two diagrams (cf. Table E.1/G.729). The fields for the G729E forward
+ adaptive mode are packed as shown in Fig. 7.
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |0 0|L| L1 | L2 | L3 | P1 |P| C0_1|
+ | |0| | | | |0| |
+ | | |0 1 2 3 4 5 6|0 1 2 3 4|0 1 2 3 4|0 1 2 3 4 5 6 7| |0 1 2|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | | C1_1 | C2_1 | C3_1 | C4_1 |
+ | | | | | |
+ |3 4 5 6|0 1 2 3 4 5 6|0 1 2 3 4 5 6|0 1 2 3 4 5 6|0 1 2 3 4 5 6|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | GA1 | GB1 | P2 | C0_2 | C1_2 | C2_2 |
+ | | | | | | |
+ |0 1 2|0 1 2 3|0 1 2 3 4|0 1 2 3 4 5 6|0 1 2 3 4 5 6|0 1 2 3 4 5|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | | C3_2 | C4_2 | GA2 | GB2 |DC |
+ | | | | | | |
+ |6|0 1 2 3 4 5 6|0 1 2 3 4 5 6|0 1 2|0 1 2 3|0 1|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Figure 7: G.729 Annex E (forward adaptive mode) bit packing
+
+ The fields for the G729E backward adaptive mode are packed as shown
+ in Fig. 8.
+
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 23]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+ 0 1 2 3
+ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ |1 1| P1 |P| C0_1 | C1_1 |
+ | | |0| 1 1 1| |
+ | |0 1 2 3 4 5 6 7|0|0 1 2 3 4 5 6 7 8 9 0 1 2|0 1 2 3 4 5 6 7|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | | C2_1 | C3_1 | C4_1 |GA1 | GB1 |P2 |
+ | | | | | | | |
+ |8 9|0 1 2 3 4 5 6|0 1 2 3 4 5 6|0 1 2 3 4 5 6|0 1 2|0 1 2 3|0 1|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | | C0_2 | C1_2 | C2_2 |
+ | | 1 1 1| | |
+ |2 3 4|0 1 2 3 4 5 6 7 8 9 0 1 2|0 1 2 3 4 5 6 7 8 9|0 1 2 3 4 5|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ | | C3_2 | C4_2 | GA2 | GB2 |DC |
+ | | | | | | |
+ |6|0 1 2 3 4 5 6|0 1 2 3 4 5 6|0 1 2|0 1 2 3|0 1|
+ +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+ Figure 8: G.729 Annex E (backward adaptive mode) bit packing
+
+4.5.8 GSM
+
+ GSM (Groupe Spécial Mobile) denotes the European GSM 06.10 standard
+ for full-rate speech transcoding, ETS 300 961, which is based on
+ RPE/LTP (residual pulse excitation/long term prediction) coding at a
+ rate of 13 kb/s [11,12,13]. The text of the standard can be obtained
+ from:
+
+ ETSI (European Telecommunications Standards Institute)
+ ETSI Secretariat: B.P.152
+ F-06561 Valbonne Cedex
+ France
+ Phone: +33 92 94 42 00
+ Fax: +33 93 65 47 16
+
+ Blocks of 160 audio samples are compressed into 33 octets, for an
+ effective data rate of 13,200 b/s.
+
+4.5.8.1 General Packaging Issues
+
+ The GSM standard (ETS 300 961) specifies the bit stream produced by
+ the codec, but does not specify how these bits should be packed for
+ transmission. The packetization specified here has subsequently been
+ adopted in ETSI Technical Specification TS 101 318. Some software
+ implementations of the GSM codec use a different packing than that
+ specified here.
+
+
+
+Schulzrinne & Casner Standards Track [Page 24]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+ field field name bits field field name bits
+ ________________________________________________
+ 1 LARc[0] 6 39 xmc[22] 3
+ 2 LARc[1] 6 40 xmc[23] 3
+ 3 LARc[2] 5 41 xmc[24] 3
+ 4 LARc[3] 5 42 xmc[25] 3
+ 5 LARc[4] 4 43 Nc[2] 7
+ 6 LARc[5] 4 44 bc[2] 2
+ 7 LARc[6] 3 45 Mc[2] 2
+ 8 LARc[7] 3 46 xmaxc[2] 6
+ 9 Nc[0] 7 47 xmc[26] 3
+ 10 bc[0] 2 48 xmc[27] 3
+ 11 Mc[0] 2 49 xmc[28] 3
+ 12 xmaxc[0] 6 50 xmc[29] 3
+ 13 xmc[0] 3 51 xmc[30] 3
+ 14 xmc[1] 3 52 xmc[31] 3
+ 15 xmc[2] 3 53 xmc[32] 3
+ 16 xmc[3] 3 54 xmc[33] 3
+ 17 xmc[4] 3 55 xmc[34] 3
+ 18 xmc[5] 3 56 xmc[35] 3
+ 19 xmc[6] 3 57 xmc[36] 3
+ 20 xmc[7] 3 58 xmc[37] 3
+ 21 xmc[8] 3 59 xmc[38] 3
+ 22 xmc[9] 3 60 Nc[3] 7
+ 23 xmc[10] 3 61 bc[3] 2
+ 24 xmc[11] 3 62 Mc[3] 2
+ 25 xmc[12] 3 63 xmaxc[3] 6
+ 26 Nc[1] 7 64 xmc[39] 3
+ 27 bc[1] 2 65 xmc[40] 3
+ 28 Mc[1] 2 66 xmc[41] 3
+ 29 xmaxc[1] 6 67 xmc[42] 3
+ 30 xmc[13] 3 68 xmc[43] 3
+ 31 xmc[14] 3 69 xmc[44] 3
+ 32 xmc[15] 3 70 xmc[45] 3
+ 33 xmc[16] 3 71 xmc[46] 3
+ 34 xmc[17] 3 72 xmc[47] 3
+ 35 xmc[18] 3 73 xmc[48] 3
+ 36 xmc[19] 3 74 xmc[49] 3
+ 37 xmc[20] 3 75 xmc[50] 3
+ 38 xmc[21] 3 76 xmc[51] 3
+
+ Table 2: Ordering of GSM variables
+
+
+
+
+
+
+
+
+
+Schulzrinne & Casner Standards Track [Page 25]
+
+RFC 3551 RTP A/V Profile July 2003
+
+
+ Octet Bit 0 Bit 1 Bit 2 Bit 3 Bit 4 Bit 5 Bit 6 Bit 7
+ _____________________________________________________________________
+ 0 1 1 0 1 LARc0.0 LARc0.1 LARc0.2 LARc0.3
+ 1 LARc0.4 LARc0.5 LARc1.0 LARc1.1 LARc1.2 LARc1.3 LARc1.4 LARc1.5
+ 2 LARc2.0 LARc2.1 LARc2.2 LARc2.3 LARc2.4 LARc3.0 LARc3.1 LARc3.2
+ 3 LARc3.3 LARc3.4 LARc4.0 LARc4.1 LARc4.2 LARc4.3 LARc5.0 LARc5.1
+ 4 LARc5.2 LARc5.3 LARc6.0 LARc6.1 LARc6.2 LARc7.0 LARc7.1 LARc7.2
+ 5 Nc0.0 Nc0.1 Nc0.2 Nc0.3 Nc0.4 Nc0.5 Nc0.6 bc0.0
+ 6 bc0.1 Mc0.0 Mc0.1 xmaxc00 xmaxc01 xmaxc02 xmaxc03 xmaxc04
+ 7 xmaxc05 xmc0.0 xmc0.1 xmc0.2 xmc1.0 xmc1.1 xmc1.2 xmc2.0
+ 8 xmc2.1 xmc2.2 xmc3.0 xmc3.1 xmc3.2 xmc4.0 xmc4.1 xmc4.2
+ 9 xmc5.0 xmc5.1 xmc5.2 xmc6.0 xmc6.1 xmc6.2 xmc7.0 xmc7.1
+ 10 xmc7.2 xmc8.0 xmc8.1 xmc8.2 xmc9.0 xmc9.1 xmc9.2 xmc10.0
+ 11 xmc10.1 xmc10.2 xmc11.0 xmc11.1 xmc11.2 xmc12.0 xmc12.1 xmc12.2
+ 12 Nc1.0 Nc1.1 Nc1.2 Nc1.3 Nc1.4 Nc1.5 Nc1.6 bc1.0
+ 13 bc1.1 Mc1.0 Mc1.1 xmaxc10 xmaxc11 xmaxc12 xmaxc13 xmaxc14
+ 14 xmaxc15 xmc13.0 xmc13.1 xmc13.2 xmc14.0 xmc14.1 xmc14.2 xmc15.0
+ 15 xmc15.1 xmc15.2 xmc16.0 xmc16.1 xmc16.2 xmc17.0 xmc17.1 xmc17.2
+ 16 xmc18.0 xmc18.1 xmc18.2 xmc19.0 xmc19.1 xmc19.2 xmc20.0 xmc20.1
+ 17 xmc20.2 xmc21.0 xmc21.1 xmc21.2 xmc22.0 xmc22.1 xmc22.2 xmc23.0
+ 18 xmc23.1 xmc23.2 xmc24.0 xmc24.1 xmc24.2 xmc25.0 xmc25.1 xmc25.2
+ 19 Nc2.0 Nc2.1 Nc2.2 Nc2.3 Nc2.4 Nc2.5 Nc2.6 bc2.0
+ 20 bc2.1 Mc2.0 Mc2.1 xmaxc20 xmaxc21 xmaxc22 xmaxc23 xmaxc24
+ 21 xmaxc25 xmc26.0 xmc26.1 xmc26.2 xmc27.0 xmc27.1 xmc27.2 xmc28.0
+ 22 xmc28.1 xmc28.2 xmc29.0 xmc29.1 xmc29.2 xmc30.0 xmc30.1 xmc30.2
+ 23 xmc31.0 xmc31.1 xmc31.2 xmc32.0 xmc32.1 xmc32.2 xmc33.0 xmc33.1
+ 24 xmc33.2 xmc34.0 xmc34.1 xmc34.2 xmc35.0 xmc35.1 xmc35.2 xmc36.0
+ 25 xmc36.1 xmc36.2 xmc37.0 xmc37.1 xmc37.2 xmc38.0 xmc38.1 xmc38.2
+ 26 Nc3.0 Nc3.1 Nc3.2 Nc3.3 Nc3.4 Nc3.5 Nc3.6 bc3.0
+ 27 bc3.1 Mc3.0 Mc3.1 xmaxc30 xmaxc31 xmaxc32 xmaxc33 xmaxc34
+ 28 xmaxc35 xmc39.0 xmc39.1 xmc39.2 xmc40.0 xmc40.1 xmc40.2 xmc41.0
+ 29 xmc41.1 xmc41.2 xmc42.0 xmc42.1 xmc42.2 xmc43.0 xmc43.1 xmc43.2
+ 30 xmc44.0 xmc44.1 xmc44.2 xmc45.0 xmc45.1 xmc45.2 xmc46.0 xmc46.1
+ 31 xmc46.2 xmc47.0 xmc47.1 xmc47.2 xmc48.0 xmc48.1 xmc48.2 xmc49.0
+ 32 xmc49.1 xmc49.2 xmc50.0 xmc50.1 xmc50.2 xmc51.0 xmc51.1 xmc51.2
+
+ Table 3: GSM payload format
+
+ In the GSM packing used by RTP, the bits SHALL be packed beginning
+ from the most significant bit. Every 160 sample GSM frame is coded
+ into one 33 octet (264 bit) buffer. Every such buffer begins with a
+ 4 bit signature (0xD), followed by the MSB encoding of the fields of
+ the frame. The first octet thus contains 1101 in the 4 most
+ significant bits (0-3) and the 4 most significant bits of F1 (0-3) in
+ the 4 least significant bits (4-7). The second octet contains the 2
+ least significant bits of F1 in bits 0-1, and F2 in bits 2-7, and so
+ on. The order of the fields in the frame is described in Table 2.
+
+4.5.8.2 GSM Variable Names and Numbers
+
+ In the RTP encoding we have the bit pattern described in Table 3,
+ where F.i signifies the ith bit of the field F, bit 0 is the most
+ significant bit, and the bits of every octet are numbered from 0 to 7
+ from most to least significant.
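+
+ The following C sketch is illustrative only and not part of this
+ profile: a minimal MSB-first bit packer of the kind an implementation
+ might use to build the 33-octet buffer. The signature and field
+ widths are those of Table 2; the field names in the trailing comment
+ are hypothetical variables holding the encoder output.
+
+    #include <stddef.h>
+    #include <stdint.h>
+
+    typedef struct bit_writer {
+        uint8_t *buf;      /* output buffer, zero-initialized by the caller */
+        size_t bitpos;     /* number of bits written so far */
+    } bit_writer;
+
+    /* Append the 'nbits' least significant bits of 'value', starting at
+       the most significant free bit of the output buffer. */
+    static void put_bits(bit_writer *w, unsigned value, unsigned nbits) {
+        while (nbits > 0) {
+            size_t byte = w->bitpos / 8;
+            unsigned free_bits = 8 - (unsigned) (w->bitpos % 8);
+            unsigned take = nbits < free_bits ? nbits : free_bits;
+
+            /* take the 'take' most significant of the remaining bits and
+               place them just below the bits already written */
+            w->buf[byte] |= (uint8_t) (((value >> (nbits - take)) &
+                                        ((1u << take) - 1)) << (free_bits - take));
+
+            w->bitpos += take;
+            nbits -= take;
+        }
+    }
+
+    /* For one frame:  put_bits(&w, 0xD, 4);       4-bit signature
+                       put_bits(&w, LARc0, 6);     field 1 of Table 2
+                       put_bits(&w, LARc1, 6);     ...and so on        */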
+
+4.5.9 GSM-EFR
+
+ GSM-EFR denotes GSM 06.60 enhanced full rate speech transcoding,
+ specified in ETS 300 726 which is available from ETSI at the address
+ given in Section 4.5.8. This codec has a frame length of 244 bits.
+ For transmission in RTP, each codec frame is packed into a 31 octet
+ (248 bit) buffer beginning with a 4-bit signature 0xC in a manner
+ similar to that specified here for the original GSM 06.10 codec. The
+ packing is specified in ETSI Technical Specification TS 101 318.
+
+4.5.10 L8
+
+ L8 denotes linear audio data samples, using 8-bits of precision with
+ an offset of 128, that is, the most negative signal is encoded as
+ zero.
+
+4.5.11 L16
+
+ L16 denotes uncompressed audio data samples, using 16-bit signed
+ representation with 65,535 equally divided steps between minimum and
+ maximum signal level, ranging from -32,768 to 32,767. The value is
+ represented in two's complement notation and transmitted in network
+ byte order (most significant byte first).
+
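+ As an illustration only (it is not taken from this profile), a sender
+ holding host-order signed 16-bit samples could serialize them into the
+ L16 wire format as follows; the helper name is hypothetical.
+
+    #include <stddef.h>
+    #include <stdint.h>
+
+    /* Write each sample most significant octet first, keeping the
+       two's complement bit pattern unchanged. */
+    static void l16_to_wire(const int16_t *in, uint8_t *out, size_t nsamples) {
+        for (size_t i = 0; i < nsamples; i++) {
+            uint16_t v = (uint16_t) in[i];
+            out[2*i]     = (uint8_t) (v >> 8);
+            out[2*i + 1] = (uint8_t) (v & 0xFFu);
+        }
+    }
+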
+ The MIME registration for L16 in RFC 3555 [7] specifies parameters
+ that MAY be used with MIME or SDP to indicate that analog pre-
+ emphasis was applied to the signal before quantization or to indicate
+ that a multiple-channel audio stream follows a different channel
+ ordering convention than is specified in Section 4.1.
+
+4.5.12 LPC
+
+ LPC designates an experimental linear predictive encoding contributed
+ by Ron Frederick, which is based on an implementation written by Ron
+ Zuckerman posted to the Usenet group comp.dsp on June 26, 1992. The
+ codec generates 14 octets for every frame. The framesize is set to
+ 20 ms, resulting in a bit rate of 5,600 b/s.
+
+4.5.13 MPA
+
+ MPA denotes MPEG-1 or MPEG-2 audio encapsulated as elementary
+ streams. The encoding is defined in ISO standards ISO/IEC 11172-3
+ and 13818-3. The encapsulation is specified in RFC 2250 [14].
+
+ The encoding may be at any of three levels of complexity, called
+ Layer I, II and III. The selected layer as well as the sampling rate
+ and channel count are indicated in the payload. The RTP timestamp
+ clock rate is always 90,000, independent of the sampling rate.
+ MPEG-1 audio supports sampling rates of 32, 44.1, and 48 kHz (ISO/IEC
+ 11172-3, section 1.1; "Scope"). MPEG-2 supports sampling rates of
+ 16, 22.05 and 24 kHz. The number of samples per frame is fixed, but
+ the frame size will vary with the sampling rate and bit rate.
+
+ The MIME registration for MPA in RFC 3555 [7] specifies parameters
+ that MAY be used with MIME or SDP to restrict the selection of layer,
+ channel count, sampling rate, and bit rate.
+
+4.5.14 PCMA and PCMU
+
+ PCMA and PCMU are specified in ITU-T Recommendation G.711. Audio
+ data is encoded as eight bits per sample, after logarithmic scaling.
+ PCMU denotes mu-law scaling, PCMA A-law scaling. A detailed
+ description is given by Jayant and Noll [15]. Each G.711 octet SHALL
+ be octet-aligned in an RTP packet. The sign bit of each G.711 octet
+ SHALL correspond to the most significant bit of the octet in the RTP
+ packet (i.e., assuming the G.711 samples are handled as octets on the
+ host machine, the sign bit SHALL be the most significant bit of the
+ octet as defined by the host machine format). The 56 kb/s and 48
+ kb/s modes of G.711 are not applicable to RTP, since PCMA and PCMU
+ MUST always be transmitted as 8-bit samples.
+
+ See Section 4.1 regarding silence suppression.
+
+4.5.15 QCELP
+
+ The Electronic Industries Association (EIA) & Telecommunications
+ Industry Association (TIA) standard IS-733, "TR45: High Rate Speech
+ Service Option for Wideband Spread Spectrum Communications Systems",
+ defines the QCELP audio compression algorithm for use in wireless
+ CDMA applications. The QCELP CODEC compresses each 20 milliseconds
+ of 8,000 Hz, 16-bit sampled input speech into one of four different
+ size output frames: Rate 1 (266 bits), Rate 1/2 (124 bits), Rate 1/4
+ (54 bits) or Rate 1/8 (20 bits). For typical speech patterns, this
+ results in an average output of 6.8 kb/s for normal mode and 4.7 kb/s
+ for reduced rate mode. The packetization of the QCELP audio codec is
+ described in [16].
+
+4.5.16 RED
+
+ The redundant audio payload format "RED" is specified by RFC 2198
+ [17]. It defines a means by which multiple redundant copies of an
+ audio packet may be transmitted in a single RTP stream. Each packet
+ in such a stream contains, in addition to the audio data for that
+ packetization interval, a (more heavily compressed) copy of the data
+ from a previous packetization interval. This allows an approximation
+ of the data from lost packets to be recovered upon decoding of a
+ subsequent packet, giving much improved sound quality when compared
+ with silence substitution for lost packets.
+
+4.5.17 VDVI
+
+ VDVI is a variable-rate version of DVI4, yielding speech bit rates of
+ between 10 and 25 kb/s. It is specified for single-channel operation
+ only. Samples are packed into octets starting at the most-
+ significant bit. The last octet is padded with 1 bits if the last
+ sample does not fill the last octet. This padding is distinct from
+ the valid codewords. The receiver needs to detect the padding
+ because there is no explicit count of samples in the packet.
+
+ It uses the following encoding:
+
+ DVI4 codeword VDVI bit pattern
+ _______________________________
+ 0 00
+ 1 010
+ 2 1100
+ 3 11100
+ 4 111100
+ 5 1111100
+ 6 11111100
+ 7 11111110
+ 8 10
+ 9 011
+ 10 1101
+ 11 11101
+ 12 111101
+ 13 1111101
+ 14 11111101
+ 15 11111111
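+
+ The sketch below is illustrative only and not part of this profile:
+ it appends the codewords from the table above MSB-first and pads the
+ last octet with 1 bits, as required. Its input is the sequence of
+ DVI4 codeword values (0-15); all names are hypothetical.
+
+    #include <stddef.h>
+    #include <stdint.h>
+
+    static const struct { uint8_t nbits; uint8_t code; } vdvi_code[16] = {
+        { 2, 0x00 }, { 3, 0x02 }, { 4, 0x0C }, { 5, 0x1C },
+        { 6, 0x3C }, { 7, 0x7C }, { 8, 0xFC }, { 8, 0xFE },
+        { 2, 0x02 }, { 3, 0x03 }, { 4, 0x0D }, { 5, 0x1D },
+        { 6, 0x3D }, { 7, 0x7D }, { 8, 0xFD }, { 8, 0xFF }
+    };
+
+    /* Pack 'n' DVI4 codewords into 'out' (at most n octets); returns
+       the number of octets written. */
+    static size_t vdvi_pack(const uint8_t *cw, size_t n, uint8_t *out) {
+        size_t o = 0;
+        unsigned acc = 0, nbits = 0;
+
+        for (size_t i = 0; i < n; i++) {
+            acc = (acc << vdvi_code[cw[i]].nbits) | vdvi_code[cw[i]].code;
+            nbits += vdvi_code[cw[i]].nbits;
+
+            while (nbits >= 8) {           /* flush full octets, MSB first */
+                out[o++] = (uint8_t) (acc >> (nbits - 8));
+                nbits -= 8;
+                acc &= (1u << nbits) - 1;
+            }
+        }
+
+        if (nbits > 0)                     /* pad the final octet with 1 bits */
+            out[o++] = (uint8_t) ((acc << (8 - nbits)) | ((1u << (8 - nbits)) - 1));
+
+        return o;
+    }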
+
+5. Video
+
+ The following sections describe the video encodings that are defined
+ in this memo and give their abbreviated names used for
+ identification. These video encodings and their payload types are
+ listed in Table 5.
+
+ All of these video encodings use an RTP timestamp frequency of 90,000
+ Hz, the same as the MPEG presentation time stamp frequency. This
+ frequency yields exact integer timestamp increments for the typical
+ 24 (HDTV), 25 (PAL), 29.97 (NTSC) and 30 Hz (HDTV) frame rates
+ and 50, 59.94 and 60 Hz field rates. While 90 kHz is the RECOMMENDED
+ rate for future video encodings used within this profile, other rates
+ MAY be used. However, it is not sufficient to use the video frame
+ rate (typically between 15 and 30 Hz) because that does not provide
+ adequate resolution for typical synchronization requirements when
+ calculating the RTP timestamp corresponding to the NTP timestamp in
+ an RTCP SR packet. The timestamp resolution MUST also be sufficient
+ for the jitter estimate contained in the receiver reports.
+
+ For most of these video encodings, the RTP timestamp encodes the
+ sampling instant of the video image contained in the RTP data packet.
+ If a video image occupies more than one packet, the timestamp is the
+ same on all of those packets. Packets from different video images
+ are distinguished by their different timestamps.
+
+ Most of these video encodings also specify that the marker bit of the
+ RTP header SHOULD be set to one in the last packet of a video frame
+ and otherwise set to zero. Thus, it is not necessary to wait for a
+ following packet with a different timestamp to detect that a new
+ frame should be displayed.
+
+5.1 CelB
+
+ The CELL-B encoding is a proprietary encoding proposed by Sun
+ Microsystems. The byte stream format is described in RFC 2029 [18].
+
+5.2 JPEG
+
+ The encoding is specified in ISO Standards 10918-1 and 10918-2. The
+ RTP payload format is as specified in RFC 2435 [19].
+
+5.3 H261
+
+ The encoding is specified in ITU-T Recommendation H.261, "Video codec
+ for audiovisual services at p x 64 kbit/s". The packetization and
+ RTP-specific properties are described in RFC 2032 [20].
+
+5.4 H263
+
+ The encoding is specified in the 1996 version of ITU-T Recommendation
+ H.263, "Video coding for low bit rate communication". The
+ packetization and RTP-specific properties are described in RFC 2190
+ [21]. The H263-1998 payload format is RECOMMENDED over this one for
+ use by new implementations.
+
+5.5 H263-1998
+
+ The encoding is specified in the 1998 version of ITU-T Recommendation
+ H.263, "Video coding for low bit rate communication". The
+ packetization and RTP-specific properties are described in RFC 2429
+ [22]. Because the 1998 version of H.263 is a superset of the 1996
+ syntax, this payload format can also be used with the 1996 version of
+ H.263, and is RECOMMENDED for this use by new implementations. This
+ payload format does not replace RFC 2190, which continues to be used
+ by existing implementations, and may be required for backward
+ compatibility in new implementations. Implementations using the new
+ features of the 1998 version of H.263 MUST use the payload format
+ described in RFC 2429.
+
+5.6 MPV
+
+ MPV designates the use of MPEG-1 and MPEG-2 video encoding elementary
+ streams as specified in ISO Standards ISO/IEC 11172 and 13818-2,
+ respectively. The RTP payload format is as specified in RFC 2250
+ [14], Section 3.
+
+ The MIME registration for MPV in RFC 3555 [7] specifies a parameter
+ that MAY be used with MIME or SDP to restrict the selection of the
+ type of MPEG video.
+
+5.7 MP2T
+
+ MP2T designates the use of MPEG-2 transport streams, for either audio
+ or video. The RTP payload format is described in RFC 2250 [14],
+ Section 2.
+
+
+5.8 nv
+
+ The encoding is implemented in the program `nv', version 4, developed
+ at Xerox PARC by Ron Frederick. Further information is available
+ from the author:
+
+ Ron Frederick
+ Blue Coat Systems Inc.
+ 650 Almanor Avenue
+ Sunnyvale, CA 94085
+ United States
+ EMail: ronf@bluecoat.com
+
+6. Payload Type Definitions
+
+ Tables 4 and 5 define this profile's static payload type values for
+ the PT field of the RTP data header. In addition, payload type
+ values in the range 96-127 MAY be defined dynamically through a
+ conference control protocol, which is beyond the scope of this
+ document. For example, a session directory could specify that for a
+ given session, payload type 96 indicates PCMU encoding, 8,000 Hz
+ sampling rate, 2 channels. Entries in Tables 4 and 5 with payload
+ type "dyn" have no static payload type assigned and are only used
+ with a dynamic payload type. Payload type 2 was assigned to G721 in
+ RFC 1890 and to its equivalent successor G726-32 in draft versions of
+ this specification, but its use is now deprecated and that static
+ payload type is marked reserved due to conflicting use for the
+ payload formats G726-32 and AAL2-G726-32 (see Section 4.5.4).
+ Payload type 13 indicates the Comfort Noise (CN) payload format
+ specified in RFC 3389 [9]. Payload type 19 is marked "reserved"
+ because some draft versions of this specification assigned that
+ number to an earlier version of the comfort noise payload format.
+ The payload type range 72-76 is marked "reserved" so that RTCP and
+ RTP packets can be reliably distinguished (see Section "Summary of
+ Protocol Constants" of the RTP protocol specification).
+
+ The payload types currently defined in this profile are assigned to
+ exactly one of three categories or media types: audio only, video
+ only and those combining audio and video. The media types are marked
+ in Tables 4 and 5 as "A", "V" and "AV", respectively. Payload types
+ of different media types SHALL NOT be interleaved or multiplexed
+ within a single RTP session, but multiple RTP sessions MAY be used in
+ parallel to send multiple media types. An RTP source MAY change
+ payload types within the same media type during a session. See the
+ section "Multiplexing RTP Sessions" of RFC 3550 for additional
+ explanation.
+
+ PT encoding media type clock rate channels
+ name (Hz)
+ ___________________________________________________
+ 0 PCMU A 8,000 1
+ 1 reserved A
+ 2 reserved A
+ 3 GSM A 8,000 1
+ 4 G723 A 8,000 1
+ 5 DVI4 A 8,000 1
+ 6 DVI4 A 16,000 1
+ 7 LPC A 8,000 1
+ 8 PCMA A 8,000 1
+ 9 G722 A 8,000 1
+ 10 L16 A 44,100 2
+ 11 L16 A 44,100 1
+ 12 QCELP A 8,000 1
+ 13 CN A 8,000 1
+ 14 MPA A 90,000 (see text)
+ 15 G728 A 8,000 1
+ 16 DVI4 A 11,025 1
+ 17 DVI4 A 22,050 1
+ 18 G729 A 8,000 1
+ 19 reserved A
+ 20 unassigned A
+ 21 unassigned A
+ 22 unassigned A
+ 23 unassigned A
+ dyn G726-40 A 8,000 1
+ dyn G726-32 A 8,000 1
+ dyn G726-24 A 8,000 1
+ dyn G726-16 A 8,000 1
+ dyn G729D A 8,000 1
+ dyn G729E A 8,000 1
+ dyn GSM-EFR A 8,000 1
+ dyn L8 A var. var.
+ dyn RED A (see text)
+ dyn VDVI A var. 1
+
+ Table 4: Payload types (PT) for audio encodings
+
+ PT encoding media type clock rate
+ name (Hz)
+ _____________________________________________
+ 24 unassigned V
+ 25 CelB V 90,000
+ 26 JPEG V 90,000
+ 27 unassigned V
+ 28 nv V 90,000
+ 29 unassigned V
+ 30 unassigned V
+ 31 H261 V 90,000
+ 32 MPV V 90,000
+ 33 MP2T AV 90,000
+ 34 H263 V 90,000
+ 35-71 unassigned ?
+ 72-76 reserved N/A N/A
+ 77-95 unassigned ?
+ 96-127 dynamic ?
+ dyn H263-1998 V 90,000
+
+ Table 5: Payload types (PT) for video and combined
+ encodings
+
+ Session participants agree through mechanisms beyond the scope of
+ this specification on the set of payload types allowed in a given
+ session. This set MAY, for example, be defined by the capabilities
+ of the applications used, negotiated by a conference control protocol
+ or established by agreement between the human participants.
+
+ Audio applications operating under this profile SHOULD, at a minimum,
+ be able to send and/or receive payload types 0 (PCMU) and 5 (DVI4).
+ This allows interoperability without format negotiation and ensures
+ successful negotiation with a conference control protocol.
+
+7. RTP over TCP and Similar Byte Stream Protocols
+
+ Under special circumstances, it may be necessary to carry RTP in
+ protocols offering a byte stream abstraction, such as TCP, possibly
+ multiplexed with other data. The application MUST define its own
+ method of delineating RTP and RTCP packets (RTSP [23] provides an
+ example of such an encapsulation specification).
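+
+ As a hedged illustration only (this profile does not mandate any
+ particular method), one simple delineation scheme is to precede each
+ RTP or RTCP packet with a 16-bit length field in network byte order;
+ the function name below is hypothetical.
+
+    #include <stdint.h>
+    #include <sys/types.h>
+    #include <sys/uio.h>
+    #include <arpa/inet.h>
+
+    /* Send one packet on a connected TCP socket, preceded by its length
+       in network byte order. Returns 0 on success, -1 on a short or
+       failed write. */
+    static int rtp_send_framed(int fd, const void *packet, uint16_t length) {
+        uint16_t len_be = htons(length);
+        struct iovec iov[2] = {
+            { .iov_base = &len_be,          .iov_len = sizeof(len_be) },
+            { .iov_base = (void *) packet,  .iov_len = length }
+        };
+
+        return writev(fd, iov, 2) == (ssize_t) (sizeof(len_be) + length) ? 0 : -1;
+    }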
+
+8. Port Assignment
+
+ As specified in the RTP protocol definition, RTP data SHOULD be
+ carried on an even UDP port number and the corresponding RTCP packets
+ SHOULD be carried on the next higher (odd) port number.
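+
+ A minimal sketch of applying this rule (illustrative only; the names
+ are not from this profile):
+
+    #include <stdint.h>
+
+    /* Force the RTP port to be even and use the next higher odd port
+       for RTCP. The registered default pair for this profile is
+       5004/5005. */
+    static void rtp_choose_ports(uint16_t candidate, uint16_t *rtp, uint16_t *rtcp) {
+        *rtp  = (uint16_t) (candidate & ~1u);   /* clear the low bit: even */
+        *rtcp = (uint16_t) (*rtp + 1);          /* next higher odd port    */
+    }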
+
+ Applications operating under this profile MAY use any such UDP port
+ pair. For example, the port pair MAY be allocated randomly by a
+ session management program. A single fixed port number pair cannot
+ be required because multiple applications using this profile are
+ likely to run on the same host, and there are some operating systems
+ that do not allow multiple processes to use the same UDP port with
+ different multicast addresses.
+
+ However, port numbers 5004 and 5005 have been registered for use with
+ this profile for those applications that choose to use them as the
+ default pair. Applications that operate under multiple profiles MAY
+ use this port pair as an indication to select this profile if they
+ are not subject to the constraint of the previous paragraph.
+ Applications need not have a default and MAY require that the port
+ pair be explicitly specified. The particular port numbers were
+ chosen to lie in the range above 5000 to accommodate port number
+ allocation practice within some versions of the Unix operating
+ system, where port numbers below 1024 can only be used by privileged
+ processes and port numbers between 1024 and 5000 are automatically
+ assigned by the operating system.
+
+9. Changes from RFC 1890
+
+ This RFC revises RFC 1890. It is mostly backwards-compatible with
+ RFC 1890 except for functions removed because two interoperable
+ implementations were not found. The additions to RFC 1890 codify
+ existing practice in the use of payload formats under this profile.
+ Since this profile may be used without using any of the payload
+ formats listed here, the addition of new payload formats in this
+ revision does not affect backwards compatibility. The changes are
+ listed below, categorized into functional and non-functional changes.
+
+ Functional changes:
+
+ o Section 11, "IANA Considerations" was added to specify the
+ registration of the name for this profile. That appendix also
+ references a new Section 3 "Registering Additional Encodings"
+ which establishes a policy that no additional registration of
+ static payload types for this profile will be made beyond those
+ added in this revision and included in Tables 4 and 5. Instead,
+ additional encoding names may be registered as MIME subtypes for
+ binding to dynamic payload types. Non-normative references were
+ added to RFC 3555 [7] where MIME subtypes for all the listed
+ payload formats are registered, some with optional parameters for
+ use of the payload formats.
+
+ o Static payload types 4, 16, 17 and 34 were added to incorporate
+ IANA registrations made since the publication of RFC 1890, along
+ with the corresponding payload format descriptions for G723 and
+ H263.
+
+ o Following working group discussion, static payload types 12 and 18
+ were added along with the corresponding payload format
+ descriptions for QCELP and G729. Static payload type 13 was
+ assigned to the Comfort Noise (CN) payload format defined in RFC
+ 3389. Payload type 19 was marked reserved because it had been
+ temporarily allocated to an earlier version of Comfort Noise
+ present in some draft revisions of this document.
+
+ o The payload format for G721 was renamed to G726-32 following the
+ ITU-T renumbering, and the payload format description for G726 was
+ expanded to include the -16, -24 and -40 data rates. Because of
+ confusion regarding draft revisions of this document, some
+ implementations of these G726 payload formats packed samples into
+ octets starting with the most significant bit rather than the
+ least significant bit as specified here. To partially resolve
+ this incompatibility, new payload formats named AAL2-G726-16, -24,
+ -32 and -40 will be specified in a separate document (see note in
+ Section 4.5.4), and use of static payload type 2 is deprecated as
+ explained in Section 6.
+
+ o Payload formats G729D and G729E were added following the ITU-T
+ addition of Annexes D and E to Recommendation G.729. Listings
+ were added for payload formats GSM-EFR, RED, and H263-1998
+ published in other documents subsequent to RFC 1890. These
+ additional payload formats are referenced only by dynamic payload
+ type numbers.
+
+ o The descriptions of the payload formats for G722, G728, GSM, VDVI
+ were expanded.
+
+ o The payload format for 1016 audio was removed and its static
+ payload type assignment 1 was marked "reserved" because two
+ interoperable implementations were not found.
+
+ o Requirements for congestion control were added in Section 2.
+
+ o This profile follows the suggestion in the revised RTP spec that
+ RTCP bandwidth may be specified separately from the session
+ bandwidth and separately for active senders and passive receivers.
+
+ o The mapping of a user pass-phrase string into an encryption key
+ was deleted from Section 2 because two interoperable
+ implementations were not found.
+
+ o The "quadrophonic" sample ordering convention for four-channel
+ audio was removed to eliminate an ambiguity as noted in Section
+ 4.1.
+
+ Non-functional changes:
+
+ o In Section 4.1, it is now explicitly stated that silence
+ suppression is allowed for all audio payload formats. (This has
+ always been the case and derives from a fundamental aspect of
+ RTP's design and the motivations for packet audio, but was not
+ explicitly stated before.) The use of comfort noise is also
+ explained.
+
+ o In Section 4.1, the requirement level for setting of the marker
+ bit on the first packet after silence for audio was changed from
+ "is" to "SHOULD be", and clarified that the marker bit is set only
+ when packets are intentionally not sent.
+
+ o Similarly, text was added to specify that the marker bit SHOULD be
+ set to one on the last packet of a video frame, and that video
+ frames are distinguished by their timestamps.
+
+ o RFC references are added for payload formats published after RFC
+ 1890.
+
+ o The security considerations and full copyright sections were
+ added.
+
+ o According to Peter Hoddie of Apple, only pre-1994 Macintosh used
+ the 22254.54 rate and none used the 11127.27 rate, so the latter was
+ dropped from the discussion of suggested sampling frequencies.
+
+ o Table 1 was corrected to move some values from the "ms/packet"
+ column to the "default ms/packet" column where they belonged.
+
+ o Since the Interactive Multimedia Association ceased operations, an
+ alternate resource was provided for a referenced IMA document.
+
+ o A note has been added for G722 to clarify a discrepancy between
+ the actual sampling rate and the RTP timestamp clock rate.
+
+ o Small clarifications of the text have been made in several places,
+ some in response to questions from readers. In particular:
+
+ - A definition for "media type" is given in Section 1.1 to allow
+ the explanation of multiplexing RTP sessions in Section 6 to be
+ more clear regarding the multiplexing of multiple media.
+
+ - The explanation of how to determine the number of audio frames
+ in a packet from the length was expanded.
+
+ - More description of the allocation of bandwidth to SDES items
+ is given.
+
+ - A note was added that the convention for the order of channels
+ specified in Section 4.1 may be overridden by a particular
+ encoding or payload format specification.
+
+ - The terms MUST, SHOULD, MAY, etc. are used as defined in RFC
+ 2119.
+
+ o A second author for this document was added.
+
+10. Security Considerations
+
+ Implementations using the profile defined in this specification are
+ subject to the security considerations discussed in the RTP
+ specification [1]. This profile does not specify any different
+ security services. The primary function of this profile is to list a
+ set of data compression encodings for audio and video media.
+
+ Confidentiality of the media streams is achieved by encryption.
+ Because the data compression used with the payload formats described
+ in this profile is applied end-to-end, encryption may be performed
+ after compression so there is no conflict between the two operations.
+
+ A potential denial-of-service threat exists for data encodings using
+ compression techniques that have non-uniform receiver-end
+ computational load. The attacker can inject pathological datagrams
+ into the stream which are complex to decode and cause the receiver to
+ be overloaded.
+
+ As with any IP-based protocol, in some circumstances a receiver may
+ be overloaded simply by the receipt of too many packets, either
+ desired or undesired. Network-layer authentication MAY be used to
+ discard packets from undesired sources, but the processing cost of
+ the authentication itself may be too high. In a multicast
+ environment, source pruning is implemented in IGMPv3 (RFC 3376) [24]
+ and in multicast routing protocols to allow a receiver to select
+ which sources are allowed to reach it.
+
+11. IANA Considerations
+
+ The RTP specification establishes a registry of profile names for use
+ by higher-level control protocols, such as the Session Description
+ Protocol (SDP), RFC 2327 [6], to refer to transport methods. This
+ profile registers the name "RTP/AVP".
+
+ Section 3 establishes the policy that no additional registration of
+ static RTP payload types for this profile will be made beyond those
+ added in this document revision and included in Tables 4 and 5. IANA
+ may reference that section in declining to accept any additional
+ registration requests. In Tables 4 and 5, note that types 1 and 2
+ have been marked reserved and the set of "dyn" payload types included
+ has been updated. These changes are explained in Sections 6 and 9.
+
+12. References
+
+12.1 Normative References
+
+ [1] Schulzrinne, H., Casner, S., Frederick, R. and V. Jacobson,
+ "RTP: A Transport Protocol for Real-Time Applications", RFC
+ 3550, July 2003.
+
+ [2] Bradner, S., "Key Words for Use in RFCs to Indicate Requirement
+ Levels", BCP 14, RFC 2119, March 1997.
+
+ [3] Apple Computer, "Audio Interchange File Format AIFF-C", August
+ 1991. (also ftp://ftp.sgi.com/sgi/aiff-c.9.26.91.ps.Z).
+
+12.2 Informative References
+
+ [4] Braden, R., Clark, D. and S. Shenker, "Integrated Services in
+ the Internet Architecture: an Overview", RFC 1633, June 1994.
+
+ [5] Blake, S., Black, D., Carlson, M., Davies, E., Wang, Z. and W.
+ Weiss, "An Architecture for Differentiated Service", RFC 2475,
+ December 1998.
+
+ [6] Handley, M. and V. Jacobson, "SDP: Session Description
+ Protocol", RFC 2327, April 1998.
+
+ [7] Casner, S. and P. Hoschka, "MIME Type Registration of RTP
+ Payload Types", RFC 3555, July 2003.
+
+ [8] Freed, N., Klensin, J. and J. Postel, "Multipurpose Internet
+ Mail Extensions (MIME) Part Four: Registration Procedures", BCP
+ 13, RFC 2048, November 1996.
+
+ [9] Zopf, R., "Real-time Transport Protocol (RTP) Payload for
+ Comfort Noise (CN)", RFC 3389, September 2002.
+
+ [10] Deleam, D. and J.-P. Petit, "Real-time implementations of the
+ recent ITU-T low bit rate speech coders on the TI TMS320C54X
+ DSP: results, methodology, and applications", in Proc. of
+ International Conference on Signal Processing, Technology, and
+ Applications (ICSPAT) , (Boston, Massachusetts), pp. 1656--1660,
+ October 1996.
+
+ [11] Mouly, M. and M.-B. Pautet, The GSM system for mobile
+ communications Lassay-les-Chateaux, France: Europe Media
+ Duplication, 1993.
+
+ [12] Degener, J., "Digital Speech Compression", Dr. Dobb's Journal,
+ December 1994.
+
+ [13] Redl, S., Weber, M. and M. Oliphant, An Introduction to GSM
+ Boston: Artech House, 1995.
+
+ [14] Hoffman, D., Fernando, G., Goyal, V. and M. Civanlar, "RTP
+ Payload Format for MPEG1/MPEG2 Video", RFC 2250, January 1998.
+
+ [15] Jayant, N. and P. Noll, Digital Coding of Waveforms--Principles
+ and Applications to Speech and Video Englewood Cliffs, New
+ Jersey: Prentice-Hall, 1984.
+
+ [16] McKay, K., "RTP Payload Format for PureVoice(tm) Audio", RFC
+ 2658, August 1999.
+
+ [17] Perkins, C., Kouvelas, I., Hodson, O., Hardman, V., Handley, M.,
+ Bolot, J.-C., Vega-Garcia, A. and S. Fosse-Parisis, "RTP Payload
+ for Redundant Audio Data", RFC 2198, September 1997.
+
+ [18] Speer, M. and D. Hoffman, "RTP Payload Format of Sun's CellB
+ Video Encoding", RFC 2029, October 1996.
+
+ [19] Berc, L., Fenner, W., Frederick, R., McCanne, S. and P. Stewart,
+ "RTP Payload Format for JPEG-Compressed Video", RFC 2435,
+ October 1998.
+
+ [20] Turletti, T. and C. Huitema, "RTP Payload Format for H.261 Video
+ Streams", RFC 2032, October 1996.
+
+ [21] Zhu, C., "RTP Payload Format for H.263 Video Streams", RFC 2190,
+ September 1997.
+
+ [22] Bormann, C., Cline, L., Deisher, G., Gardos, T., Maciocco, C.,
+ Newell, D., Ott, J., Sullivan, G., Wenger, S. and C. Zhu, "RTP
+ Payload Format for the 1998 Version of ITU-T Rec. H.263 Video
+ (H.263+)", RFC 2429, October 1998.
+
+ [23] Schulzrinne, H., Rao, A. and R. Lanphier, "Real Time Streaming
+ Protocol (RTSP)", RFC 2326, April 1998.
+
+ [24] Cain, B., Deering, S., Kouvelas, I., Fenner, B. and A.
+ Thyagarajan, "Internet Group Management Protocol, Version 3",
+ RFC 3376, October 2002.
+
+13. Current Locations of Related Resources
+
+ Note: Several sections below refer to the ITU-T Software Tool
+ Library (STL). It is available from the ITU Sales Service, Place des
+ Nations, CH-1211 Geneve 20, Switzerland (also check
+ http://www.itu.int). The ITU-T STL is covered by a license defined
+ in ITU-T Recommendation G.191, "Software tools for speech and audio
+ coding standardization".
+
+ DVI4
+
+ An archived copy of the document IMA Recommended Practices for
+ Enhancing Digital Audio Compatibility in Multimedia Systems (version
+ 3.0), which describes the IMA ADPCM algorithm, is available at:
+
+ http://www.cs.columbia.edu/~hgs/audio/dvi/
+
+ An implementation is available from Jack Jansen at
+
+ ftp://ftp.cwi.nl/local/pub/audio/adpcm.shar
+
+ G722
+
+ An implementation of the G.722 algorithm is available as part of the
+ ITU-T STL, described above.
+
+ G723
+
+ The reference C code implementation defining the G.723.1 algorithm
+ and its Annexes A, B, and C are available as an integral part of
+ Recommendation G.723.1 from the ITU Sales Service, address listed
+ above. Both the algorithm and C code are covered by a specific
+ license. The ITU-T Secretariat should be contacted to obtain such
+ licensing information.
+
+ G726
+
+ G726 is specified in the ITU-T Recommendation G.726, "40, 32, 24, and
+ 16 kb/s Adaptive Differential Pulse Code Modulation (ADPCM)". An
+ implementation of the G.726 algorithm is available as part of the
+ ITU-T STL, described above.
+
+ G729
+
+ The reference C code implementation defining the G.729 algorithm and
+ its Annexes A through I are available as an integral part of
+ Recommendation G.729 from the ITU Sales Service, listed above. Annex
+ I contains the integrated C source code for all G.729 operating
+ modes. The G.729 algorithm and associated C code are covered by a
+ specific license. The contact information for obtaining the license
+ is available from the ITU-T Secretariat.
+
+ GSM
+
+ A reference implementation was written by Carsten Bormann and Jutta
+ Degener (then at TU Berlin, Germany). It is available at
+
+ http://www.dmn.tzi.org/software/gsm/
+
+ Although the RPE-LTP algorithm is not an ITU-T standard, there is a C
+ code implementation of the RPE-LTP algorithm available as part of the
+ ITU-T STL. The STL implementation is an adaptation of the TU Berlin
+ version.
+
+ LPC
+
+ An implementation is available at
+
+ ftp://parcftp.xerox.com/pub/net-research/lpc.tar.Z
+
+ PCMU, PCMA
+
+ An implementation of these algorithms is available as part of the
+ ITU-T STL, described above.
+
+14. Acknowledgments
+
+ The comments and careful review of Simao Campos, Richard Cox and AVT
+ Working Group participants are gratefully acknowledged. The GSM
+ description was adopted from the IMTC Voice over IP Forum Service
+ Interoperability Implementation Agreement (January 1997). Fred Burg
+ and Terry Lyons helped with the G.729 description.
+
+15. Intellectual Property Rights Statement
+
+ The IETF takes no position regarding the validity or scope of any
+ intellectual property or other rights that might be claimed to
+ pertain to the implementation or use of the technology described in
+ this document or the extent to which any license under such rights
+ might or might not be available; neither does it represent that it
+ has made any effort to identify any such rights. Information on the
+ IETF's procedures with respect to rights in standards-track and
+ standards-related documentation can be found in BCP-11. Copies of
+ claims of rights made available for publication and any assurances of
+ licenses to be made available, or the result of an attempt made to
+ obtain a general license or permission for the use of such
+ proprietary rights by implementors or users of this specification can
+ be obtained from the IETF Secretariat.
+
+ The IETF invites any interested party to bring to its attention any
+ copyrights, patents or patent applications, or other proprietary
+ rights which may cover technology that may be required to practice
+ this standard. Please address the information to the IETF Executive
+ Director.
+
+16. Authors' Addresses
+
+ Henning Schulzrinne
+ Department of Computer Science
+ Columbia University
+ 1214 Amsterdam Avenue
+ New York, NY 10027
+ United States
+
+ EMail: schulzrinne@cs.columbia.edu
+
+
+ Stephen L. Casner
+ Packet Design
+ 3400 Hillview Avenue, Building 3
+ Palo Alto, CA 94304
+ United States
+
+ EMail: casner@acm.org
+
+17. Full Copyright Statement
+
+ Copyright (C) The Internet Society (2003). All Rights Reserved.
+
+ This document and translations of it may be copied and furnished to
+ others, and derivative works that comment on or otherwise explain it
+ or assist in its implementation may be prepared, copied, published
+ and distributed, in whole or in part, without restriction of any
+ kind, provided that the above copyright notice and this paragraph are
+ included on all such copies and derivative works. However, this
+ document itself may not be modified in any way, such as by removing
+ the copyright notice or references to the Internet Society or other
+ Internet organizations, except as needed for the purpose of
+ developing Internet standards in which case the procedures for
+ copyrights defined in the Internet Standards process must be
+ followed, or as required to translate it into languages other than
+ English.
+
+ The limited permissions granted above are perpetual and will not be
+ revoked by the Internet Society or its successors or assigns.
+
+ This document and the information contained herein is provided on an
+ "AS IS" basis and THE INTERNET SOCIETY AND THE INTERNET ENGINEERING
+ TASK FORCE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING
+ BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION
+ HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
+
+Acknowledgement
+
+ Funding for the RFC Editor function is currently provided by the
+ Internet Society.
+
diff --git a/src/modules/rtp/rtp.c b/src/modules/rtp/rtp.c
new file mode 100644
index 00000000..997fcc34
--- /dev/null
+++ b/src/modules/rtp/rtp.c
@@ -0,0 +1,364 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+
+#ifdef HAVE_SYS_FILIO_H
+#include <sys/filio.h>
+#endif
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/log.h>
+#include <pulsecore/macro.h>
+#include <pulsecore/core-util.h>
+
+#include "rtp.h"
+
+pa_rtp_context* pa_rtp_context_init_send(pa_rtp_context *c, int fd, uint32_t ssrc, uint8_t payload, size_t frame_size) {
+ pa_assert(c);
+ pa_assert(fd >= 0);
+
+ c->fd = fd;
+ c->sequence = (uint16_t) (rand()*rand());
+ c->timestamp = 0;
+ c->ssrc = ssrc ? ssrc : (uint32_t) (rand()*rand());
+ c->payload = payload & 127;
+ c->frame_size = frame_size;
+
+ return c;
+}
+
+#define MAX_IOVECS 16
+
+int pa_rtp_send(pa_rtp_context *c, size_t size, pa_memblockq *q) {
+ struct iovec iov[MAX_IOVECS];
+ pa_memblock* mb[MAX_IOVECS];
+ int iov_idx = 1;
+ size_t n = 0;
+
+ pa_assert(c);
+ pa_assert(size > 0);
+ pa_assert(q);
+
+ if (pa_memblockq_get_length(q) < size)
+ return 0;
+
+ for (;;) {
+ int r;
+ pa_memchunk chunk;
+
+ pa_memchunk_reset(&chunk);
+
+ if ((r = pa_memblockq_peek(q, &chunk)) >= 0) {
+
+ size_t k = n + chunk.length > size ? size - n : chunk.length;
+
+ pa_assert(chunk.memblock);
+
+ iov[iov_idx].iov_base = ((uint8_t*) pa_memblock_acquire(chunk.memblock) + chunk.index);
+ iov[iov_idx].iov_len = k;
+ mb[iov_idx] = chunk.memblock;
+ iov_idx ++;
+
+ n += k;
+ pa_memblockq_drop(q, k);
+ }
+
+ pa_assert(n % c->frame_size == 0);
+
+ if (r < 0 || n >= size || iov_idx >= MAX_IOVECS) {
+ uint32_t header[3];
+ struct msghdr m;
+ int k, i;
+
+ if (n > 0) {
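+ /* Fixed 12-octet RTP header (RFC 3550, section 5.1): version 2 in the
+    top two bits, no padding, extension or CSRC entries, marker clear,
+    the 7-bit payload type and 16-bit sequence number in the first
+    word, followed by the timestamp and the SSRC. */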
+ header[0] = htonl(((uint32_t) 2 << 30) | ((uint32_t) c->payload << 16) | ((uint32_t) c->sequence));
+ header[1] = htonl(c->timestamp);
+ header[2] = htonl(c->ssrc);
+
+ iov[0].iov_base = (void*)header;
+ iov[0].iov_len = sizeof(header);
+
+ m.msg_name = NULL;
+ m.msg_namelen = 0;
+ m.msg_iov = iov;
+ m.msg_iovlen = iov_idx;
+ m.msg_control = NULL;
+ m.msg_controllen = 0;
+ m.msg_flags = 0;
+
+ k = sendmsg(c->fd, &m, MSG_DONTWAIT);
+
+ for (i = 1; i < iov_idx; i++) {
+ pa_memblock_release(mb[i]);
+ pa_memblock_unref(mb[i]);
+ }
+
+ c->sequence++;
+ } else
+ k = 0;
+
+ c->timestamp += n/c->frame_size;
+
+ if (k < 0) {
+ if (errno != EAGAIN && errno != EINTR) /* If the queue is full, just ignore it */
+ pa_log("sendmsg() failed: %s", pa_cstrerror(errno));
+ return -1;
+ }
+
+ if (r < 0 || pa_memblockq_get_length(q) < size)
+ break;
+
+ n = 0;
+ iov_idx = 1;
+ }
+ }
+
+ return 0;
+}
+
+pa_rtp_context* pa_rtp_context_init_recv(pa_rtp_context *c, int fd, size_t frame_size) {
+ pa_assert(c);
+
+ c->fd = fd;
+ c->frame_size = frame_size;
+ return c;
+}
+
+int pa_rtp_recv(pa_rtp_context *c, pa_memchunk *chunk, pa_mempool *pool) {
+ int size;
+ struct msghdr m;
+ struct iovec iov;
+ uint32_t header;
+ int cc;
+ ssize_t r;
+
+ pa_assert(c);
+ pa_assert(chunk);
+
+ pa_memchunk_reset(chunk);
+
+ if (ioctl(c->fd, FIONREAD, &size) < 0) {
+ pa_log_warn("FIONREAD failed: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ if (!size)
+ return 0;
+
+ chunk->memblock = pa_memblock_new(pool, size);
+
+ iov.iov_base = pa_memblock_acquire(chunk->memblock);
+ iov.iov_len = size;
+
+ m.msg_name = NULL;
+ m.msg_namelen = 0;
+ m.msg_iov = &iov;
+ m.msg_iovlen = 1;
+ m.msg_control = NULL;
+ m.msg_controllen = 0;
+ m.msg_flags = 0;
+
+ r = recvmsg(c->fd, &m, 0);
+ pa_memblock_release(chunk->memblock);
+
+ if (r != size) {
+ if (r < 0 && errno != EAGAIN && errno != EINTR)
+ pa_log_warn("recvmsg() failed: %s", r < 0 ? pa_cstrerror(errno) : "size mismatch");
+
+ goto fail;
+ }
+
+ if (size < 12) {
+ pa_log_warn("RTP packet too short.");
+ goto fail;
+ }
+
+ memcpy(&header, iov.iov_base, sizeof(uint32_t));
+ memcpy(&c->timestamp, (uint8_t*) iov.iov_base + 4, sizeof(uint32_t));
+ memcpy(&c->ssrc, (uint8_t*) iov.iov_base + 8, sizeof(uint32_t));
+
+ header = ntohl(header);
+ c->timestamp = ntohl(c->timestamp);
+ c->ssrc = ntohl(c->ssrc);
+
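+ /* Sanity-check the fixed header: only version 2 is accepted, padded
+    or extended packets are rejected, and the CSRC count (CC) is used
+    below to locate the start of the payload. */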
+ if ((header >> 30) != 2) {
+ pa_log_warn("Unsupported RTP version.");
+ goto fail;
+ }
+
+ if ((header >> 29) & 1) {
+ pa_log_warn("RTP padding not supported.");
+ goto fail;
+ }
+
+ if ((header >> 28) & 1) {
+ pa_log_warn("RTP header extensions not supported.");
+ goto fail;
+ }
+
+ cc = (header >> 24) & 0xF;
+ c->payload = (header >> 16) & 127;
+ c->sequence = header & 0xFFFF;
+
+ if (12 + cc*4 > size) {
+ pa_log_warn("RTP packet too short. (CSRC)");
+ goto fail;
+ }
+
+ chunk->index = 12 + cc*4;
+ chunk->length = size - chunk->index;
+
+ if (chunk->length % c->frame_size != 0) {
+ pa_log_warn("Bad RTP packet size.");
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ if (chunk->memblock)
+ pa_memblock_unref(chunk->memblock);
+
+ return -1;
+}
+
+uint8_t pa_rtp_payload_from_sample_spec(const pa_sample_spec *ss) {
+ pa_assert(ss);
+
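+ /* Static payload types from RFC 3551, Table 4: 0 = PCMU/8000/1,
+    8 = PCMA/8000/1, 10 = L16/44100/2, 11 = L16/44100/1. Everything
+    else falls back to 127 from the dynamic range. */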
+ if (ss->format == PA_SAMPLE_ULAW && ss->rate == 8000 && ss->channels == 1)
+ return 0;
+ if (ss->format == PA_SAMPLE_ALAW && ss->rate == 8000 && ss->channels == 1)
+ return 8;
+ if (ss->format == PA_SAMPLE_S16BE && ss->rate == 44100 && ss->channels == 2)
+ return 10;
+ if (ss->format == PA_SAMPLE_S16BE && ss->rate == 44100 && ss->channels == 1)
+ return 11;
+
+ return 127;
+}
+
+pa_sample_spec *pa_rtp_sample_spec_from_payload(uint8_t payload, pa_sample_spec *ss) {
+ pa_assert(ss);
+
+ switch (payload) {
+ case 0:
+ ss->channels = 1;
+ ss->format = PA_SAMPLE_ULAW;
+ ss->rate = 8000;
+ break;
+
+ case 8:
+ ss->channels = 1;
+ ss->format = PA_SAMPLE_ALAW;
+ ss->rate = 8000;
+ break;
+
+ case 10:
+ ss->channels = 2;
+ ss->format = PA_SAMPLE_S16BE;
+ ss->rate = 44100;
+ break;
+
+ case 11:
+ ss->channels = 1;
+ ss->format = PA_SAMPLE_S16BE;
+ ss->rate = 44100;
+ break;
+
+ default:
+ return NULL;
+ }
+
+ return ss;
+}
+
+pa_sample_spec *pa_rtp_sample_spec_fixup(pa_sample_spec * ss) {
+ pa_assert(ss);
+
+ if (!pa_rtp_sample_spec_valid(ss))
+ ss->format = PA_SAMPLE_S16BE;
+
+ pa_assert(pa_rtp_sample_spec_valid(ss));
+ return ss;
+}
+
+int pa_rtp_sample_spec_valid(const pa_sample_spec *ss) {
+ pa_assert(ss);
+
+ if (!pa_sample_spec_valid(ss))
+ return 0;
+
+ return
+ ss->format == PA_SAMPLE_U8 ||
+ ss->format == PA_SAMPLE_ALAW ||
+ ss->format == PA_SAMPLE_ULAW ||
+ ss->format == PA_SAMPLE_S16BE;
+}
+
+void pa_rtp_context_destroy(pa_rtp_context *c) {
+ pa_assert(c);
+
+ pa_close(c->fd);
+}
+
+const char* pa_rtp_format_to_string(pa_sample_format_t f) {
+ switch (f) {
+ case PA_SAMPLE_S16BE:
+ return "L16";
+ case PA_SAMPLE_U8:
+ return "L8";
+ case PA_SAMPLE_ALAW:
+ return "PCMA";
+ case PA_SAMPLE_ULAW:
+ return "PCMU";
+ default:
+ return NULL;
+ }
+}
+
+pa_sample_format_t pa_rtp_string_to_format(const char *s) {
+ pa_assert(s);
+
+ if (!(strcmp(s, "L16")))
+ return PA_SAMPLE_S16BE;
+ else if (!strcmp(s, "L8"))
+ return PA_SAMPLE_U8;
+ else if (!strcmp(s, "PCMA"))
+ return PA_SAMPLE_ALAW;
+ else if (!strcmp(s, "PCMU"))
+ return PA_SAMPLE_ULAW;
+ else
+ return PA_SAMPLE_INVALID;
+}
+
diff --git a/src/modules/rtp/rtp.h b/src/modules/rtp/rtp.h
new file mode 100644
index 00000000..ad7175ca
--- /dev/null
+++ b/src/modules/rtp/rtp.h
@@ -0,0 +1,59 @@
+#ifndef foortphfoo
+#define foortphfoo
+
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#include <inttypes.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <pulsecore/memblockq.h>
+#include <pulsecore/memchunk.h>
+
+typedef struct pa_rtp_context {
+ int fd;
+ uint16_t sequence;
+ uint32_t timestamp;
+ uint32_t ssrc;
+ uint8_t payload;
+ size_t frame_size;
+} pa_rtp_context;
+
+pa_rtp_context* pa_rtp_context_init_send(pa_rtp_context *c, int fd, uint32_t ssrc, uint8_t payload, size_t frame_size);
+int pa_rtp_send(pa_rtp_context *c, size_t size, pa_memblockq *q);
+
+pa_rtp_context* pa_rtp_context_init_recv(pa_rtp_context *c, int fd, size_t frame_size);
+int pa_rtp_recv(pa_rtp_context *c, pa_memchunk *chunk, pa_mempool *pool);
+
+void pa_rtp_context_destroy(pa_rtp_context *c);
+
+pa_sample_spec* pa_rtp_sample_spec_fixup(pa_sample_spec *ss);
+int pa_rtp_sample_spec_valid(const pa_sample_spec *ss);
+
+uint8_t pa_rtp_payload_from_sample_spec(const pa_sample_spec *ss);
+pa_sample_spec *pa_rtp_sample_spec_from_payload(uint8_t payload, pa_sample_spec *ss);
+
+const char* pa_rtp_format_to_string(pa_sample_format_t f);
+pa_sample_format_t pa_rtp_string_to_format(const char *s);
+
+#endif
diff --git a/src/modules/rtp/sap.c b/src/modules/rtp/sap.c
new file mode 100644
index 00000000..ed7eb0be
--- /dev/null
+++ b/src/modules/rtp/sap.c
@@ -0,0 +1,223 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <time.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+
+#ifdef HAVE_SYS_FILIO_H
+#include <sys/filio.h>
+#endif
+
+#include <pulse/xmalloc.h>
+
+#include <pulsecore/core-error.h>
+#include <pulsecore/core-util.h>
+#include <pulsecore/log.h>
+#include <pulsecore/macro.h>
+
+#include "sap.h"
+#include "sdp.h"
+
+#define MIME_TYPE "application/sdp"
+
+pa_sap_context* pa_sap_context_init_send(pa_sap_context *c, int fd, char *sdp_data) {
+ pa_assert(c);
+ pa_assert(fd >= 0);
+ pa_assert(sdp_data);
+
+ c->fd = fd;
+ c->sdp_data = sdp_data;
+ c->msg_id_hash = (uint16_t) (rand()*rand());
+
+ return c;
+}
+
+void pa_sap_context_destroy(pa_sap_context *c) {
+ pa_assert(c);
+
+ pa_close(c->fd);
+ pa_xfree(c->sdp_data);
+}
+
+int pa_sap_send(pa_sap_context *c, int goodbye) {
+ uint32_t header;
+ struct sockaddr_storage sa_buf;
+ struct sockaddr *sa = (struct sockaddr*) &sa_buf;
+ socklen_t salen = sizeof(sa_buf);
+ struct iovec iov[4];
+ struct msghdr m;
+ int k;
+
+ if (getsockname(c->fd, sa, &salen) < 0) {
+ pa_log("getsockname() failed: %s", pa_cstrerror(errno));
+ return -1;
+ }
+
+ pa_assert(sa->sa_family == AF_INET || sa->sa_family == AF_INET6);
+
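+ /* SAP header (RFC 2974): version 1 in the three most significant
+    bits, the A flag set for an IPv6 origin address, the T flag set for
+    a session deletion ("goodbye") announcement, and the 16-bit message
+    identifier hash in the low bits. */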
+ header = htonl(((uint32_t) 1 << 29) |
+ (sa->sa_family == AF_INET6 ? (uint32_t) 1 << 28 : 0) |
+ (goodbye ? (uint32_t) 1 << 26 : 0) |
+ (c->msg_id_hash));
+
+ iov[0].iov_base = &header;
+ iov[0].iov_len = sizeof(header);
+
+ iov[1].iov_base = sa->sa_family == AF_INET ? (void*) &((struct sockaddr_in*) sa)->sin_addr : (void*) &((struct sockaddr_in6*) sa)->sin6_addr;
+ iov[1].iov_len = sa->sa_family == AF_INET ? 4 : 16;
+
+ iov[2].iov_base = (char*) MIME_TYPE;
+ iov[2].iov_len = sizeof(MIME_TYPE);
+
+ iov[3].iov_base = c->sdp_data;
+ iov[3].iov_len = strlen(c->sdp_data);
+
+ m.msg_name = NULL;
+ m.msg_namelen = 0;
+ m.msg_iov = iov;
+ m.msg_iovlen = 4;
+ m.msg_control = NULL;
+ m.msg_controllen = 0;
+ m.msg_flags = 0;
+
+ if ((k = sendmsg(c->fd, &m, MSG_DONTWAIT)) < 0)
+ pa_log_warn("sendmsg() failed: %s", pa_cstrerror(errno));
+
+ return k;
+}
+
+pa_sap_context* pa_sap_context_init_recv(pa_sap_context *c, int fd) {
+ pa_assert(c);
+ pa_assert(fd >= 0);
+
+ c->fd = fd;
+ c->sdp_data = NULL;
+ return c;
+}
+
+int pa_sap_recv(pa_sap_context *c, int *goodbye) {
+ struct msghdr m;
+ struct iovec iov;
+ int size, k;
+ char *buf = NULL, *e;
+ uint32_t header;
+ int six, ac;
+ ssize_t r;
+
+ pa_assert(c);
+ pa_assert(goodbye);
+
+ if (ioctl(c->fd, FIONREAD, &size) < 0) {
+ pa_log_warn("FIONREAD failed: %s", pa_cstrerror(errno));
+ goto fail;
+ }
+
+ buf = pa_xnew(char, size+1);
+ buf[size] = 0;
+
+ iov.iov_base = buf;
+ iov.iov_len = size;
+
+ m.msg_name = NULL;
+ m.msg_namelen = 0;
+ m.msg_iov = &iov;
+ m.msg_iovlen = 1;
+ m.msg_control = NULL;
+ m.msg_controllen = 0;
+ m.msg_flags = 0;
+
+ if ((r = recvmsg(c->fd, &m, 0)) != size) {
+ pa_log_warn("recvmsg() failed: %s", r < 0 ? pa_cstrerror(errno) : "size mismatch");
+ goto fail;
+ }
+
+ if (size < 4) {
+ pa_log_warn("SAP packet too short.");
+ goto fail;
+ }
+
+ memcpy(&header, buf, sizeof(uint32_t));
+ header = ntohl(header);
+
+ if (header >> 29 != 1) {
+ pa_log_warn("Unsupported SAP version.");
+ goto fail;
+ }
+
+ if ((header >> 25) & 1) {
+ pa_log_warn("Encrypted SAP not supported.");
+ goto fail;
+ }
+
+ if ((header >> 24) & 1) {
+ pa_log_warn("Compressed SAP not supported.");
+ goto fail;
+ }
+
+ six = (header >> 28) & 1;
+ ac = (header >> 16) & 0xFF;
+
+ k = 4 + (six ? 16 : 4) + ac*4;
+ if (size < k) {
+ pa_log_warn("SAP packet too short (AD).");
+ goto fail;
+ }
+
+ e = buf + k;
+ size -= k;
+
+ if ((unsigned) size >= sizeof(MIME_TYPE) && !strcmp(e, MIME_TYPE)) {
+ e += sizeof(MIME_TYPE);
+ size -= sizeof(MIME_TYPE);
+ } else if ((unsigned) size < sizeof(PA_SDP_HEADER)-1 || strncmp(e, PA_SDP_HEADER, sizeof(PA_SDP_HEADER)-1)) {
+ pa_log_warn("Invalid SDP header.");
+ goto fail;
+ }
+
+ if (c->sdp_data)
+ pa_xfree(c->sdp_data);
+
+ c->sdp_data = pa_xstrndup(e, size);
+ pa_xfree(buf);
+
+ *goodbye = !!((header >> 26) & 1);
+
+ return 0;
+
+fail:
+ pa_xfree(buf);
+
+ return -1;
+}
diff --git a/src/modules/rtp/sap.h b/src/modules/rtp/sap.h
new file mode 100644
index 00000000..f906a32b
--- /dev/null
+++ b/src/modules/rtp/sap.h
@@ -0,0 +1,48 @@
+#ifndef foosaphfoo
+#define foosaphfoo
+
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#include <inttypes.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+#include <pulsecore/memblockq.h>
+#include <pulsecore/memchunk.h>
+
+typedef struct pa_sap_context {
+ int fd;
+ char *sdp_data;
+
+ uint16_t msg_id_hash;
+} pa_sap_context;
+
+pa_sap_context* pa_sap_context_init_send(pa_sap_context *c, int fd, char *sdp_data);
+void pa_sap_context_destroy(pa_sap_context *c);
+
+int pa_sap_send(pa_sap_context *c, int goodbye);
+
+pa_sap_context* pa_sap_context_init_recv(pa_sap_context *c, int fd);
+int pa_sap_recv(pa_sap_context *c, int *goodbye);
+
+#endif
diff --git a/src/modules/rtp/sdp.c b/src/modules/rtp/sdp.c
new file mode 100644
index 00000000..50ac157a
--- /dev/null
+++ b/src/modules/rtp/sdp.c
@@ -0,0 +1,261 @@
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#ifdef HAVE_CONFIG_H
+#include <config.h>
+#endif
+
+#include <time.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <string.h>
+
+#include <pulse/xmalloc.h>
+#include <pulse/util.h>
+
+#include <pulsecore/core-util.h>
+#include <pulsecore/log.h>
+#include <pulsecore/macro.h>
+
+#include "sdp.h"
+#include "rtp.h"
+
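+/* Build a minimal SDP description for an RTP audio session. Assuming
+ * PA_SDP_HEADER is the usual "v=0" line, the result looks roughly like
+ * (IP6 instead of IP4 for an IPv6 session):
+ *
+ *    v=0
+ *    o=<user> <ntp-time> 0 IN IP4 <src>
+ *    s=<name>
+ *    c=IN IP4 <dst>
+ *    t=<ntp-time> 0
+ *    a=recvonly
+ *    m=audio <port> RTP/AVP <payload>
+ *    a=rtpmap:<payload> <format>/<rate>/<channels>
+ *    a=type:broadcast
+ */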
+char *pa_sdp_build(int af, const void *src, const void *dst, const char *name, uint16_t port, uint8_t payload, const pa_sample_spec *ss) {
+ uint32_t ntp;
+ char buf_src[64], buf_dst[64], un[64];
+ const char *u, *f, *a;
+
+ pa_assert(src);
+ pa_assert(dst);
+ pa_assert(af == AF_INET || af == AF_INET6);
+
+ pa_assert_se(f = pa_rtp_format_to_string(ss->format));
+
+ if (!(u = pa_get_user_name(un, sizeof(un))))
+ u = "-";
+
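+ /* SDP carries NTP-format timestamps (seconds since 1900-01-01);
+    2208988800 is the offset between the NTP and Unix epochs. */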
+ ntp = time(NULL) + 2208988800U;
+
+ pa_assert_se(a = inet_ntop(af, src, buf_src, sizeof(buf_src)));
+ pa_assert_se(a = inet_ntop(af, dst, buf_dst, sizeof(buf_dst)));
+
+ return pa_sprintf_malloc(
+ PA_SDP_HEADER
+ "o=%s %lu 0 IN %s %s\n"
+ "s=%s\n"
+ "c=IN %s %s\n"
+ "t=%lu 0\n"
+ "a=recvonly\n"
+ "m=audio %u RTP/AVP %i\n"
+ "a=rtpmap:%i %s/%u/%u\n"
+ "a=type:broadcast\n",
+ u, (unsigned long) ntp, af == AF_INET ? "IP4" : "IP6", buf_src,
+ name,
+ af == AF_INET ? "IP4" : "IP6", buf_dst,
+ (unsigned long) ntp,
+ port, payload,
+ payload, f, ss->rate, ss->channels);
+}
+
+static pa_sample_spec *parse_sdp_sample_spec(pa_sample_spec *ss, char *c) {
+ unsigned rate, channels;
+ pa_assert(ss);
+ pa_assert(c);
+
+ if (pa_startswith(c, "L16/")) {
+ ss->format = PA_SAMPLE_S16BE;
+ c += 4;
+ } else if (pa_startswith(c, "L8/")) {
+ ss->format = PA_SAMPLE_U8;
+ c += 3;
+ } else if (pa_startswith(c, "PCMA/")) {
+ ss->format = PA_SAMPLE_ALAW;
+ c += 5;
+ } else if (pa_startswith(c, "PCMU/")) {
+ ss->format = PA_SAMPLE_ULAW;
+ c += 5;
+ } else
+ return NULL;
+
+ if (sscanf(c, "%u/%u", &rate, &channels) == 2) {
+ ss->rate = rate;
+ ss->channels = channels;
+ } else if (sscanf(c, "%u", &rate) == 1) {
+ ss->rate = rate;
+ ss->channels = 1;
+ } else
+ return NULL;
+
+ if (!pa_sample_spec_valid(ss))
+ return NULL;
+
+ return ss;
+}
+
+pa_sdp_info *pa_sdp_parse(const char *t, pa_sdp_info *i, int is_goodbye) {
+ uint16_t port = 0;
+ int ss_valid = 0;
+
+ pa_assert(t);
+ pa_assert(i);
+
+ i->origin = i->session_name = NULL;
+ i->salen = 0;
+ i->payload = 255;
+
+ if (!pa_startswith(t, PA_SDP_HEADER)) {
+ pa_log("Failed to parse SDP data: invalid header.");
+ goto fail;
+ }
+
+ t += sizeof(PA_SDP_HEADER)-1;
+
+ while (*t) {
+ size_t l;
+
+ l = strcspn(t, "\n");
+
+ if (l <= 2) {
+ pa_log("Failed to parse SDP data: line too short: >%s<.", t);
+ goto fail;
+ }
+
+ if (pa_startswith(t, "o="))
+ i->origin = pa_xstrndup(t+2, l-2);
+ else if (pa_startswith(t, "s="))
+ i->session_name = pa_xstrndup(t+2, l-2);
+ else if (pa_startswith(t, "c=IN IP4 ")) {
+ char a[64];
+ size_t k;
+
+ k = l-8 > sizeof(a) ? sizeof(a) : l-8;
+
+ pa_strlcpy(a, t+9, k);
+ a[strcspn(a, "/")] = 0;
+
+ if (inet_pton(AF_INET, a, &((struct sockaddr_in*) &i->sa)->sin_addr) <= 0) {
+ pa_log("Failed to parse SDP data: bad address: >%s<.", a);
+ goto fail;
+ }
+
+ ((struct sockaddr_in*) &i->sa)->sin_family = AF_INET;
+ ((struct sockaddr_in*) &i->sa)->sin_port = 0;
+ i->salen = sizeof(struct sockaddr_in);
+ } else if (pa_startswith(t, "c=IN IP6 ")) {
+ char a[64];
+ size_t k;
+
+ k = l-8 > sizeof(a) ? sizeof(a) : l-8;
+
+ pa_strlcpy(a, t+9, k);
+ a[strcspn(a, "/")] = 0;
+
+ if (inet_pton(AF_INET6, a, &((struct sockaddr_in6*) &i->sa)->sin6_addr) <= 0) {
+ pa_log("Failed to parse SDP data: bad address: >%s<.", a);
+ goto fail;
+ }
+
+ ((struct sockaddr_in6*) &i->sa)->sin6_family = AF_INET6;
+ ((struct sockaddr_in6*) &i->sa)->sin6_port = 0;
+ i->salen = sizeof(struct sockaddr_in6);
+ } else if (pa_startswith(t, "m=audio ")) {
+
+ if (i->payload > 127) {
+ int _port, _payload;
+
+ if (sscanf(t+8, "%i RTP/AVP %i", &_port, &_payload) == 2) {
+
+ if (_port <= 0 || _port > 0xFFFF) {
+ pa_log("Failed to parse SDP data: invalid port %i.", _port);
+ goto fail;
+ }
+
+ if (_payload < 0 || _payload > 127) {
+ pa_log("Failed to parse SDP data: invalid payload %i.", _payload);
+ goto fail;
+ }
+
+ port = (uint16_t) _port;
+ i->payload = (uint8_t) _payload;
+
+ if (pa_rtp_sample_spec_from_payload(i->payload, &i->sample_spec))
+ ss_valid = 1;
+ }
+ }
+ } else if (pa_startswith(t, "a=rtpmap:")) {
+
+ if (i->payload <= 127) {
+ char c[64] = { 0 };
+ int _payload;
+
+ if (sscanf(t+9, "%i %63c", &_payload, c) == 2) { /* zeroed buffer + %63c keeps c NUL-terminated for strcspn() below */
+
+ if (_payload < 0 || _payload > 127) {
+ pa_log("Failed to parse SDP data: invalid payload %i.", _payload);
+ goto fail;
+ }
+ if (_payload == i->payload) {
+
+ c[strcspn(c, "\n")] = 0;
+
+ if (parse_sdp_sample_spec(&i->sample_spec, c))
+ ss_valid = 1;
+ }
+ }
+ }
+ }
+
+ t += l;
+
+ if (*t == '\n')
+ t++;
+ }
+
+ if (!i->origin || (!is_goodbye && (!i->salen || i->payload > 127 || !ss_valid || port == 0))) {
+ pa_log("Failed to parse SDP data: missing data.");
+ goto fail;
+ }
+
+ if (((struct sockaddr*) &i->sa)->sa_family == AF_INET)
+ ((struct sockaddr_in*) &i->sa)->sin_port = htons(port);
+ else
+ ((struct sockaddr_in6*) &i->sa)->sin6_port = htons(port);
+
+ return i;
+
+fail:
+ pa_xfree(i->origin);
+ pa_xfree(i->session_name);
+
+ return NULL;
+}
+
+void pa_sdp_info_destroy(pa_sdp_info *i) {
+ pa_assert(i);
+
+ pa_xfree(i->origin);
+ pa_xfree(i->session_name);
+}
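
As a usage sketch for the builder above: the addresses, port, session name and payload number here are invented for illustration (127 is merely a value from the RTP dynamic payload range), and the returned string is heap-allocated by pa_sprintf_malloc(), so the caller is expected to free it.

    #include <arpa/inet.h>
    #include <netinet/in.h>

    #include "sdp.h"

    /* Hypothetical: build an SDP announcement for a 44.1 kHz stereo
       S16BE stream sent to a multicast group. */
    static char *example_sdp(void) {
        struct in_addr src, dst;
        pa_sample_spec ss;

        ss.format = PA_SAMPLE_S16BE;
        ss.rate = 44100;
        ss.channels = 2;

        inet_pton(AF_INET, "192.168.0.2", &src);   /* our source address */
        inet_pton(AF_INET, "224.0.0.56", &dst);    /* RTP destination */

        /* Caller owns the returned string and should pa_xfree() it. */
        return pa_sdp_build(AF_INET, &src, &dst, "Example stream", 46000, 127, &ss);
    }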
diff --git a/src/modules/rtp/sdp.h b/src/modules/rtp/sdp.h
new file mode 100644
index 00000000..7c91fca6
--- /dev/null
+++ b/src/modules/rtp/sdp.h
@@ -0,0 +1,52 @@
+#ifndef foosdphfoo
+#define foosdphfoo
+
+/* $Id$ */
+
+/***
+ This file is part of PulseAudio.
+
+ Copyright 2006 Lennart Poettering
+
+ PulseAudio is free software; you can redistribute it and/or modify
+ it under the terms of the GNU Lesser General Public License as published
+ by the Free Software Foundation; either version 2 of the License,
+ or (at your option) any later version.
+
+ PulseAudio is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public License
+ along with PulseAudio; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ USA.
+***/
+
+#include <inttypes.h>
+#include <sys/socket.h>
+#include <sys/types.h>
+
+#include <pulse/sample.h>
+
+#define PA_SDP_HEADER "v=0\n"
+
+typedef struct pa_sdp_info {
+ char *origin;
+ char *session_name;
+
+ struct sockaddr_storage sa;
+ socklen_t salen;
+
+ pa_sample_spec sample_spec;
+ uint8_t payload;
+} pa_sdp_info;
+
+char *pa_sdp_build(int af, const void *src, const void *dst, const char *name, uint16_t port, uint8_t payload, const pa_sample_spec *ss);
+
+pa_sdp_info *pa_sdp_parse(const char *t, pa_sdp_info *info, int is_goodbye);
+
+void pa_sdp_info_destroy(pa_sdp_info *i);
+
+#endif
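
On the receiving side, the parser above might be driven roughly as follows; in the RTP modules the SDP text would arrive inside a SAP packet (pa_sap_recv()), which is assumed here rather than shown.

    #include <string.h>

    #include "sdp.h"

    /* Hypothetical handler for one received announcement. */
    static int handle_announcement(const char *sdp_text, int is_goodbye) {
        pa_sdp_info info;

        memset(&info, 0, sizeof(info));

        /* On failure pa_sdp_parse() frees what it allocated and returns NULL. */
        if (!pa_sdp_parse(sdp_text, &info, is_goodbye))
            return -1;

        /* info.sa/info.salen describe the destination address and
           info.sample_spec the stream format; set up or tear down the
           receiving stream here depending on is_goodbye. */

        pa_sdp_info_destroy(&info);   /* frees origin and session_name */
        return 0;
    }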