summaryrefslogtreecommitdiffstats
path: root/src/pulsecore/sink.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/pulsecore/sink.c')
-rw-r--r--src/pulsecore/sink.c3467
1 files changed, 3153 insertions, 314 deletions
diff --git a/src/pulsecore/sink.c b/src/pulsecore/sink.c
index 8acb7715..45761a6c 100644
--- a/src/pulsecore/sink.c
+++ b/src/pulsecore/sink.c
@@ -1,18 +1,19 @@
-/* $Id$ */
-
/***
This file is part of PulseAudio.
-
+
+ Copyright 2004-2006 Lennart Poettering
+ Copyright 2006 Pierre Ossman <ossman@cendio.se> for Cendio AB
+
PulseAudio is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
- by the Free Software Foundation; either version 2 of the License,
+ by the Free Software Foundation; either version 2.1 of the License,
or (at your option) any later version.
-
+
PulseAudio is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
-
+
You should have received a copy of the GNU Lesser General Public License
along with PulseAudio; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
@@ -23,14 +24,18 @@
#include <config.h>
#endif
+#include <stdio.h>
#include <stdlib.h>
-#include <assert.h>
#include <string.h>
-#include <stdio.h>
#include <pulse/introspect.h>
#include <pulse/utf8.h>
#include <pulse/xmalloc.h>
+#include <pulse/timeval.h>
+#include <pulse/util.h>
+#include <pulse/i18n.h>
+#include <pulse/rtclock.h>
+#include <pulse/internal.h>
#include <pulsecore/sink-input.h>
#include <pulsecore/namereg.h>
@@ -38,243 +43,943 @@
#include <pulsecore/sample-util.h>
#include <pulsecore/core-subscribe.h>
#include <pulsecore/log.h>
+#include <pulsecore/macro.h>
+#include <pulsecore/play-memblockq.h>
+#include <pulsecore/flist.h>
#include "sink.h"
#define MAX_MIX_CHANNELS 32
+#define MIX_BUFFER_LENGTH (PA_PAGE_SIZE)
+#define ABSOLUTE_MIN_LATENCY (500)
+#define ABSOLUTE_MAX_LATENCY (10*PA_USEC_PER_SEC)
+#define DEFAULT_FIXED_LATENCY (250*PA_USEC_PER_MSEC)
+
+PA_DEFINE_PUBLIC_CLASS(pa_sink, pa_msgobject);
+
+struct pa_sink_volume_change {
+ pa_usec_t at;
+ pa_cvolume hw_volume;
+
+ PA_LLIST_FIELDS(pa_sink_volume_change);
+};
+
+struct sink_message_set_port {
+ pa_device_port *port;
+ int ret;
+};
+
+static void sink_free(pa_object *s);
+
+static void pa_sink_volume_change_push(pa_sink *s);
+static void pa_sink_volume_change_flush(pa_sink *s);
+static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes);
+
+pa_sink_new_data* pa_sink_new_data_init(pa_sink_new_data *data) {
+ pa_assert(data);
+
+ pa_zero(*data);
+ data->proplist = pa_proplist_new();
+
+ return data;
+}
+
+void pa_sink_new_data_set_name(pa_sink_new_data *data, const char *name) {
+ pa_assert(data);
+
+ pa_xfree(data->name);
+ data->name = pa_xstrdup(name);
+}
+
+void pa_sink_new_data_set_sample_spec(pa_sink_new_data *data, const pa_sample_spec *spec) {
+ pa_assert(data);
+
+ if ((data->sample_spec_is_set = !!spec))
+ data->sample_spec = *spec;
+}
+
+void pa_sink_new_data_set_channel_map(pa_sink_new_data *data, const pa_channel_map *map) {
+ pa_assert(data);
+
+ if ((data->channel_map_is_set = !!map))
+ data->channel_map = *map;
+}
+
+void pa_sink_new_data_set_volume(pa_sink_new_data *data, const pa_cvolume *volume) {
+ pa_assert(data);
+
+ if ((data->volume_is_set = !!volume))
+ data->volume = *volume;
+}
+
+void pa_sink_new_data_set_muted(pa_sink_new_data *data, pa_bool_t mute) {
+ pa_assert(data);
+
+ data->muted_is_set = TRUE;
+ data->muted = !!mute;
+}
-#define CHECK_VALIDITY_RETURN_NULL(condition) \
-do {\
-if (!(condition)) \
- return NULL; \
-} while (0)
+void pa_sink_new_data_set_port(pa_sink_new_data *data, const char *port) {
+ pa_assert(data);
+ pa_xfree(data->active_port);
+ data->active_port = pa_xstrdup(port);
+}
+
+void pa_sink_new_data_done(pa_sink_new_data *data) {
+ pa_assert(data);
+
+ pa_proplist_free(data->proplist);
+
+ if (data->ports) {
+ pa_device_port *p;
+
+ while ((p = pa_hashmap_steal_first(data->ports)))
+ pa_device_port_free(p);
+
+ pa_hashmap_free(data->ports, NULL, NULL);
+ }
+
+ pa_xfree(data->name);
+ pa_xfree(data->active_port);
+}
+
+pa_device_port *pa_device_port_new(const char *name, const char *description, size_t extra) {
+ pa_device_port *p;
+
+ pa_assert(name);
+
+ p = pa_xmalloc(PA_ALIGN(sizeof(pa_device_port)) + extra);
+ p->name = pa_xstrdup(name);
+ p->description = pa_xstrdup(description);
+
+ p->priority = 0;
+
+ return p;
+}
+
+void pa_device_port_free(pa_device_port *p) {
+ pa_assert(p);
+
+ pa_xfree(p->name);
+ pa_xfree(p->description);
+ pa_xfree(p);
+}
+
+/* Called from main context */
+static void reset_callbacks(pa_sink *s) {
+ pa_assert(s);
+
+ s->set_state = NULL;
+ s->get_volume = NULL;
+ s->set_volume = NULL;
+ s->get_mute = NULL;
+ s->set_mute = NULL;
+ s->request_rewind = NULL;
+ s->update_requested_latency = NULL;
+ s->set_port = NULL;
+ s->get_formats = NULL;
+}
+
+/* Called from main context */
pa_sink* pa_sink_new(
pa_core *core,
- const char *driver,
- const char *name,
- int fail,
- const pa_sample_spec *spec,
- const pa_channel_map *map) {
-
+ pa_sink_new_data *data,
+ pa_sink_flags_t flags) {
+
pa_sink *s;
- char *n = NULL;
- char st[256];
- int r;
- pa_channel_map tmap;
-
- assert(core);
- assert(name);
- assert(spec);
-
- CHECK_VALIDITY_RETURN_NULL(pa_sample_spec_valid(spec));
-
- if (!map)
- map = pa_channel_map_init_auto(&tmap, spec->channels, PA_CHANNEL_MAP_DEFAULT);
-
- CHECK_VALIDITY_RETURN_NULL(map && pa_channel_map_valid(map));
- CHECK_VALIDITY_RETURN_NULL(map->channels == spec->channels);
- CHECK_VALIDITY_RETURN_NULL(!driver || pa_utf8_valid(driver));
- CHECK_VALIDITY_RETURN_NULL(pa_utf8_valid(name) && *name);
-
- s = pa_xnew(pa_sink, 1);
-
- if (!(name = pa_namereg_register(core, name, PA_NAMEREG_SINK, s, fail))) {
+ const char *name;
+ char st[PA_SAMPLE_SPEC_SNPRINT_MAX], cm[PA_CHANNEL_MAP_SNPRINT_MAX];
+ pa_source_new_data source_data;
+ const char *dn;
+ char *pt;
+
+ pa_assert(core);
+ pa_assert(data);
+ pa_assert(data->name);
+ pa_assert_ctl_context();
+
+ s = pa_msgobject_new(pa_sink);
+
+ if (!(name = pa_namereg_register(core, data->name, PA_NAMEREG_SINK, s, data->namereg_fail))) {
+ pa_log_debug("Failed to register name %s.", data->name);
+ pa_xfree(s);
+ return NULL;
+ }
+
+ pa_sink_new_data_set_name(data, name);
+
+ if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_NEW], data) < 0) {
+ pa_xfree(s);
+ pa_namereg_unregister(core, name);
+ return NULL;
+ }
+
+ /* FIXME, need to free s here on failure */
+
+ pa_return_null_if_fail(!data->driver || pa_utf8_valid(data->driver));
+ pa_return_null_if_fail(data->name && pa_utf8_valid(data->name) && data->name[0]);
+
+ pa_return_null_if_fail(data->sample_spec_is_set && pa_sample_spec_valid(&data->sample_spec));
+
+ if (!data->channel_map_is_set)
+ pa_return_null_if_fail(pa_channel_map_init_auto(&data->channel_map, data->sample_spec.channels, PA_CHANNEL_MAP_DEFAULT));
+
+ pa_return_null_if_fail(pa_channel_map_valid(&data->channel_map));
+ pa_return_null_if_fail(data->channel_map.channels == data->sample_spec.channels);
+
+ /* FIXME: There should probably be a general function for checking whether
+ * the sink volume is allowed to be set, like there is for sink inputs. */
+ pa_assert(!data->volume_is_set || !(flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
+
+ if (!data->volume_is_set) {
+ pa_cvolume_reset(&data->volume, data->sample_spec.channels);
+ data->save_volume = FALSE;
+ }
+
+ pa_return_null_if_fail(pa_cvolume_valid(&data->volume));
+ pa_return_null_if_fail(pa_cvolume_compatible(&data->volume, &data->sample_spec));
+
+ if (!data->muted_is_set)
+ data->muted = FALSE;
+
+ if (data->card)
+ pa_proplist_update(data->proplist, PA_UPDATE_MERGE, data->card->proplist);
+
+ pa_device_init_description(data->proplist);
+ pa_device_init_icon(data->proplist, TRUE);
+ pa_device_init_intended_roles(data->proplist);
+
+ if (pa_hook_fire(&core->hooks[PA_CORE_HOOK_SINK_FIXATE], data) < 0) {
pa_xfree(s);
+ pa_namereg_unregister(core, name);
return NULL;
}
- s->ref = 1;
+ s->parent.parent.free = sink_free;
+ s->parent.process_msg = pa_sink_process_msg;
+
s->core = core;
- s->state = PA_SINK_RUNNING;
+ s->state = PA_SINK_INIT;
+ s->flags = flags;
+ s->priority = 0;
+ s->suspend_cause = 0;
s->name = pa_xstrdup(name);
- s->description = NULL;
- s->driver = pa_xstrdup(driver);
- s->owner = NULL;
+ s->proplist = pa_proplist_copy(data->proplist);
+ s->driver = pa_xstrdup(pa_path_get_filename(data->driver));
+ s->module = data->module;
+ s->card = data->card;
+
+ s->priority = pa_device_init_priority(s->proplist);
+
+ s->sample_spec = data->sample_spec;
+ s->channel_map = data->channel_map;
- s->sample_spec = *spec;
- s->channel_map = *map;
-
s->inputs = pa_idxset_new(NULL, NULL);
+ s->n_corked = 0;
+ s->input_to_master = NULL;
- pa_cvolume_reset(&s->sw_volume, spec->channels);
- pa_cvolume_reset(&s->hw_volume, spec->channels);
- s->sw_muted = 0;
- s->hw_muted = 0;
-
- s->is_hardware = 0;
-
- s->get_latency = NULL;
- s->notify = NULL;
- s->set_hw_volume = NULL;
- s->get_hw_volume = NULL;
- s->set_hw_mute = NULL;
- s->get_hw_mute = NULL;
+ s->reference_volume = s->real_volume = data->volume;
+ pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
+ s->base_volume = PA_VOLUME_NORM;
+ s->n_volume_steps = PA_VOLUME_NORM+1;
+ s->muted = data->muted;
+ s->refresh_volume = s->refresh_muted = FALSE;
+
+ reset_callbacks(s);
s->userdata = NULL;
- r = pa_idxset_put(core->sinks, s, &s->index);
- assert(s->index != PA_IDXSET_INVALID && r >= 0);
-
- pa_sample_spec_snprint(st, sizeof(st), spec);
- pa_log_info(__FILE__": created %u \"%s\" with sample spec \"%s\"", s->index, s->name, st);
+ s->asyncmsgq = NULL;
+
+ /* As a minor optimization we just steal the list instead of
+ * copying it here */
+ s->ports = data->ports;
+ data->ports = NULL;
+
+ s->active_port = NULL;
+ s->save_port = FALSE;
+
+ if (data->active_port && s->ports)
+ if ((s->active_port = pa_hashmap_get(s->ports, data->active_port)))
+ s->save_port = data->save_port;
+
+ if (!s->active_port && s->ports) {
+ void *state;
+ pa_device_port *p;
+
+ PA_HASHMAP_FOREACH(p, s->ports, state)
+ if (!s->active_port || p->priority > s->active_port->priority)
+ s->active_port = p;
+ }
+
+ s->save_volume = data->save_volume;
+ s->save_muted = data->save_muted;
+
+ pa_silence_memchunk_get(
+ &core->silence_cache,
+ core->mempool,
+ &s->silence,
+ &s->sample_spec,
+ 0);
+
+ s->thread_info.rtpoll = NULL;
+ s->thread_info.inputs = pa_hashmap_new(pa_idxset_trivial_hash_func, pa_idxset_trivial_compare_func);
+ s->thread_info.soft_volume = s->soft_volume;
+ s->thread_info.soft_muted = s->muted;
+ s->thread_info.state = s->state;
+ s->thread_info.rewind_nbytes = 0;
+ s->thread_info.rewind_requested = FALSE;
+ s->thread_info.max_rewind = 0;
+ s->thread_info.max_request = 0;
+ s->thread_info.requested_latency_valid = FALSE;
+ s->thread_info.requested_latency = 0;
+ s->thread_info.min_latency = ABSOLUTE_MIN_LATENCY;
+ s->thread_info.max_latency = ABSOLUTE_MAX_LATENCY;
+ s->thread_info.fixed_latency = flags & PA_SINK_DYNAMIC_LATENCY ? 0 : DEFAULT_FIXED_LATENCY;
+
+ PA_LLIST_HEAD_INIT(pa_sink_volume_change, s->thread_info.volume_changes);
+ s->thread_info.volume_changes_tail = NULL;
+ pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
+ s->thread_info.volume_change_safety_margin = core->sync_volume_safety_margin_usec;
+ s->thread_info.volume_change_extra_delay = core->sync_volume_extra_delay_usec;
+
+ /* FIXME: This should probably be moved to pa_sink_put() */
+ pa_assert_se(pa_idxset_put(core->sinks, s, &s->index) >= 0);
+
+ if (s->card)
+ pa_assert_se(pa_idxset_put(s->card->sinks, s, NULL) >= 0);
+
+ pt = pa_proplist_to_string_sep(s->proplist, "\n ");
+ pa_log_info("Created sink %u \"%s\" with sample spec %s and channel map %s\n %s",
+ s->index,
+ s->name,
+ pa_sample_spec_snprint(st, sizeof(st), &s->sample_spec),
+ pa_channel_map_snprint(cm, sizeof(cm), &s->channel_map),
+ pt);
+ pa_xfree(pt);
+
+ pa_source_new_data_init(&source_data);
+ pa_source_new_data_set_sample_spec(&source_data, &s->sample_spec);
+ pa_source_new_data_set_channel_map(&source_data, &s->channel_map);
+ source_data.name = pa_sprintf_malloc("%s.monitor", name);
+ source_data.driver = data->driver;
+ source_data.module = data->module;
+ source_data.card = data->card;
+
+ dn = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
+ pa_proplist_setf(source_data.proplist, PA_PROP_DEVICE_DESCRIPTION, "Monitor of %s", dn ? dn : s->name);
+ pa_proplist_sets(source_data.proplist, PA_PROP_DEVICE_CLASS, "monitor");
+
+ s->monitor_source = pa_source_new(core, &source_data,
+ ((flags & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
+ ((flags & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0));
+
+ pa_source_new_data_done(&source_data);
+
+ if (!s->monitor_source) {
+ pa_sink_unlink(s);
+ pa_sink_unref(s);
+ return NULL;
+ }
- n = pa_sprintf_malloc("%s_monitor", name);
- s->monitor_source = pa_source_new(core, driver, n, 0, spec, map);
- assert(s->monitor_source);
- pa_xfree(n);
s->monitor_source->monitor_of = s;
- s->monitor_source->description = pa_sprintf_malloc("Monitor source of sink '%s'", s->name);
-
- pa_subscription_post(core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
-
+
+ pa_source_set_latency_range(s->monitor_source, s->thread_info.min_latency, s->thread_info.max_latency);
+ pa_source_set_fixed_latency(s->monitor_source, s->thread_info.fixed_latency);
+ pa_source_set_max_rewind(s->monitor_source, s->thread_info.max_rewind);
+
return s;
}
-void pa_sink_disconnect(pa_sink* s) {
+/* Called from main context */
+static int sink_set_state(pa_sink *s, pa_sink_state_t state) {
+ int ret;
+ pa_bool_t suspend_change;
+ pa_sink_state_t original_state;
+
+ pa_assert(s);
+ pa_assert_ctl_context();
+
+ if (s->state == state)
+ return 0;
+
+ original_state = s->state;
+
+ suspend_change =
+ (original_state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(state)) ||
+ (PA_SINK_IS_OPENED(original_state) && state == PA_SINK_SUSPENDED);
+
+ if (s->set_state)
+ if ((ret = s->set_state(s, state)) < 0)
+ return ret;
+
+ if (s->asyncmsgq)
+ if ((ret = pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_STATE, PA_UINT_TO_PTR(state), 0, NULL)) < 0) {
+
+ if (s->set_state)
+ s->set_state(s, original_state);
+
+ return ret;
+ }
+
+ s->state = state;
+
+ if (state != PA_SINK_UNLINKED) { /* if we enter UNLINKED state pa_sink_unlink() will fire the apropriate events */
+ pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_STATE_CHANGED], s);
+ pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
+ }
+
+ if (suspend_change) {
+ pa_sink_input *i;
+ uint32_t idx;
+
+ /* We're suspending or resuming, tell everyone about it */
+
+ PA_IDXSET_FOREACH(i, s->inputs, idx)
+ if (s->state == PA_SINK_SUSPENDED &&
+ (i->flags & PA_SINK_INPUT_KILL_ON_SUSPEND))
+ pa_sink_input_kill(i);
+ else if (i->suspend)
+ i->suspend(i, state == PA_SINK_SUSPENDED);
+
+ if (s->monitor_source)
+ pa_source_sync_suspend(s->monitor_source);
+ }
+
+ return 0;
+}
+
+/* Called from main context */
+void pa_sink_put(pa_sink* s) {
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+
+ pa_assert(s->state == PA_SINK_INIT);
+ pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || s->input_to_master);
+
+ /* The following fields must be initialized properly when calling _put() */
+ pa_assert(s->asyncmsgq);
+ pa_assert(s->thread_info.min_latency <= s->thread_info.max_latency);
+
+ /* Generally, flags should be initialized via pa_sink_new(). As a
+ * special exception we allow volume related flags to be set
+ * between _new() and _put(). */
+
+ /* XXX: Currently decibel volume is disabled for all sinks that use volume
+ * sharing. When the master sink supports decibel volume, it would be good
+ * to have the flag also in the filter sink, but currently we don't do that
+ * so that the flags of the filter sink never change when it's moved from
+ * a master sink to another. One solution for this problem would be to
+ * remove user-visible volume altogether from filter sinks when volume
+ * sharing is used, but the current approach was easier to implement... */
+ if (!(s->flags & PA_SINK_HW_VOLUME_CTRL) && !(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
+ s->flags |= PA_SINK_DECIBEL_VOLUME;
+
+ if ((s->flags & PA_SINK_DECIBEL_VOLUME) && s->core->flat_volumes)
+ s->flags |= PA_SINK_FLAT_VOLUME;
+
+ if (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) {
+ pa_sink *root_sink = s->input_to_master->sink;
+
+ while (root_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
+ root_sink = root_sink->input_to_master->sink;
+
+ s->reference_volume = root_sink->reference_volume;
+ pa_cvolume_remap(&s->reference_volume, &root_sink->channel_map, &s->channel_map);
+
+ s->real_volume = root_sink->real_volume;
+ pa_cvolume_remap(&s->real_volume, &root_sink->channel_map, &s->channel_map);
+ } else
+ /* We assume that if the sink implementor changed the default
+ * volume he did so in real_volume, because that is the usual
+ * place where he is supposed to place his changes. */
+ s->reference_volume = s->real_volume;
+
+ s->thread_info.soft_volume = s->soft_volume;
+ s->thread_info.soft_muted = s->muted;
+ pa_sw_cvolume_multiply(&s->thread_info.current_hw_volume, &s->soft_volume, &s->real_volume);
+
+ pa_assert((s->flags & PA_SINK_HW_VOLUME_CTRL)
+ || (s->base_volume == PA_VOLUME_NORM
+ && ((s->flags & PA_SINK_DECIBEL_VOLUME || (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)))));
+ pa_assert(!(s->flags & PA_SINK_DECIBEL_VOLUME) || s->n_volume_steps == PA_VOLUME_NORM+1);
+ pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == (s->thread_info.fixed_latency != 0));
+ pa_assert(!(s->flags & PA_SINK_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_LATENCY));
+ pa_assert(!(s->flags & PA_SINK_DYNAMIC_LATENCY) == !(s->monitor_source->flags & PA_SOURCE_DYNAMIC_LATENCY));
+ pa_assert(!(s->flags & PA_SINK_HW_VOLUME_CTRL) || s->set_volume);
+ pa_assert(!(s->flags & PA_SINK_SYNC_VOLUME) || (s->flags & PA_SINK_HW_VOLUME_CTRL));
+ pa_assert(!(s->flags & PA_SINK_SYNC_VOLUME) || s->write_volume);
+ pa_assert(!(s->flags & PA_SINK_HW_MUTE_CTRL) || s->set_mute);
+
+ pa_assert(s->monitor_source->thread_info.fixed_latency == s->thread_info.fixed_latency);
+ pa_assert(s->monitor_source->thread_info.min_latency == s->thread_info.min_latency);
+ pa_assert(s->monitor_source->thread_info.max_latency == s->thread_info.max_latency);
+
+ pa_assert_se(sink_set_state(s, PA_SINK_IDLE) == 0);
+
+ pa_source_put(s->monitor_source);
+
+ pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_NEW, s->index);
+ pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PUT], s);
+}
+
+/* Called from main context */
+void pa_sink_unlink(pa_sink* s) {
+ pa_bool_t linked;
pa_sink_input *i, *j = NULL;
-
- assert(s);
- assert(s->state == PA_SINK_RUNNING);
- pa_namereg_unregister(s->core, s->name);
-
+ pa_assert(s);
+ pa_assert_ctl_context();
+
+ /* Please note that pa_sink_unlink() does more than simply
+ * reversing pa_sink_put(). It also undoes the registrations
+ * already done in pa_sink_new()! */
+
+ /* All operations here shall be idempotent, i.e. pa_sink_unlink()
+ * may be called multiple times on the same sink without bad
+ * effects. */
+
+ linked = PA_SINK_IS_LINKED(s->state);
+
+ if (linked)
+ pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK], s);
+
+ if (s->state != PA_SINK_UNLINKED)
+ pa_namereg_unregister(s->core, s->name);
+ pa_idxset_remove_by_data(s->core->sinks, s, NULL);
+
+ if (s->card)
+ pa_idxset_remove_by_data(s->card->sinks, s, NULL);
+
while ((i = pa_idxset_first(s->inputs, NULL))) {
- assert(i != j);
+ pa_assert(i != j);
pa_sink_input_kill(i);
j = i;
}
- pa_source_disconnect(s->monitor_source);
+ if (linked)
+ sink_set_state(s, PA_SINK_UNLINKED);
+ else
+ s->state = PA_SINK_UNLINKED;
- pa_idxset_remove_by_data(s->core->sinks, s, NULL);
+ reset_callbacks(s);
+
+ if (s->monitor_source)
+ pa_source_unlink(s->monitor_source);
+
+ if (linked) {
+ pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
+ pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_UNLINK_POST], s);
+ }
+}
+
+/* Called from main context */
+static void sink_free(pa_object *o) {
+ pa_sink *s = PA_SINK(o);
+ pa_sink_input *i;
+
+ pa_assert(s);
+ pa_assert_ctl_context();
+ pa_assert(pa_sink_refcnt(s) == 0);
+
+ if (PA_SINK_IS_LINKED(s->state))
+ pa_sink_unlink(s);
+
+ pa_log_info("Freeing sink %u \"%s\"", s->index, s->name);
+
+ if (s->monitor_source) {
+ pa_source_unref(s->monitor_source);
+ s->monitor_source = NULL;
+ }
- s->get_latency = NULL;
- s->notify = NULL;
- s->get_hw_volume = NULL;
- s->set_hw_volume = NULL;
- s->set_hw_mute = NULL;
- s->get_hw_mute = NULL;
-
- s->state = PA_SINK_DISCONNECTED;
- pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK | PA_SUBSCRIPTION_EVENT_REMOVE, s->index);
-}
-
-static void sink_free(pa_sink *s) {
- assert(s);
- assert(!s->ref);
-
- if (s->state != PA_SINK_DISCONNECTED)
- pa_sink_disconnect(s);
-
- pa_log_info(__FILE__": freed %u \"%s\"", s->index, s->name);
-
- pa_source_unref(s->monitor_source);
- s->monitor_source = NULL;
-
pa_idxset_free(s->inputs, NULL, NULL);
+ while ((i = pa_hashmap_steal_first(s->thread_info.inputs)))
+ pa_sink_input_unref(i);
+
+ pa_hashmap_free(s->thread_info.inputs, NULL, NULL);
+
+ if (s->silence.memblock)
+ pa_memblock_unref(s->silence.memblock);
+
pa_xfree(s->name);
- pa_xfree(s->description);
pa_xfree(s->driver);
+
+ if (s->proplist)
+ pa_proplist_free(s->proplist);
+
+ if (s->ports) {
+ pa_device_port *p;
+
+ while ((p = pa_hashmap_steal_first(s->ports)))
+ pa_device_port_free(p);
+
+ pa_hashmap_free(s->ports, NULL, NULL);
+ }
+
pa_xfree(s);
}
-void pa_sink_unref(pa_sink*s) {
- assert(s);
- assert(s->ref >= 1);
+/* Called from main context, and not while the IO thread is active, please */
+void pa_sink_set_asyncmsgq(pa_sink *s, pa_asyncmsgq *q) {
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
- if (!(--s->ref))
- sink_free(s);
+ s->asyncmsgq = q;
+
+ if (s->monitor_source)
+ pa_source_set_asyncmsgq(s->monitor_source, q);
}
-pa_sink* pa_sink_ref(pa_sink *s) {
- assert(s);
- assert(s->ref >= 1);
-
- s->ref++;
- return s;
+/* Called from main context, and not while the IO thread is active, please */
+void pa_sink_update_flags(pa_sink *s, pa_sink_flags_t mask, pa_sink_flags_t value) {
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+
+ if (mask == 0)
+ return;
+
+ /* For now, allow only a minimal set of flags to be changed. */
+ pa_assert((mask & ~(PA_SINK_DYNAMIC_LATENCY|PA_SINK_LATENCY)) == 0);
+
+ s->flags = (s->flags & ~mask) | (value & mask);
+
+ pa_source_update_flags(s->monitor_source,
+ ((mask & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
+ ((mask & PA_SINK_DYNAMIC_LATENCY) ? PA_SOURCE_DYNAMIC_LATENCY : 0),
+ ((value & PA_SINK_LATENCY) ? PA_SOURCE_LATENCY : 0) |
+ ((value & PA_SINK_DYNAMIC_LATENCY) ? PA_SINK_DYNAMIC_LATENCY : 0));
}
-void pa_sink_notify(pa_sink*s) {
- assert(s);
- assert(s->ref >= 1);
+/* Called from IO context, or before _put() from main context */
+void pa_sink_set_rtpoll(pa_sink *s, pa_rtpoll *p) {
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
- if (s->notify)
- s->notify(s);
+ s->thread_info.rtpoll = p;
+
+ if (s->monitor_source)
+ pa_source_set_rtpoll(s->monitor_source, p);
}
-static unsigned fill_mix_info(pa_sink *s, pa_mix_info *info, unsigned maxinfo) {
- uint32_t idx = PA_IDXSET_INVALID;
- pa_sink_input *i;
- unsigned n = 0;
-
- assert(s);
- assert(s->ref >= 1);
- assert(info);
-
- for (i = pa_idxset_first(s->inputs, &idx); maxinfo > 0 && i; i = pa_idxset_next(s->inputs, &idx)) {
- /* Increase ref counter, to make sure that this input doesn't
- * vanish while we still need it */
+/* Called from main context */
+int pa_sink_update_status(pa_sink*s) {
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+
+ if (s->state == PA_SINK_SUSPENDED)
+ return 0;
+
+ return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
+}
+
+/* Called from main context */
+int pa_sink_suspend(pa_sink *s, pa_bool_t suspend, pa_suspend_cause_t cause) {
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+ pa_assert(cause != 0);
+
+ if (suspend) {
+ s->suspend_cause |= cause;
+ s->monitor_source->suspend_cause |= cause;
+ } else {
+ s->suspend_cause &= ~cause;
+ s->monitor_source->suspend_cause &= ~cause;
+ }
+
+ if ((pa_sink_get_state(s) == PA_SINK_SUSPENDED) == !!s->suspend_cause)
+ return 0;
+
+ pa_log_debug("Suspend cause of sink %s is 0x%04x, %s", s->name, s->suspend_cause, s->suspend_cause ? "suspending" : "resuming");
+
+ if (s->suspend_cause)
+ return sink_set_state(s, PA_SINK_SUSPENDED);
+ else
+ return sink_set_state(s, pa_sink_used_by(s) ? PA_SINK_RUNNING : PA_SINK_IDLE);
+}
+
+/* Called from main context */
+pa_queue *pa_sink_move_all_start(pa_sink *s, pa_queue *q) {
+ pa_sink_input *i, *n;
+ uint32_t idx;
+
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+
+ if (!q)
+ q = pa_queue_new();
+
+ for (i = PA_SINK_INPUT(pa_idxset_first(s->inputs, &idx)); i; i = n) {
+ n = PA_SINK_INPUT(pa_idxset_next(s->inputs, &idx));
+
pa_sink_input_ref(i);
- if (pa_sink_input_peek(i, &info->chunk, &info->volume) < 0) {
+ if (pa_sink_input_start_move(i) >= 0)
+ pa_queue_push(q, i);
+ else
pa_sink_input_unref(i);
+ }
+
+ return q;
+}
+
+/* Called from main context */
+void pa_sink_move_all_finish(pa_sink *s, pa_queue *q, pa_bool_t save) {
+ pa_sink_input *i;
+
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+ pa_assert(q);
+
+ while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
+ if (pa_sink_input_finish_move(i, s, save) < 0)
+ pa_sink_input_fail_move(i);
+
+ pa_sink_input_unref(i);
+ }
+
+ pa_queue_free(q, NULL, NULL);
+}
+
+/* Called from main context */
+void pa_sink_move_all_fail(pa_queue *q) {
+ pa_sink_input *i;
+
+ pa_assert_ctl_context();
+ pa_assert(q);
+
+ while ((i = PA_SINK_INPUT(pa_queue_pop(q)))) {
+ pa_sink_input_fail_move(i);
+ pa_sink_input_unref(i);
+ }
+
+ pa_queue_free(q, NULL, NULL);
+}
+
+/* Called from IO thread context */
+void pa_sink_process_rewind(pa_sink *s, size_t nbytes) {
+ pa_sink_input *i;
+ void *state = NULL;
+
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+ pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
+
+ /* If nobody requested this and this is actually no real rewind
+ * then we can short cut this. Please note that this means that
+ * not all rewind requests triggered upstream will always be
+ * translated in actual requests! */
+ if (!s->thread_info.rewind_requested && nbytes <= 0)
+ return;
+
+ s->thread_info.rewind_nbytes = 0;
+ s->thread_info.rewind_requested = FALSE;
+
+ if (s->thread_info.state == PA_SINK_SUSPENDED)
+ return;
+
+ if (nbytes > 0) {
+ pa_log_debug("Processing rewind...");
+ if (s->flags & PA_SINK_SYNC_VOLUME)
+ pa_sink_volume_change_rewind(s, nbytes);
+ }
+
+ PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
+ pa_sink_input_assert_ref(i);
+ pa_sink_input_process_rewind(i, nbytes);
+ }
+
+ if (nbytes > 0) {
+ if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
+ pa_source_process_rewind(s->monitor_source, nbytes);
+ }
+}
+
+/* Called from IO thread context */
+static unsigned fill_mix_info(pa_sink *s, size_t *length, pa_mix_info *info, unsigned maxinfo) {
+ pa_sink_input *i;
+ unsigned n = 0;
+ void *state = NULL;
+ size_t mixlength = *length;
+
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+ pa_assert(info);
+
+ while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)) && maxinfo > 0) {
+ pa_sink_input_assert_ref(i);
+
+ pa_sink_input_peek(i, *length, &info->chunk, &info->volume);
+
+ if (mixlength == 0 || info->chunk.length < mixlength)
+ mixlength = info->chunk.length;
+
+ if (pa_memblock_is_silence(info->chunk.memblock)) {
+ pa_memblock_unref(info->chunk.memblock);
continue;
}
- info->userdata = i;
-
- assert(info->chunk.memblock);
- assert(info->chunk.memblock->data);
- assert(info->chunk.length);
-
+ info->userdata = pa_sink_input_ref(i);
+
+ pa_assert(info->chunk.memblock);
+ pa_assert(info->chunk.length > 0);
+
info++;
- maxinfo--;
n++;
+ maxinfo--;
}
+ if (mixlength > 0)
+ *length = mixlength;
+
return n;
}
-static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned maxinfo, size_t length) {
- assert(s);
- assert(s->ref >= 1);
- assert(info);
+/* Called from IO thread context */
+static void inputs_drop(pa_sink *s, pa_mix_info *info, unsigned n, pa_memchunk *result) {
+ pa_sink_input *i;
+ void *state;
+ unsigned p = 0;
+ unsigned n_unreffed = 0;
+
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+ pa_assert(result);
+ pa_assert(result->memblock);
+ pa_assert(result->length > 0);
+
+ /* We optimize for the case where the order of the inputs has not changed */
+
+ PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
+ unsigned j;
+ pa_mix_info* m = NULL;
- for (; maxinfo > 0; maxinfo--, info++) {
- pa_sink_input *i = info->userdata;
-
- assert(i);
- assert(info->chunk.memblock);
+ pa_sink_input_assert_ref(i);
+
+ /* Let's try to find the matching entry info the pa_mix_info array */
+ for (j = 0; j < n; j ++) {
+
+ if (info[p].userdata == i) {
+ m = info + p;
+ break;
+ }
+
+ p++;
+ if (p >= n)
+ p = 0;
+ }
/* Drop read data */
- pa_sink_input_drop(i, &info->chunk, length);
- pa_memblock_unref(info->chunk.memblock);
+ pa_sink_input_drop(i, result->length);
+
+ if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state)) {
+
+ if (pa_hashmap_size(i->thread_info.direct_outputs) > 0) {
+ void *ostate = NULL;
+ pa_source_output *o;
+ pa_memchunk c;
+
+ if (m && m->chunk.memblock) {
+ c = m->chunk;
+ pa_memblock_ref(c.memblock);
+ pa_assert(result->length <= c.length);
+ c.length = result->length;
+
+ pa_memchunk_make_writable(&c, 0);
+ pa_volume_memchunk(&c, &s->sample_spec, &m->volume);
+ } else {
+ c = s->silence;
+ pa_memblock_ref(c.memblock);
+ pa_assert(result->length <= c.length);
+ c.length = result->length;
+ }
+
+ while ((o = pa_hashmap_iterate(i->thread_info.direct_outputs, &ostate, NULL))) {
+ pa_source_output_assert_ref(o);
+ pa_assert(o->direct_on_input == i);
+ pa_source_post_direct(s->monitor_source, o, &c);
+ }
+
+ pa_memblock_unref(c.memblock);
+ }
+ }
- /* Decrease ref counter */
- pa_sink_input_unref(i);
- info->userdata = NULL;
+ if (m) {
+ if (m->chunk.memblock)
+ pa_memblock_unref(m->chunk.memblock);
+ pa_memchunk_reset(&m->chunk);
+
+ pa_sink_input_unref(m->userdata);
+ m->userdata = NULL;
+
+ n_unreffed += 1;
+ }
}
+
+ /* Now drop references to entries that are included in the
+ * pa_mix_info array but don't exist anymore */
+
+ if (n_unreffed < n) {
+ for (; n > 0; info++, n--) {
+ if (info->userdata)
+ pa_sink_input_unref(info->userdata);
+ if (info->chunk.memblock)
+ pa_memblock_unref(info->chunk.memblock);
+ }
+ }
+
+ if (s->monitor_source && PA_SOURCE_IS_LINKED(s->monitor_source->thread_info.state))
+ pa_source_post(s->monitor_source, result);
}
-
-int pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
+
+/* Called from IO thread context */
+void pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
pa_mix_info info[MAX_MIX_CHANNELS];
unsigned n;
- int r = -1;
-
- assert(s);
- assert(s->ref >= 1);
- assert(length);
- assert(result);
+ size_t block_size_max;
+
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+ pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
+ pa_assert(pa_frame_aligned(length, &s->sample_spec));
+ pa_assert(result);
+
+ pa_assert(!s->thread_info.rewind_requested);
+ pa_assert(s->thread_info.rewind_nbytes == 0);
+
+ if (s->thread_info.state == PA_SINK_SUSPENDED) {
+ result->memblock = pa_memblock_ref(s->silence.memblock);
+ result->index = s->silence.index;
+ result->length = PA_MIN(s->silence.length, length);
+ return;
+ }
pa_sink_ref(s);
-
- n = fill_mix_info(s, info, MAX_MIX_CHANNELS);
- if (n <= 0)
- goto finish;
+ if (length <= 0)
+ length = pa_frame_align(MIX_BUFFER_LENGTH, &s->sample_spec);
+
+ block_size_max = pa_mempool_block_size_max(s->core->mempool);
+ if (length > block_size_max)
+ length = pa_frame_align(block_size_max, &s->sample_spec);
+
+ pa_assert(length > 0);
+
+ n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
+
+ if (n == 0) {
+
+ *result = s->silence;
+ pa_memblock_ref(result->memblock);
+
+ if (result->length > length)
+ result->length = length;
- if (n == 1) {
+ } else if (n == 1) {
pa_cvolume volume;
*result = info[0].chunk;
@@ -283,233 +988,2367 @@ int pa_sink_render(pa_sink*s, size_t length, pa_memchunk *result) {
if (result->length > length)
result->length = length;
- pa_sw_cvolume_multiply(&volume, &s->sw_volume, &info[0].volume);
-
- if (s->sw_muted || !pa_cvolume_is_norm(&volume)) {
- pa_memchunk_make_writable(result, s->core->memblock_stat, 0);
- if (s->sw_muted)
- pa_silence_memchunk(result, &s->sample_spec);
- else
- pa_volume_memchunk(result, &s->sample_spec, &volume);
+ pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
+
+ if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume)) {
+ pa_memblock_unref(result->memblock);
+ pa_silence_memchunk_get(&s->core->silence_cache,
+ s->core->mempool,
+ result,
+ &s->sample_spec,
+ result->length);
+ } else if (!pa_cvolume_is_norm(&volume)) {
+ pa_memchunk_make_writable(result, 0);
+ pa_volume_memchunk(result, &s->sample_spec, &volume);
}
} else {
- result->memblock = pa_memblock_new(length, s->core->memblock_stat);
- assert(result->memblock);
+ void *ptr;
+ result->memblock = pa_memblock_new(s->core->mempool, length);
-/* pa_log("mixing %i", n); */
+ ptr = pa_memblock_acquire(result->memblock);
+ result->length = pa_mix(info, n,
+ ptr, length,
+ &s->sample_spec,
+ &s->thread_info.soft_volume,
+ s->thread_info.soft_muted);
+ pa_memblock_release(result->memblock);
- result->length = pa_mix(info, n, result->memblock->data, length,
- &s->sample_spec, &s->sw_volume, s->sw_muted);
result->index = 0;
}
- inputs_drop(s, info, n, result->length);
- pa_source_post(s->monitor_source, result);
-
- r = 0;
+ inputs_drop(s, info, n, result);
-finish:
pa_sink_unref(s);
-
- return r;
}
-int pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
+/* Called from IO thread context */
+void pa_sink_render_into(pa_sink*s, pa_memchunk *target) {
pa_mix_info info[MAX_MIX_CHANNELS];
unsigned n;
- int r = -1;
-
- assert(s);
- assert(s->ref >= 1);
- assert(target);
- assert(target->memblock);
- assert(target->length);
- assert(target->memblock->data);
+ size_t length, block_size_max;
+
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+ pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
+ pa_assert(target);
+ pa_assert(target->memblock);
+ pa_assert(target->length > 0);
+ pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
+
+ pa_assert(!s->thread_info.rewind_requested);
+ pa_assert(s->thread_info.rewind_nbytes == 0);
+
+ if (s->thread_info.state == PA_SINK_SUSPENDED) {
+ pa_silence_memchunk(target, &s->sample_spec);
+ return;
+ }
pa_sink_ref(s);
-
- n = fill_mix_info(s, info, MAX_MIX_CHANNELS);
- if (n <= 0)
- goto finish;
+ length = target->length;
+ block_size_max = pa_mempool_block_size_max(s->core->mempool);
+ if (length > block_size_max)
+ length = pa_frame_align(block_size_max, &s->sample_spec);
+
+ pa_assert(length > 0);
- if (n == 1) {
+ n = fill_mix_info(s, &length, info, MAX_MIX_CHANNELS);
+
+ if (n == 0) {
+ if (target->length > length)
+ target->length = length;
+
+ pa_silence_memchunk(target, &s->sample_spec);
+ } else if (n == 1) {
pa_cvolume volume;
- if (target->length > info[0].chunk.length)
- target->length = info[0].chunk.length;
-
- memcpy((uint8_t*) target->memblock->data + target->index,
- (uint8_t*) info[0].chunk.memblock->data + info[0].chunk.index,
- target->length);
+ if (target->length > length)
+ target->length = length;
- pa_sw_cvolume_multiply(&volume, &s->sw_volume, &info[0].volume);
+ pa_sw_cvolume_multiply(&volume, &s->thread_info.soft_volume, &info[0].volume);
+
+ if (s->thread_info.soft_muted || pa_cvolume_is_muted(&volume))
+ pa_silence_memchunk(target, &s->sample_spec);
+ else {
+ pa_memchunk vchunk;
+
+ vchunk = info[0].chunk;
+ pa_memblock_ref(vchunk.memblock);
+
+ if (vchunk.length > length)
+ vchunk.length = length;
+
+ if (!pa_cvolume_is_norm(&volume)) {
+ pa_memchunk_make_writable(&vchunk, 0);
+ pa_volume_memchunk(&vchunk, &s->sample_spec, &volume);
+ }
+
+ pa_memchunk_memcpy(target, &vchunk);
+ pa_memblock_unref(vchunk.memblock);
+ }
+
+ } else {
+ void *ptr;
+
+ ptr = pa_memblock_acquire(target->memblock);
- if (s->sw_muted)
- pa_silence_memchunk(target, &s->sample_spec);
- else if (!pa_cvolume_is_norm(&volume))
- pa_volume_memchunk(target, &s->sample_spec, &volume);
- } else
target->length = pa_mix(info, n,
- (uint8_t*) target->memblock->data + target->index,
- target->length,
+ (uint8_t*) ptr + target->index, length,
&s->sample_spec,
- &s->sw_volume,
- s->sw_muted);
-
- inputs_drop(s, info, n, target->length);
- pa_source_post(s->monitor_source, target);
+ &s->thread_info.soft_volume,
+ s->thread_info.soft_muted);
- r = 0;
+ pa_memblock_release(target->memblock);
+ }
+
+ inputs_drop(s, info, n, target);
-finish:
pa_sink_unref(s);
-
- return r;
}
+/* Called from IO thread context */
void pa_sink_render_into_full(pa_sink *s, pa_memchunk *target) {
pa_memchunk chunk;
size_t l, d;
-
- assert(s);
- assert(s->ref >= 1);
- assert(target);
- assert(target->memblock);
- assert(target->length);
- assert(target->memblock->data);
+
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+ pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
+ pa_assert(target);
+ pa_assert(target->memblock);
+ pa_assert(target->length > 0);
+ pa_assert(pa_frame_aligned(target->length, &s->sample_spec));
+
+ pa_assert(!s->thread_info.rewind_requested);
+ pa_assert(s->thread_info.rewind_nbytes == 0);
+
+ if (s->thread_info.state == PA_SINK_SUSPENDED) {
+ pa_silence_memchunk(target, &s->sample_spec);
+ return;
+ }
pa_sink_ref(s);
-
+
l = target->length;
d = 0;
while (l > 0) {
chunk = *target;
chunk.index += d;
chunk.length -= d;
-
- if (pa_sink_render_into(s, &chunk) < 0)
- break;
+
+ pa_sink_render_into(s, &chunk);
d += chunk.length;
l -= chunk.length;
}
- if (l > 0) {
- chunk = *target;
- chunk.index += d;
- chunk.length -= d;
- pa_silence_memchunk(&chunk, &s->sample_spec);
- }
-
pa_sink_unref(s);
}
+/* Called from IO thread context */
void pa_sink_render_full(pa_sink *s, size_t length, pa_memchunk *result) {
- assert(s);
- assert(s->ref >= 1);
- assert(length);
- assert(result);
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+ pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
+ pa_assert(length > 0);
+ pa_assert(pa_frame_aligned(length, &s->sample_spec));
+ pa_assert(result);
+
+ pa_assert(!s->thread_info.rewind_requested);
+ pa_assert(s->thread_info.rewind_nbytes == 0);
+
+ pa_sink_ref(s);
+
+ pa_sink_render(s, length, result);
+
+ if (result->length < length) {
+ pa_memchunk chunk;
+
+ pa_memchunk_make_writable(result, length);
+
+ chunk.memblock = result->memblock;
+ chunk.index = result->index + result->length;
+ chunk.length = length - result->length;
+
+ pa_sink_render_into_full(s, &chunk);
- /*** This needs optimization ***/
-
- result->memblock = pa_memblock_new(result->length = length, s->core->memblock_stat);
- result->index = 0;
+ result->length = length;
+ }
- pa_sink_render_into_full(s, result);
+ pa_sink_unref(s);
}
+/* Called from main thread */
pa_usec_t pa_sink_get_latency(pa_sink *s) {
- assert(s);
- assert(s->ref >= 1);
+ pa_usec_t usec = 0;
+
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+
+ /* The returned value is supposed to be in the time domain of the sound card! */
- if (!s->get_latency)
+ if (s->state == PA_SINK_SUSPENDED)
return 0;
- return s->get_latency(s);
+ if (!(s->flags & PA_SINK_LATENCY))
+ return 0;
+
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) == 0);
+
+ return usec;
}
-void pa_sink_set_owner(pa_sink *s, pa_module *m) {
- assert(s);
- assert(s->ref >= 1);
-
- s->owner = m;
+/* Called from IO thread */
+pa_usec_t pa_sink_get_latency_within_thread(pa_sink *s) {
+ pa_usec_t usec = 0;
+ pa_msgobject *o;
- if (s->monitor_source)
- pa_source_set_owner(s->monitor_source, m);
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+ pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
+
+ /* The returned value is supposed to be in the time domain of the sound card! */
+
+ if (s->thread_info.state == PA_SINK_SUSPENDED)
+ return 0;
+
+ if (!(s->flags & PA_SINK_LATENCY))
+ return 0;
+
+ o = PA_MSGOBJECT(s);
+
+ /* FIXME: We probably should make this a proper vtable callback instead of going through process_msg() */
+
+ if (o->process_msg(o, PA_SINK_MESSAGE_GET_LATENCY, &usec, 0, NULL) < 0)
+ return -1;
+
+ return usec;
+}
+
+/* Called from the main thread (and also from the IO thread while the main
+ * thread is waiting).
+ *
+ * When a sink uses volume sharing, it never has the PA_SINK_FLAT_VOLUME flag
+ * set. Instead, flat volume mode is detected by checking whether the root sink
+ * has the flag set. */
+pa_bool_t pa_sink_flat_volume_enabled(pa_sink *s) {
+ pa_sink_assert_ref(s);
+
+ while (s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
+ s = s->input_to_master->sink;
+
+ return (s->flags & PA_SINK_FLAT_VOLUME);
+}
+
+/* Called from main context */
+pa_bool_t pa_sink_is_passthrough(pa_sink *s) {
+ pa_sink_input *alt_i;
+ uint32_t idx;
+
+ pa_sink_assert_ref(s);
+
+ /* one and only one PASSTHROUGH input can possibly be connected */
+ if (pa_idxset_size(s->inputs) == 1) {
+ alt_i = pa_idxset_first(s->inputs, &idx);
+
+ if (pa_sink_input_is_passthrough(alt_i))
+ return TRUE;
+ }
+
+ return FALSE;
}
-void pa_sink_set_volume(pa_sink *s, pa_mixer_t m, const pa_cvolume *volume) {
- pa_cvolume *v;
-
- assert(s);
- assert(s->ref >= 1);
- assert(volume);
+/* Called from main context. */
+static void compute_reference_ratio(pa_sink_input *i) {
+ unsigned c = 0;
+ pa_cvolume remapped;
+
+ pa_assert(i);
+ pa_assert(pa_sink_flat_volume_enabled(i->sink));
+
+ /*
+ * Calculates the reference ratio from the sink's reference
+ * volume. This basically calculates:
+ *
+ * i->reference_ratio = i->volume / i->sink->reference_volume
+ */
+
+ remapped = i->sink->reference_volume;
+ pa_cvolume_remap(&remapped, &i->sink->channel_map, &i->channel_map);
+
+ i->reference_ratio.channels = i->sample_spec.channels;
+
+ for (c = 0; c < i->sample_spec.channels; c++) {
+
+ /* We don't update when the sink volume is 0 anyway */
+ if (remapped.values[c] <= PA_VOLUME_MUTED)
+ continue;
+
+ /* Don't update the reference ratio unless necessary */
+ if (pa_sw_volume_multiply(
+ i->reference_ratio.values[c],
+ remapped.values[c]) == i->volume.values[c])
+ continue;
+
+ i->reference_ratio.values[c] = pa_sw_volume_divide(
+ i->volume.values[c],
+ remapped.values[c]);
+ }
+}
+
+/* Called from main context. Only called for the root sink in volume sharing
+ * cases, except for internal recursive calls. */
+static void compute_reference_ratios(pa_sink *s) {
+ uint32_t idx;
+ pa_sink_input *i;
+
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+ pa_assert(pa_sink_flat_volume_enabled(s));
+
+ PA_IDXSET_FOREACH(i, s->inputs, idx) {
+ compute_reference_ratio(i);
+
+ if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
+ compute_reference_ratios(i->origin_sink);
+ }
+}
+
+/* Called from main context. Only called for the root sink in volume sharing
+ * cases, except for internal recursive calls. */
+static void compute_real_ratios(pa_sink *s) {
+ pa_sink_input *i;
+ uint32_t idx;
+
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+ pa_assert(pa_sink_flat_volume_enabled(s));
+
+ PA_IDXSET_FOREACH(i, s->inputs, idx) {
+ unsigned c;
+ pa_cvolume remapped;
+
+ if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
+ /* The origin sink uses volume sharing, so this input's real ratio
+ * is handled as a special case - the real ratio must be 0 dB, and
+ * as a result i->soft_volume must equal i->volume_factor. */
+ pa_cvolume_reset(&i->real_ratio, i->real_ratio.channels);
+ i->soft_volume = i->volume_factor;
+
+ compute_real_ratios(i->origin_sink);
+
+ continue;
+ }
+
+ /*
+ * This basically calculates:
+ *
+ * i->real_ratio := i->volume / s->real_volume
+ * i->soft_volume := i->real_ratio * i->volume_factor
+ */
+
+ remapped = s->real_volume;
+ pa_cvolume_remap(&remapped, &s->channel_map, &i->channel_map);
+
+ i->real_ratio.channels = i->sample_spec.channels;
+ i->soft_volume.channels = i->sample_spec.channels;
+
+ for (c = 0; c < i->sample_spec.channels; c++) {
+
+ if (remapped.values[c] <= PA_VOLUME_MUTED) {
+ /* We leave i->real_ratio untouched */
+ i->soft_volume.values[c] = PA_VOLUME_MUTED;
+ continue;
+ }
+
+ /* Don't lose accuracy unless necessary */
+ if (pa_sw_volume_multiply(
+ i->real_ratio.values[c],
+ remapped.values[c]) != i->volume.values[c])
+
+ i->real_ratio.values[c] = pa_sw_volume_divide(
+ i->volume.values[c],
+ remapped.values[c]);
- if (m == PA_MIXER_HARDWARE && s->set_hw_volume)
- v = &s->hw_volume;
+ i->soft_volume.values[c] = pa_sw_volume_multiply(
+ i->real_ratio.values[c],
+ i->volume_factor.values[c]);
+ }
+
+ /* We don't copy the soft_volume to the thread_info data
+ * here. That must be done by the caller */
+ }
+}
+
+static pa_cvolume *cvolume_remap_minimal_impact(
+ pa_cvolume *v,
+ const pa_cvolume *template,
+ const pa_channel_map *from,
+ const pa_channel_map *to) {
+
+ pa_cvolume t;
+
+ pa_assert(v);
+ pa_assert(template);
+ pa_assert(from);
+ pa_assert(to);
+ pa_assert(pa_cvolume_compatible_with_channel_map(v, from));
+ pa_assert(pa_cvolume_compatible_with_channel_map(template, to));
+
+ /* Much like pa_cvolume_remap(), but tries to minimize impact when
+ * mapping from sink input to sink volumes:
+ *
+ * If template is a possible remapping from v it is used instead
+ * of remapping anew.
+ *
+ * If the channel maps don't match we set an all-channel volume on
+ * the sink to ensure that changing a volume on one stream has no
+ * effect that cannot be compensated for in another stream that
+ * does not have the same channel map as the sink. */
+
+ if (pa_channel_map_equal(from, to))
+ return v;
+
+ t = *template;
+ if (pa_cvolume_equal(pa_cvolume_remap(&t, to, from), v)) {
+ *v = *template;
+ return v;
+ }
+
+ pa_cvolume_set(v, to->channels, pa_cvolume_max(v));
+ return v;
+}
+
+/* Called from main thread. Only called for the root sink in volume sharing
+ * cases, except for internal recursive calls. */
+static void get_maximum_input_volume(pa_sink *s, pa_cvolume *max_volume, const pa_channel_map *channel_map) {
+ pa_sink_input *i;
+ uint32_t idx;
+
+ pa_sink_assert_ref(s);
+ pa_assert(max_volume);
+ pa_assert(channel_map);
+ pa_assert(pa_sink_flat_volume_enabled(s));
+
+ PA_IDXSET_FOREACH(i, s->inputs, idx) {
+ pa_cvolume remapped;
+
+ if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
+ get_maximum_input_volume(i->origin_sink, max_volume, channel_map);
+
+ /* Ignore this input. The origin sink uses volume sharing, so this
+ * input's volume will be set to be equal to the root sink's real
+ * volume. Obviously this input's current volume must not then
+ * affect what the root sink's real volume will be. */
+ continue;
+ }
+
+ remapped = i->volume;
+ cvolume_remap_minimal_impact(&remapped, max_volume, &i->channel_map, channel_map);
+ pa_cvolume_merge(max_volume, max_volume, &remapped);
+ }
+}
+
+/* Called from main thread. Only called for the root sink in volume sharing
+ * cases, except for internal recursive calls. */
+static pa_bool_t has_inputs(pa_sink *s) {
+ pa_sink_input *i;
+ uint32_t idx;
+
+ pa_sink_assert_ref(s);
+
+ PA_IDXSET_FOREACH(i, s->inputs, idx) {
+ if (!i->origin_sink || !(i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER) || has_inputs(i->origin_sink))
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/* Called from main thread. Only called for the root sink in volume sharing
+ * cases, except for internal recursive calls. */
+static void update_real_volume(pa_sink *s, const pa_cvolume *new_volume, pa_channel_map *channel_map) {
+ pa_sink_input *i;
+ uint32_t idx;
+
+ pa_sink_assert_ref(s);
+ pa_assert(new_volume);
+ pa_assert(channel_map);
+
+ s->real_volume = *new_volume;
+ pa_cvolume_remap(&s->real_volume, channel_map, &s->channel_map);
+
+ PA_IDXSET_FOREACH(i, s->inputs, idx) {
+ if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
+ if (pa_sink_flat_volume_enabled(s)) {
+ pa_cvolume old_volume = i->volume;
+
+ /* Follow the root sink's real volume. */
+ i->volume = *new_volume;
+ pa_cvolume_remap(&i->volume, channel_map, &i->channel_map);
+ compute_reference_ratio(i);
+
+ /* The volume changed, let's tell people so */
+ if (!pa_cvolume_equal(&old_volume, &i->volume)) {
+ if (i->volume_changed)
+ i->volume_changed(i);
+
+ pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
+ }
+ }
+
+ update_real_volume(i->origin_sink, new_volume, channel_map);
+ }
+ }
+}
+
+/* Called from main thread. Only called for the root sink in shared volume
+ * cases. */
+static void compute_real_volume(pa_sink *s) {
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+ pa_assert(pa_sink_flat_volume_enabled(s));
+ pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
+
+ /* This determines the maximum volume of all streams and sets
+ * s->real_volume accordingly. */
+
+ if (!has_inputs(s)) {
+ /* In the special case that we have no sink inputs we leave the
+ * volume unmodified. */
+ update_real_volume(s, &s->reference_volume, &s->channel_map);
+ return;
+ }
+
+ pa_cvolume_mute(&s->real_volume, s->channel_map.channels);
+
+ /* First let's determine the new maximum volume of all inputs
+ * connected to this sink */
+ get_maximum_input_volume(s, &s->real_volume, &s->channel_map);
+ update_real_volume(s, &s->real_volume, &s->channel_map);
+
+ /* Then, let's update the real ratios/soft volumes of all inputs
+ * connected to this sink */
+ compute_real_ratios(s);
+}
+
+/* Called from main thread. Only called for the root sink in shared volume
+ * cases, except for internal recursive calls. */
+static void propagate_reference_volume(pa_sink *s) {
+ pa_sink_input *i;
+ uint32_t idx;
+
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+ pa_assert(pa_sink_flat_volume_enabled(s));
+
+ /* This is called whenever the sink volume changes that is not
+ * caused by a sink input volume change. We need to fix up the
+ * sink input volumes accordingly */
+
+ PA_IDXSET_FOREACH(i, s->inputs, idx) {
+ pa_cvolume old_volume;
+
+ if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
+ propagate_reference_volume(i->origin_sink);
+
+ /* Since the origin sink uses volume sharing, this input's volume
+ * needs to be updated to match the root sink's real volume, but
+ * that will be done later in update_shared_real_volume(). */
+ continue;
+ }
+
+ old_volume = i->volume;
+
+ /* This basically calculates:
+ *
+ * i->volume := s->reference_volume * i->reference_ratio */
+
+ i->volume = s->reference_volume;
+ pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
+ pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);
+
+ /* The volume changed, let's tell people so */
+ if (!pa_cvolume_equal(&old_volume, &i->volume)) {
+
+ if (i->volume_changed)
+ i->volume_changed(i);
+
+ pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
+ }
+ }
+}
+
+/* Called from main thread. Only called for the root sink in volume sharing
+ * cases, except for internal recursive calls. The return value indicates
+ * whether any reference volume actually changed. */
+static pa_bool_t update_reference_volume(pa_sink *s, const pa_cvolume *v, const pa_channel_map *channel_map, pa_bool_t save) {
+ pa_cvolume volume;
+ pa_bool_t reference_volume_changed;
+ pa_sink_input *i;
+ uint32_t idx;
+
+ pa_sink_assert_ref(s);
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+ pa_assert(v);
+ pa_assert(channel_map);
+ pa_assert(pa_cvolume_valid(v));
+
+ volume = *v;
+ pa_cvolume_remap(&volume, channel_map, &s->channel_map);
+
+ reference_volume_changed = !pa_cvolume_equal(&volume, &s->reference_volume);
+ s->reference_volume = volume;
+
+ s->save_volume = (!reference_volume_changed && s->save_volume) || save;
+
+ if (reference_volume_changed)
+ pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
+ else if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
+ /* If the root sink's volume doesn't change, then there can't be any
+ * changes in the other sinks in the sink tree either.
+ *
+ * It's probably theoretically possible that even if the root sink's
+ * volume changes slightly, some filter sink doesn't change its volume
+ * due to rounding errors. If that happens, we still want to propagate
+ * the changed root sink volume to the sinks connected to the
+ * intermediate sink that didn't change its volume. This theoretical
+ possibility is the reason why we have that !(s->flags &
+ * PA_SINK_SHARE_VOLUME_WITH_MASTER) condition. Probably nobody would
+ * notice even if we returned here FALSE always if
+ * reference_volume_changed is FALSE. */
+ return FALSE;
+
+ PA_IDXSET_FOREACH(i, s->inputs, idx) {
+ if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
+ update_reference_volume(i->origin_sink, v, channel_map, FALSE);
+ }
+
+ return TRUE;
+}
+
+/* Called from main thread */
+void pa_sink_set_volume(
+ pa_sink *s,
+ const pa_cvolume *volume,
+ pa_bool_t send_msg,
+ pa_bool_t save) {
+
+ pa_cvolume new_reference_volume;
+ pa_sink *root_sink = s;
+
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+ pa_assert(!volume || pa_cvolume_valid(volume));
+ pa_assert(volume || pa_sink_flat_volume_enabled(s));
+ pa_assert(!volume || volume->channels == 1 || pa_cvolume_compatible(volume, &s->sample_spec));
+
+ /* make sure we don't change the volume when a PASSTHROUGH input is connected */
+ if (pa_sink_is_passthrough(s)) {
+ /* FIXME: Need to notify client that volume control is disabled */
+ pa_log_warn("Cannot change volume, Sink is connected to PASSTHROUGH input");
+ return;
+ }
+
+ /* In case of volume sharing, the volume is set for the root sink first,
+ * from which it's then propagated to the sharing sinks. */
+ while (root_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
+ root_sink = root_sink->input_to_master->sink;
+
+ /* As a special exception we accept mono volumes on all sinks --
+ * even on those with more complex channel maps */
+
+ if (volume) {
+ if (pa_cvolume_compatible(volume, &s->sample_spec))
+ new_reference_volume = *volume;
+ else {
+ new_reference_volume = s->reference_volume;
+ pa_cvolume_scale(&new_reference_volume, pa_cvolume_max(volume));
+ }
+
+ pa_cvolume_remap(&new_reference_volume, &s->channel_map, &root_sink->channel_map);
+ }
+
+ /* If volume is NULL we synchronize the sink's real and reference
+ * volumes with the stream volumes. If it is not NULL we update
+ * the reference_volume with it. */
+
+ if (volume) {
+ if (update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save)) {
+ if (pa_sink_flat_volume_enabled(root_sink)) {
+ /* OK, propagate this volume change back to the inputs */
+ propagate_reference_volume(root_sink);
+
+ /* And now recalculate the real volume */
+ compute_real_volume(root_sink);
+ } else
+ update_real_volume(root_sink, &root_sink->reference_volume, &root_sink->channel_map);
+ }
+
+ } else {
+ pa_assert(pa_sink_flat_volume_enabled(root_sink));
+
+ /* Ok, let's determine the new real volume */
+ compute_real_volume(root_sink);
+
+ /* Let's 'push' the reference volume if necessary */
+ pa_cvolume_merge(&new_reference_volume, &s->reference_volume, &root_sink->real_volume);
+ update_reference_volume(root_sink, &new_reference_volume, &root_sink->channel_map, save);
+
+ /* Now that the reference volume is updated, we can update the streams'
+ * reference ratios. */
+ compute_reference_ratios(root_sink);
+ }
+
+ if (root_sink->set_volume) {
+ /* If we have a function set_volume(), then we do not apply a
+ * soft volume by default. However, set_volume() is free to
+ * apply one to root_sink->soft_volume */
+
+ pa_cvolume_reset(&root_sink->soft_volume, root_sink->sample_spec.channels);
+ if (!(root_sink->flags & PA_SINK_SYNC_VOLUME))
+ root_sink->set_volume(root_sink);
+
+ } else
+ /* If we have no function set_volume(), then the soft volume
+ * becomes the real volume */
+ root_sink->soft_volume = root_sink->real_volume;
+
+ /* This tells the sink that soft volume and/or real volume changed */
+ if (send_msg)
+ pa_assert_se(pa_asyncmsgq_send(root_sink->asyncmsgq, PA_MSGOBJECT(root_sink), PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL) == 0);
+}
+
+/* Called from the io thread if sync volume is used, otherwise from the main thread.
+ * Only to be called by sink implementor */
+void pa_sink_set_soft_volume(pa_sink *s, const pa_cvolume *volume) {
+
+ pa_sink_assert_ref(s);
+ pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
+
+ if (s->flags & PA_SINK_SYNC_VOLUME)
+ pa_sink_assert_io_context(s);
+ else
+ pa_assert_ctl_context();
+
+ if (!volume)
+ pa_cvolume_reset(&s->soft_volume, s->sample_spec.channels);
else
- v = &s->sw_volume;
+ s->soft_volume = *volume;
+
+ if (PA_SINK_IS_LINKED(s->state) && !(s->flags & PA_SINK_SYNC_VOLUME))
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME, NULL, 0, NULL) == 0);
+ else
+ s->thread_info.soft_volume = s->soft_volume;
+}
+
+/* Called from the main thread. Only called for the root sink in volume sharing
+ * cases, except for internal recursive calls. */
+static void propagate_real_volume(pa_sink *s, const pa_cvolume *old_real_volume) {
+ pa_sink_input *i;
+ uint32_t idx;
+
+ pa_sink_assert_ref(s);
+ pa_assert(old_real_volume);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+
+ /* This is called when the hardware's real volume changes due to
+ * some external event. We copy the real volume into our
+ * reference volume and then rebuild the stream volumes based on
+ * i->real_ratio which should stay fixed. */
+
+ if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)) {
+ if (pa_cvolume_equal(old_real_volume, &s->real_volume))
+ return;
+
+ /* 1. Make the real volume the reference volume */
+ update_reference_volume(s, &s->real_volume, &s->channel_map, TRUE);
+ }
+
+ if (pa_sink_flat_volume_enabled(s)) {
+
+ PA_IDXSET_FOREACH(i, s->inputs, idx) {
+ pa_cvolume old_volume = i->volume;
+
+ /* 2. Since the sink's reference and real volumes are equal
+ * now, our ratios should be too. */
+ i->reference_ratio = i->real_ratio;
+
+ /* 3. Recalculate the new stream reference volume based on the
+ * reference ratio and the sink's reference volume.
+ *
+ * This basically calculates:
+ *
+ * i->volume = s->reference_volume * i->reference_ratio
+ *
+ * This is identical to propagate_reference_volume() */
+ i->volume = s->reference_volume;
+ pa_cvolume_remap(&i->volume, &s->channel_map, &i->channel_map);
+ pa_sw_cvolume_multiply(&i->volume, &i->volume, &i->reference_ratio);
+
+ /* Notify if something changed */
+ if (!pa_cvolume_equal(&old_volume, &i->volume)) {
+
+ if (i->volume_changed)
+ i->volume_changed(i);
+
+ pa_subscription_post(i->core, PA_SUBSCRIPTION_EVENT_SINK_INPUT|PA_SUBSCRIPTION_EVENT_CHANGE, i->index);
+ }
+
+ if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
+ propagate_real_volume(i->origin_sink, old_real_volume);
+ }
+ }
+
+ /* Something got changed in the hardware. It probably makes sense
+ * to save changed hw settings given that hw volume changes not
+ * triggered by PA are almost certainly done by the user. */
+ if (!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
+ s->save_volume = TRUE;
+}
+
+/* Called from io thread */
+void pa_sink_update_volume_and_mute(pa_sink *s) {
+ pa_assert(s);
+ pa_sink_assert_io_context(s);
+
+ pa_asyncmsgq_post(pa_thread_mq_get()->outq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE, NULL, 0, NULL, NULL);
+}
+
+/* Called from main thread */
+const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_bool_t force_refresh) {
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+
+ if (s->refresh_volume || force_refresh) {
+ struct pa_cvolume old_real_volume;
- if (pa_cvolume_equal(v, volume))
+ pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
+
+ old_real_volume = s->real_volume;
+
+ if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->get_volume)
+ s->get_volume(s);
+
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_VOLUME, NULL, 0, NULL) == 0);
+
+ update_real_volume(s, &s->real_volume, &s->channel_map);
+ propagate_real_volume(s, &old_real_volume);
+ }
+
+ return &s->reference_volume;
+}
+
+/* Called from main thread. In volume sharing cases, only the root sink may
+ * call this. */
+void pa_sink_volume_changed(pa_sink *s, const pa_cvolume *new_real_volume) {
+ pa_cvolume old_real_volume;
+
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+ pa_assert(!(s->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER));
+
+ /* The sink implementor may call this if the volume changed to make sure everyone is notified */
+
+ old_real_volume = s->real_volume;
+ update_real_volume(s, new_real_volume, &s->channel_map);
+ propagate_real_volume(s, &old_real_volume);
+}
+
+/* Called from main thread */
+void pa_sink_set_mute(pa_sink *s, pa_bool_t mute, pa_bool_t save) {
+ pa_bool_t old_muted;
+
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+
+ old_muted = s->muted;
+ s->muted = mute;
+ s->save_muted = (old_muted == s->muted && s->save_muted) || save;
+
+ if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->set_mute)
+ s->set_mute(s);
+
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
+
+ if (old_muted != s->muted)
+ pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
+}
+
+/* Called from main thread */
+pa_bool_t pa_sink_get_mute(pa_sink *s, pa_bool_t force_refresh) {
+
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+
+ /* Re-query the backend when the caller forces a refresh or the sink
+ * asked for refresh-on-read semantics. */
+ if (s->refresh_muted || force_refresh) {
+ pa_bool_t old_muted = s->muted;
+
+ if (!(s->flags & PA_SINK_SYNC_VOLUME) && s->get_mute)
+ s->get_mute(s);
+
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MUTE, NULL, 0, NULL) == 0);
+
+ if (old_muted != s->muted) {
+ s->save_muted = TRUE;
+
+ pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
+
+ /* Make sure the soft mute status stays in sync */
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MUTE, NULL, 0, NULL) == 0);
+ }
+ }
+
+ return s->muted;
+}
+
+/* Called from main thread */
+void pa_sink_mute_changed(pa_sink *s, pa_bool_t new_muted) {
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+
+ /* The sink implementor may call this if the mute state changed to make sure everyone is notified */
+
+ if (s->muted == new_muted)
return;
-
- *v = *volume;
- if (v == &s->hw_volume)
- if (s->set_hw_volume(s) < 0)
- s->sw_volume = *volume;
+ s->muted = new_muted;
+ s->save_muted = TRUE;
pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
}
-const pa_cvolume *pa_sink_get_volume(pa_sink *s, pa_mixer_t m) {
- assert(s);
- assert(s->ref >= 1);
+/* Called from main thread */
+pa_bool_t pa_sink_update_proplist(pa_sink *s, pa_update_mode_t mode, pa_proplist *p) {
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+
+ /* p may be NULL; in that case only the change notifications below
+ * are generated. */
+ if (p)
+ pa_proplist_update(s->proplist, mode, p);
+
+ if (PA_SINK_IS_LINKED(s->state)) {
+ pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
+ pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
+ }
+
+ return TRUE;
+}
+
+/* Called from main thread */
+/* FIXME -- this should be dropped and be merged into pa_sink_update_proplist() */
+void pa_sink_set_description(pa_sink *s, const char *description) {
+ const char *old;
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+
+ /* Nothing to do if we are asked to clear a description that is not set. */
+ if (!description && !pa_proplist_contains(s->proplist, PA_PROP_DEVICE_DESCRIPTION))
+ return;
+
+ old = pa_proplist_gets(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
+
+ if (old && description && pa_streq(old, description))
+ return;
+
+ if (description)
+ pa_proplist_sets(s->proplist, PA_PROP_DEVICE_DESCRIPTION, description);
+ else
+ pa_proplist_unset(s->proplist, PA_PROP_DEVICE_DESCRIPTION);
+
+ /* Keep the monitor source's description in sync with ours. */
+ if (s->monitor_source) {
+ char *n;
+
+ n = pa_sprintf_malloc("Monitor Source of %s", description ? description : s->name);
+ pa_source_set_description(s->monitor_source, n);
+ pa_xfree(n);
+ }
+
+ if (PA_SINK_IS_LINKED(s->state)) {
+ pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
+ pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PROPLIST_CHANGED], s);
+ }
+}
+
+/* Called from main thread */
+unsigned pa_sink_linked_by(pa_sink *s) {
+ unsigned ret;
+
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+
+ ret = pa_idxset_size(s->inputs);
+
+ /* We add in the number of streams connected to us here. Please
+ * note the asymmetry to pa_sink_used_by()! */
+
+ if (s->monitor_source)
+ ret += pa_source_linked_by(s->monitor_source);
+
+ return ret;
+}
+
+/* Called from main thread */
+unsigned pa_sink_used_by(pa_sink *s) {
+ unsigned ret;
+
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+
+ ret = pa_idxset_size(s->inputs);
+ pa_assert(ret >= s->n_corked);
+
+ /* Streams connected to our monitor source do not matter for
+ * pa_sink_used_by()! Corked inputs are not counted either. */
+
+ return ret - s->n_corked;
+}
+
+/* Called from main thread */
+unsigned pa_sink_check_suspend(pa_sink *s) {
+ unsigned ret;
+ pa_sink_input *i;
+ uint32_t idx;
+
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+
+ if (!PA_SINK_IS_LINKED(s->state))
+ return 0;
+
+ ret = 0;
+
+ /* Count the inputs that keep this sink busy (i.e. that should
+ * inhibit auto-suspend). */
+ PA_IDXSET_FOREACH(i, s->inputs, idx) {
+ pa_sink_input_state_t st;
+
+ st = pa_sink_input_get_state(i);
+
+ /* We do not assert here. It is perfectly valid for a sink input to
+ * be in the INIT state (i.e. created, marked done but not yet put)
+ * and we should not care if it's unlinked as it won't contribute
+ * towards our busy status.
+ */
+ if (!PA_SINK_INPUT_IS_LINKED(st))
+ continue;
+
+ if (st == PA_SINK_INPUT_CORKED)
+ continue;
+
+ if (i->flags & PA_SINK_INPUT_DONT_INHIBIT_AUTO_SUSPEND)
+ continue;
+
+ ret ++;
+ }
+
+ if (s->monitor_source)
+ ret += pa_source_check_suspend(s->monitor_source);
+
+ return ret;
+}
+
+/* Called from the IO thread */
+static void sync_input_volumes_within_thread(pa_sink *s) {
+ pa_sink_input *i;
+ void *state = NULL;
+
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+
+ /* Copy each input's main-thread soft volume into its IO-thread copy,
+ * requesting a rewind so the change takes effect as soon as possible. */
+ PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
+ if (pa_cvolume_equal(&i->thread_info.soft_volume, &i->soft_volume))
+ continue;
+
+ i->thread_info.soft_volume = i->soft_volume;
+ pa_sink_input_request_rewind(i, 0, TRUE, FALSE, FALSE);
+ }
+}
+
+/* Called from the IO thread. Only called for the root sink in volume sharing
+ * cases, except for internal recursive calls. */
+static void set_shared_volume_within_thread(pa_sink *s) {
+ pa_sink_input *i = NULL;
+ void *state = NULL;
+
+ pa_sink_assert_ref(s);
+
+ /* Process the volume update synchronously on this sink, ... */
+ PA_MSGOBJECT(s)->process_msg(PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_VOLUME_SYNCED, NULL, 0, NULL);
+
+ /* ... then recurse into all filter sinks that share our volume. */
+ PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state) {
+ if (i->origin_sink && (i->origin_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER))
+ set_shared_volume_within_thread(i->origin_sink);
+ }
+}
+
+/* Called from IO thread, except when it is not */
+/* Central control-message handler for the sink's message object.
+ * Returns 0 for every handled message and -1 for unknown codes. */
+int pa_sink_process_msg(pa_msgobject *o, int code, void *userdata, int64_t offset, pa_memchunk *chunk) {
+ pa_sink *s = PA_SINK(o);
+ pa_sink_assert_ref(s);
+
+ switch ((pa_sink_message_t) code) {
+
+ case PA_SINK_MESSAGE_ADD_INPUT: {
+ pa_sink_input *i = PA_SINK_INPUT(userdata);
+
+ /* If you change anything here, make sure to change the
+ * sink input handling a few lines down at
+ * PA_SINK_MESSAGE_FINISH_MOVE, too. */
+
+ pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
+
+ /* Since the caller sleeps in pa_sink_input_put(), we can
+ * safely access data outside of thread_info even though
+ * it is mutable */
+
+ if ((i->thread_info.sync_prev = i->sync_prev)) {
+ pa_assert(i->sink == i->thread_info.sync_prev->sink);
+ pa_assert(i->sync_prev->sync_next == i);
+ i->thread_info.sync_prev->thread_info.sync_next = i;
+ }
+
+ if ((i->thread_info.sync_next = i->sync_next)) {
+ pa_assert(i->sink == i->thread_info.sync_next->sink);
+ pa_assert(i->sync_next->sync_prev == i);
+ i->thread_info.sync_next->thread_info.sync_prev = i;
+ }
+
+ pa_assert(!i->thread_info.attached);
+ i->thread_info.attached = TRUE;
+
+ if (i->attach)
+ i->attach(i);
+
+ pa_sink_input_set_state_within_thread(i, i->state);
+
+ /* The requested latency of the sink input needs to be
+ * fixed up and then configured on the sink */
+
+ if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
+ pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
+
+ pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
+ pa_sink_input_update_max_request(i, s->thread_info.max_request);
+
+ /* We don't rewind here automatically. This is left to the
+ * sink input implementor because some sink inputs need a
+ * slow start, i.e. need some time to buffer client
+ * samples before beginning streaming. */
+
+ /* In flat volume mode we need to update the volume as
+ * well */
+ return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
+ }
+
+ case PA_SINK_MESSAGE_REMOVE_INPUT: {
+ pa_sink_input *i = PA_SINK_INPUT(userdata);
+
+ /* If you change anything here, make sure to change the
+ * sink input handling a few lines down at
+ * PA_SINK_MESSAGE_START_MOVE, too. */
+
+ if (i->detach)
+ i->detach(i);
+
+ pa_sink_input_set_state_within_thread(i, i->state);
+
+ pa_assert(i->thread_info.attached);
+ i->thread_info.attached = FALSE;
+
+ /* Since the caller sleeps in pa_sink_input_unlink(),
+ * we can safely access data outside of thread_info even
+ * though it is mutable */
+
+ pa_assert(!i->sync_prev);
+ pa_assert(!i->sync_next);
+
+ if (i->thread_info.sync_prev) {
+ i->thread_info.sync_prev->thread_info.sync_next = i->thread_info.sync_prev->sync_next;
+ i->thread_info.sync_prev = NULL;
+ }
+
+ if (i->thread_info.sync_next) {
+ i->thread_info.sync_next->thread_info.sync_prev = i->thread_info.sync_next->sync_prev;
+ i->thread_info.sync_next = NULL;
+ }
+
+ if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
+ pa_sink_input_unref(i);
+
+ pa_sink_invalidate_requested_latency(s, TRUE);
+ pa_sink_request_rewind(s, (size_t) -1);
+
+ /* In flat volume mode we need to update the volume as
+ * well */
+ return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
+ }
+
+ case PA_SINK_MESSAGE_START_MOVE: {
+ pa_sink_input *i = PA_SINK_INPUT(userdata);
+
+ /* We don't support moving synchronized streams. */
+ pa_assert(!i->sync_prev);
+ pa_assert(!i->sync_next);
+ pa_assert(!i->thread_info.sync_next);
+ pa_assert(!i->thread_info.sync_prev);
+
+ if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
+ pa_usec_t usec = 0;
+ size_t sink_nbytes, total_nbytes;
+
+ /* Get the latency of the sink */
+ usec = pa_sink_get_latency_within_thread(s);
+ sink_nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
+ total_nbytes = sink_nbytes + pa_memblockq_get_length(i->thread_info.render_memblockq);
+
+ if (total_nbytes > 0) {
+ i->thread_info.rewrite_nbytes = i->thread_info.resampler ? pa_resampler_request(i->thread_info.resampler, total_nbytes) : total_nbytes;
+ i->thread_info.rewrite_flush = TRUE;
+ pa_sink_input_process_rewind(i, sink_nbytes);
+ }
+ }
+
+ if (i->detach)
+ i->detach(i);
+
+ pa_assert(i->thread_info.attached);
+ i->thread_info.attached = FALSE;
+
+ /* Let's remove the sink input ...*/
+ if (pa_hashmap_remove(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index)))
+ pa_sink_input_unref(i);
+
+ pa_sink_invalidate_requested_latency(s, TRUE);
+
+ pa_log_debug("Requesting rewind due to started move");
+ pa_sink_request_rewind(s, (size_t) -1);
+
+ /* In flat volume mode we need to update the volume as
+ * well */
+ return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
+ }
+
+ case PA_SINK_MESSAGE_FINISH_MOVE: {
+ pa_sink_input *i = PA_SINK_INPUT(userdata);
+
+ /* We don't support moving synchronized streams. */
+ pa_assert(!i->sync_prev);
+ pa_assert(!i->sync_next);
+ pa_assert(!i->thread_info.sync_next);
+ pa_assert(!i->thread_info.sync_prev);
+
+ pa_hashmap_put(s->thread_info.inputs, PA_UINT32_TO_PTR(i->index), pa_sink_input_ref(i));
+
+ pa_assert(!i->thread_info.attached);
+ i->thread_info.attached = TRUE;
+
+ if (i->attach)
+ i->attach(i);
+
+ if (i->thread_info.requested_sink_latency != (pa_usec_t) -1)
+ pa_sink_input_set_requested_latency_within_thread(i, i->thread_info.requested_sink_latency);
+
+ pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
+ pa_sink_input_update_max_request(i, s->thread_info.max_request);
+
+ if (i->thread_info.state != PA_SINK_INPUT_CORKED) {
+ pa_usec_t usec = 0;
+ size_t nbytes;
+
+ /* Get the latency of the sink */
+ usec = pa_sink_get_latency_within_thread(s);
+ nbytes = pa_usec_to_bytes(usec, &s->sample_spec);
+
+ if (nbytes > 0)
+ pa_sink_input_drop(i, nbytes);
+
+ pa_log_debug("Requesting rewind due to finished move");
+ pa_sink_request_rewind(s, nbytes);
+ }
+
+ return o->process_msg(o, PA_SINK_MESSAGE_SET_SHARED_VOLUME, NULL, 0, NULL);
+ }
+
+ case PA_SINK_MESSAGE_SET_SHARED_VOLUME: {
+ pa_sink *root_sink = s;
+
+ /* Walk up the filter-sink chain to the root and update the whole
+ * shared-volume tree from there. */
+ while (root_sink->flags & PA_SINK_SHARE_VOLUME_WITH_MASTER)
+ root_sink = root_sink->input_to_master->sink;
+
+ set_shared_volume_within_thread(root_sink);
+ return 0;
+ }
+
+ case PA_SINK_MESSAGE_SET_VOLUME_SYNCED:
+
+ if (s->flags & PA_SINK_SYNC_VOLUME) {
+ s->set_volume(s);
+ pa_sink_volume_change_push(s);
+ }
+ /* Fall through ... */
+
+ case PA_SINK_MESSAGE_SET_VOLUME:
+
+ if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
+ s->thread_info.soft_volume = s->soft_volume;
+ pa_sink_request_rewind(s, (size_t) -1);
+ }
+
+ /* Fall through ... */
+
+ case PA_SINK_MESSAGE_SYNC_VOLUMES:
+ sync_input_volumes_within_thread(s);
+ return 0;
+
+ case PA_SINK_MESSAGE_GET_VOLUME:
+
+ if ((s->flags & PA_SINK_SYNC_VOLUME) && s->get_volume) {
+ s->get_volume(s);
+ pa_sink_volume_change_flush(s);
+ pa_sw_cvolume_divide(&s->thread_info.current_hw_volume, &s->real_volume, &s->soft_volume);
+ }
+
+ /* In case sink implementor reset SW volume. */
+ if (!pa_cvolume_equal(&s->thread_info.soft_volume, &s->soft_volume)) {
+ s->thread_info.soft_volume = s->soft_volume;
+ pa_sink_request_rewind(s, (size_t) -1);
+ }
+
+ return 0;
+
+ case PA_SINK_MESSAGE_SET_MUTE:
+
+ if (s->thread_info.soft_muted != s->muted) {
+ s->thread_info.soft_muted = s->muted;
+ pa_sink_request_rewind(s, (size_t) -1);
+ }
+
+ if (s->flags & PA_SINK_SYNC_VOLUME && s->set_mute)
+ s->set_mute(s);
+
+ return 0;
+
+ case PA_SINK_MESSAGE_GET_MUTE:
+
+ if (s->flags & PA_SINK_SYNC_VOLUME && s->get_mute)
+ s->get_mute(s);
+
+ return 0;
+
+ case PA_SINK_MESSAGE_SET_STATE: {
+
+ pa_bool_t suspend_change =
+ (s->thread_info.state == PA_SINK_SUSPENDED && PA_SINK_IS_OPENED(PA_PTR_TO_UINT(userdata))) ||
+ (PA_SINK_IS_OPENED(s->thread_info.state) && PA_PTR_TO_UINT(userdata) == PA_SINK_SUSPENDED);
+
+ s->thread_info.state = PA_PTR_TO_UINT(userdata);
+
+ /* A suspended sink must not keep a pending rewind around. */
+ if (s->thread_info.state == PA_SINK_SUSPENDED) {
+ s->thread_info.rewind_nbytes = 0;
+ s->thread_info.rewind_requested = FALSE;
+ }
+
+ if (suspend_change) {
+ pa_sink_input *i;
+ void *state = NULL;
+
+ while ((i = pa_hashmap_iterate(s->thread_info.inputs, &state, NULL)))
+ if (i->suspend_within_thread)
+ i->suspend_within_thread(i, s->thread_info.state == PA_SINK_SUSPENDED);
+ }
+
+ return 0;
+ }
+
+ case PA_SINK_MESSAGE_DETACH:
+
+ /* Detach all streams */
+ pa_sink_detach_within_thread(s);
+ return 0;
+
+ case PA_SINK_MESSAGE_ATTACH:
+
+ /* Reattach all streams */
+ pa_sink_attach_within_thread(s);
+ return 0;
+
+ case PA_SINK_MESSAGE_GET_REQUESTED_LATENCY: {
+
+ pa_usec_t *usec = userdata;
+ *usec = pa_sink_get_requested_latency_within_thread(s);
+
+ /* Yes, that's right, the IO thread will see -1 when no
+ * explicit requested latency is configured, the main
+ * thread will see max_latency */
+ if (*usec == (pa_usec_t) -1)
+ *usec = s->thread_info.max_latency;
+
+ return 0;
+ }
+
+ case PA_SINK_MESSAGE_SET_LATENCY_RANGE: {
+ pa_usec_t *r = userdata;
+
+ pa_sink_set_latency_range_within_thread(s, r[0], r[1]);
+
+ return 0;
+ }
+
+ case PA_SINK_MESSAGE_GET_LATENCY_RANGE: {
+ pa_usec_t *r = userdata;
+
+ r[0] = s->thread_info.min_latency;
+ r[1] = s->thread_info.max_latency;
+
+ return 0;
+ }
+
+ case PA_SINK_MESSAGE_GET_FIXED_LATENCY:
+
+ *((pa_usec_t*) userdata) = s->thread_info.fixed_latency;
+ return 0;
+
+ case PA_SINK_MESSAGE_SET_FIXED_LATENCY:
+
+ pa_sink_set_fixed_latency_within_thread(s, (pa_usec_t) offset);
+ return 0;
+
+ case PA_SINK_MESSAGE_GET_MAX_REWIND:
+
+ *((size_t*) userdata) = s->thread_info.max_rewind;
+ return 0;
+
+ case PA_SINK_MESSAGE_GET_MAX_REQUEST:
+
+ *((size_t*) userdata) = s->thread_info.max_request;
+ return 0;
+
+ case PA_SINK_MESSAGE_SET_MAX_REWIND:
+
+ pa_sink_set_max_rewind_within_thread(s, (size_t) offset);
+ return 0;
+
+ case PA_SINK_MESSAGE_SET_MAX_REQUEST:
+
+ pa_sink_set_max_request_within_thread(s, (size_t) offset);
+ return 0;
+
+ case PA_SINK_MESSAGE_SET_PORT:
+
+ pa_assert(userdata);
+ if (s->set_port) {
+ struct sink_message_set_port *msg_data = userdata;
+ msg_data->ret = s->set_port(s, msg_data->port);
+ }
+ return 0;
+
+ case PA_SINK_MESSAGE_UPDATE_VOLUME_AND_MUTE:
+ /* This message is sent from IO-thread and handled in main thread. */
+ pa_assert_ctl_context();
+
+ pa_sink_get_volume(s, TRUE);
+ pa_sink_get_mute(s, TRUE);
+ return 0;
+
+ case PA_SINK_MESSAGE_GET_LATENCY:
+ case PA_SINK_MESSAGE_MAX:
+ ;
+ }
+
+ return -1;
+}
+
+/* Called from main thread */
+int pa_sink_suspend_all(pa_core *c, pa_bool_t suspend, pa_suspend_cause_t cause) {
+ pa_sink *sink;
+ uint32_t idx;
+ int ret = 0;
+
+ pa_core_assert_ref(c);
+ pa_assert_ctl_context();
+ pa_assert(cause != 0);
+
+ /* Try every sink; remember the last failure but keep going so a
+ * single failing sink does not block the others. */
+ PA_IDXSET_FOREACH(sink, c->sinks, idx) {
+ int r;
+
+ if ((r = pa_sink_suspend(sink, suspend, cause)) < 0)
+ ret = r;
+ }
+
+ return ret;
+}
+
+/* Called from main thread. Asks the IO thread to detach all streams. */
+void pa_sink_detach(pa_sink *s) {
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_DETACH, NULL, 0, NULL) == 0);
+}
+
+/* Called from main thread. Asks the IO thread to reattach all streams. */
+void pa_sink_attach(pa_sink *s) {
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_ATTACH, NULL, 0, NULL) == 0);
+}
+
+/* Called from IO thread. Runs each input's detach() callback, then the
+ * monitor source's. */
+void pa_sink_detach_within_thread(pa_sink *s) {
+ pa_sink_input *i;
+ void *state = NULL;
+
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+ pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
+
+ PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
+ if (i->detach)
+ i->detach(i);
+
+ if (s->monitor_source)
+ pa_source_detach_within_thread(s->monitor_source);
+}
+
+/* Called from IO thread. Runs each input's attach() callback, then the
+ * monitor source's. */
+void pa_sink_attach_within_thread(pa_sink *s) {
+ pa_sink_input *i;
+ void *state = NULL;
+
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+ pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
+
+ PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
+ if (i->attach)
+ i->attach(i);
+
+ if (s->monitor_source)
+ pa_source_attach_within_thread(s->monitor_source);
+}
+
+/* Called from IO thread */
+void pa_sink_request_rewind(pa_sink*s, size_t nbytes) {
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+ pa_assert(PA_SINK_IS_LINKED(s->thread_info.state));
+
+ if (s->thread_info.state == PA_SINK_SUSPENDED)
+ return;
- if (m == PA_MIXER_HARDWARE && s->set_hw_volume) {
+ /* (size_t) -1 means "rewind as much as possible". */
+ if (nbytes == (size_t) -1)
+ nbytes = s->thread_info.max_rewind;
- if (s->get_hw_volume)
- s->get_hw_volume(s);
-
- return &s->hw_volume;
+ nbytes = PA_MIN(nbytes, s->thread_info.max_rewind);
+
+ /* A larger rewind is already pending; nothing more to do. */
+ if (s->thread_info.rewind_requested &&
+ nbytes <= s->thread_info.rewind_nbytes)
+ return;
+
+ s->thread_info.rewind_nbytes = nbytes;
+ s->thread_info.rewind_requested = TRUE;
+
+ if (s->request_rewind)
+ s->request_rewind(s);
+}
+
+/* Called from IO thread. Returns the smallest latency requested by any
+ * input or the monitor source, clamped to the sink's latency range;
+ * (pa_usec_t) -1 means no explicit request. */
+pa_usec_t pa_sink_get_requested_latency_within_thread(pa_sink *s) {
+ pa_usec_t result = (pa_usec_t) -1;
+ pa_sink_input *i;
+ void *state = NULL;
+ pa_usec_t monitor_latency;
+
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+
+ if (!(s->flags & PA_SINK_DYNAMIC_LATENCY))
+ return PA_CLAMP(s->thread_info.fixed_latency, s->thread_info.min_latency, s->thread_info.max_latency);
+
+ if (s->thread_info.requested_latency_valid)
+ return s->thread_info.requested_latency;
+
+ PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
+ if (i->thread_info.requested_sink_latency != (pa_usec_t) -1 &&
+ (result == (pa_usec_t) -1 || result > i->thread_info.requested_sink_latency))
+ result = i->thread_info.requested_sink_latency;
+
+ monitor_latency = pa_source_get_requested_latency_within_thread(s->monitor_source);
+
+ if (monitor_latency != (pa_usec_t) -1 &&
+ (result == (pa_usec_t) -1 || result > monitor_latency))
+ result = monitor_latency;
+
+ if (result != (pa_usec_t) -1)
+ result = PA_CLAMP(result, s->thread_info.min_latency, s->thread_info.max_latency);
+
+ if (PA_SINK_IS_LINKED(s->thread_info.state)) {
+ /* Only cache if properly initialized */
+ s->thread_info.requested_latency = result;
+ s->thread_info.requested_latency_valid = TRUE;
+ }
+
+ return result;
+}
+
+/* Called from main thread. Queries the IO thread for the effective
+ * requested latency; returns 0 while the sink is suspended. */
+pa_usec_t pa_sink_get_requested_latency(pa_sink *s) {
+ pa_usec_t usec = 0;
+
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(PA_SINK_IS_LINKED(s->state));
+
+ if (s->state == PA_SINK_SUSPENDED)
+ return 0;
+
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_REQUESTED_LATENCY, &usec, 0, NULL) == 0);
+
+ return usec;
+}
+
+/* Called from IO as well as the main thread -- the latter only before the IO thread started up */
+void pa_sink_set_max_rewind_within_thread(pa_sink *s, size_t max_rewind) {
+ pa_sink_input *i;
+ void *state = NULL;
+
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+
+ if (max_rewind == s->thread_info.max_rewind)
+ return;
+
+ s->thread_info.max_rewind = max_rewind;
+
+ /* Propagate the new limit to all inputs and the monitor source. */
+ if (PA_SINK_IS_LINKED(s->thread_info.state))
+ PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
+ pa_sink_input_update_max_rewind(i, s->thread_info.max_rewind);
+
+ if (s->monitor_source)
+ pa_source_set_max_rewind_within_thread(s->monitor_source, s->thread_info.max_rewind);
+}
+
+/* Called from main thread. Routes through the IO thread once the sink is
+ * linked, otherwise applies the value directly. */
+void pa_sink_set_max_rewind(pa_sink *s, size_t max_rewind) {
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+
+ if (PA_SINK_IS_LINKED(s->state))
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REWIND, NULL, max_rewind, NULL) == 0);
+ else
+ pa_sink_set_max_rewind_within_thread(s, max_rewind);
+}
+
+/* Called from IO as well as the main thread -- the latter only before the IO thread started up */
+void pa_sink_set_max_request_within_thread(pa_sink *s, size_t max_request) {
+ void *state = NULL;
+
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+
+ if (max_request == s->thread_info.max_request)
+ return;
+
+ s->thread_info.max_request = max_request;
+
+ /* Propagate the new limit to all linked inputs. */
+ if (PA_SINK_IS_LINKED(s->thread_info.state)) {
+ pa_sink_input *i;
+
+ PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
+ pa_sink_input_update_max_request(i, s->thread_info.max_request);
+ }
+}
+
+/* Called from main thread. Routes through the IO thread once the sink is
+ * linked, otherwise applies the value directly. */
+void pa_sink_set_max_request(pa_sink *s, size_t max_request) {
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+
+ if (PA_SINK_IS_LINKED(s->state))
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_MAX_REQUEST, NULL, max_request, NULL) == 0);
+ else
+ pa_sink_set_max_request_within_thread(s, max_request);
+}
+
+/* Called from IO thread. Drops the cached requested latency and notifies
+ * the sink and its inputs so they can recompute. */
+void pa_sink_invalidate_requested_latency(pa_sink *s, pa_bool_t dynamic) {
+ pa_sink_input *i;
+ void *state = NULL;
+
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+
+ if ((s->flags & PA_SINK_DYNAMIC_LATENCY))
+ s->thread_info.requested_latency_valid = FALSE;
+ else if (dynamic)
+ return;
+
+ if (PA_SINK_IS_LINKED(s->thread_info.state)) {
+
+ if (s->update_requested_latency)
+ s->update_requested_latency(s);
+
+ PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
+ if (i->update_sink_requested_latency)
+ i->update_sink_requested_latency(i);
+ }
+}
+
+/* Called from main thread */
+void pa_sink_set_latency_range(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+
+ /* min_latency == 0: no limit
+ * min_latency anything else: specified limit
+ *
+ * Similar for max_latency */
+
+ if (min_latency < ABSOLUTE_MIN_LATENCY)
+ min_latency = ABSOLUTE_MIN_LATENCY;
+
+ if (max_latency <= 0 ||
+ max_latency > ABSOLUTE_MAX_LATENCY)
+ max_latency = ABSOLUTE_MAX_LATENCY;
+
+ pa_assert(min_latency <= max_latency);
+
+ /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
+ pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
+ max_latency == ABSOLUTE_MAX_LATENCY) ||
+ (s->flags & PA_SINK_DYNAMIC_LATENCY));
+
+ /* Once linked, the range must be applied from within the IO thread. */
+ if (PA_SINK_IS_LINKED(s->state)) {
+ pa_usec_t r[2];
+
+ r[0] = min_latency;
+ r[1] = max_latency;
+
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_LATENCY_RANGE, r, 0, NULL) == 0);
} else
- return &s->sw_volume;
+ pa_sink_set_latency_range_within_thread(s, min_latency, max_latency);
+}
+
+/* Called from main thread */
+void pa_sink_get_latency_range(pa_sink *s, pa_usec_t *min_latency, pa_usec_t *max_latency) {
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+ pa_assert(min_latency);
+ pa_assert(max_latency);
+
+ /* Once linked, the authoritative values live in the IO thread. */
+ if (PA_SINK_IS_LINKED(s->state)) {
+ pa_usec_t r[2] = { 0, 0 };
+
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_LATENCY_RANGE, r, 0, NULL) == 0);
+
+ *min_latency = r[0];
+ *max_latency = r[1];
+ } else {
+ *min_latency = s->thread_info.min_latency;
+ *max_latency = s->thread_info.max_latency;
+ }
+}
+
+/* Called from IO thread */
+void pa_sink_set_latency_range_within_thread(pa_sink *s, pa_usec_t min_latency, pa_usec_t max_latency) {
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+
+ pa_assert(min_latency >= ABSOLUTE_MIN_LATENCY);
+ pa_assert(max_latency <= ABSOLUTE_MAX_LATENCY);
+ pa_assert(min_latency <= max_latency);
+
+ /* Hmm, let's see if someone forgot to set PA_SINK_DYNAMIC_LATENCY here... */
+ pa_assert((min_latency == ABSOLUTE_MIN_LATENCY &&
+ max_latency == ABSOLUTE_MAX_LATENCY) ||
+ (s->flags & PA_SINK_DYNAMIC_LATENCY));
+
+ if (s->thread_info.min_latency == min_latency &&
+ s->thread_info.max_latency == max_latency)
+ return;
+
+ s->thread_info.min_latency = min_latency;
+ s->thread_info.max_latency = max_latency;
+
+ /* Notify inputs, drop the cached requested latency, and mirror the
+ * range onto the monitor source. */
+ if (PA_SINK_IS_LINKED(s->thread_info.state)) {
+ pa_sink_input *i;
+ void *state = NULL;
+
+ PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
+ if (i->update_sink_latency_range)
+ i->update_sink_latency_range(i);
+ }
+
+ pa_sink_invalidate_requested_latency(s, FALSE);
+
+ pa_source_set_latency_range_within_thread(s->monitor_source, min_latency, max_latency);
}
-void pa_sink_set_mute(pa_sink *s, pa_mixer_t m, int mute) {
- int *t;
-
- assert(s);
- assert(s->ref >= 1);
+/* Called from main thread. Clamps the latency to the absolute limits and
+ * applies it (via the IO thread once the sink is linked). */
+void pa_sink_set_fixed_latency(pa_sink *s, pa_usec_t latency) {
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+
+ /* Fixed latency is meaningless for dynamic-latency sinks. */
+ if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
+ pa_assert(latency == 0);
+ return;
+ }
+
+ if (latency < ABSOLUTE_MIN_LATENCY)
+ latency = ABSOLUTE_MIN_LATENCY;
+
+ if (latency > ABSOLUTE_MAX_LATENCY)
+ latency = ABSOLUTE_MAX_LATENCY;
- if (m == PA_MIXER_HARDWARE && s->set_hw_mute)
- t = &s->hw_muted;
+ if (PA_SINK_IS_LINKED(s->state))
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_FIXED_LATENCY, NULL, (int64_t) latency, NULL) == 0);
else
- t = &s->sw_muted;
+ s->thread_info.fixed_latency = latency;
- if (!!*t == !!mute)
+ pa_source_set_fixed_latency(s->monitor_source, latency);
+}
+
+/* Called from main thread. Returns 0 for dynamic-latency sinks. */
+pa_usec_t pa_sink_get_fixed_latency(pa_sink *s) {
+ pa_usec_t latency;
+
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+
+ if (s->flags & PA_SINK_DYNAMIC_LATENCY)
+ return 0;
+
+ if (PA_SINK_IS_LINKED(s->state))
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_FIXED_LATENCY, &latency, 0, NULL) == 0);
+ else
+ latency = s->thread_info.fixed_latency;
+
+ return latency;
+}
+
+/* Called from IO thread */
+void pa_sink_set_fixed_latency_within_thread(pa_sink *s, pa_usec_t latency) {
+ pa_sink_assert_ref(s);
+ pa_sink_assert_io_context(s);
+
+ if (s->flags & PA_SINK_DYNAMIC_LATENCY) {
+ pa_assert(latency == 0);
+ return;
+ }
+
+ pa_assert(latency >= ABSOLUTE_MIN_LATENCY);
+ pa_assert(latency <= ABSOLUTE_MAX_LATENCY);
+
+ if (s->thread_info.fixed_latency == latency)
return;
-
- *t = !!mute;
- if (t == &s->hw_muted)
- if (s->set_hw_mute(s) < 0)
- s->sw_muted = !!mute;
+ s->thread_info.fixed_latency = latency;
+
+ /* Notify inputs, drop the cached requested latency, and mirror the
+ * value onto the monitor source. */
+ if (PA_SINK_IS_LINKED(s->thread_info.state)) {
+ pa_sink_input *i;
+ void *state = NULL;
+
+ PA_HASHMAP_FOREACH(i, s->thread_info.inputs, state)
+ if (i->update_sink_fixed_latency)
+ i->update_sink_fixed_latency(i);
+ }
+
+ pa_sink_invalidate_requested_latency(s, FALSE);
+
+ pa_source_set_fixed_latency_within_thread(s->monitor_source, latency);
+}
+
+/* Called from main context. Reads the IO-thread value once linked. */
+size_t pa_sink_get_max_rewind(pa_sink *s) {
+ size_t r;
+ pa_assert_ctl_context();
+ pa_sink_assert_ref(s);
+
+ if (!PA_SINK_IS_LINKED(s->state))
+ return s->thread_info.max_rewind;
+
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REWIND, &r, 0, NULL) == 0);
+
+ return r;
+}
+
+/* Called from main context. Reads the IO-thread value once linked. */
+size_t pa_sink_get_max_request(pa_sink *s) {
+ size_t r;
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+
+ if (!PA_SINK_IS_LINKED(s->state))
+ return s->thread_info.max_request;
+
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_GET_MAX_REQUEST, &r, 0, NULL) == 0);
+
+ return r;
+}
+
+/* Called from main context. Switches the sink's active port by name;
+ * returns 0 on success or a negative PA_ERR_* code. */
+int pa_sink_set_port(pa_sink *s, const char *name, pa_bool_t save) {
+ pa_device_port *port;
+ int ret;
+
+ pa_sink_assert_ref(s);
+ pa_assert_ctl_context();
+
+ if (!s->set_port) {
+ pa_log_debug("set_port() operation not implemented for sink %u \"%s\"", s->index, s->name);
+ return -PA_ERR_NOTIMPLEMENTED;
+ }
+
+ if (!s->ports)
+ return -PA_ERR_NOENTITY;
+
+ if (!(port = pa_hashmap_get(s->ports, name)))
+ return -PA_ERR_NOENTITY;
+
+ /* Already active: only the save flag may need updating. */
+ if (s->active_port == port) {
+ s->save_port = s->save_port || save;
+ return 0;
+ }
+
+ /* With deferred (SYNC_VOLUME) sinks the port switch must happen in
+ * the IO thread; otherwise call the backend directly. */
+ if (s->flags & PA_SINK_SYNC_VOLUME) {
+ struct sink_message_set_port msg = { .port = port, .ret = 0 };
+ pa_assert_se(pa_asyncmsgq_send(s->asyncmsgq, PA_MSGOBJECT(s), PA_SINK_MESSAGE_SET_PORT, &msg, 0, NULL) == 0);
+ ret = msg.ret;
+ }
+ else
+ ret = s->set_port(s, port);
+
+ if (ret < 0)
+ return -PA_ERR_NOENTITY;
pa_subscription_post(s->core, PA_SUBSCRIPTION_EVENT_SINK|PA_SUBSCRIPTION_EVENT_CHANGE, s->index);
+
+ pa_log_info("Changed port of sink %u \"%s\" to %s", s->index, s->name, port->name);
+
+ s->active_port = port;
+ s->save_port = save;
+
+ pa_hook_fire(&s->core->hooks[PA_CORE_HOOK_SINK_PORT_CHANGED], s);
+
+ return 0;
}
-int pa_sink_get_mute(pa_sink *s, pa_mixer_t m) {
- assert(s);
- assert(s->ref >= 1);
+/* Fills in PA_PROP_DEVICE_ICON_NAME from form factor, device class,
+ * profile and bus properties, unless it is already set. */
+pa_bool_t pa_device_init_icon(pa_proplist *p, pa_bool_t is_sink) {
+ const char *ff, *c, *t = NULL, *s = "", *profile, *bus;
+
+ pa_assert(p);
+
+ if (pa_proplist_contains(p, PA_PROP_DEVICE_ICON_NAME))
+ return TRUE;
+
+ if ((ff = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
+
+ if (pa_streq(ff, "microphone"))
+ t = "audio-input-microphone";
+ else if (pa_streq(ff, "webcam"))
+ t = "camera-web";
+ else if (pa_streq(ff, "computer"))
+ t = "computer";
+ else if (pa_streq(ff, "handset"))
+ t = "phone";
+ else if (pa_streq(ff, "portable"))
+ t = "multimedia-player";
+ else if (pa_streq(ff, "tv"))
+ t = "video-display";
+
+ /*
+ * The following icons are not part of the icon naming spec,
+ * because Rodney Dawes sucks as the maintainer of that spec.
+ *
+ * http://lists.freedesktop.org/archives/xdg/2009-May/010397.html
+ */
+ else if (pa_streq(ff, "headset"))
+ t = "audio-headset";
+ else if (pa_streq(ff, "headphone"))
+ t = "audio-headphones";
+ else if (pa_streq(ff, "speaker"))
+ t = "audio-speakers";
+ else if (pa_streq(ff, "hands-free"))
+ t = "audio-handsfree";
+ }
+
+ if (!t)
+ if ((c = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
+ if (pa_streq(c, "modem"))
+ t = "modem";
- if (m == PA_MIXER_HARDWARE && s->set_hw_mute) {
+ /* Fallback icon when nothing more specific matched. */
+ if (!t) {
+ if (is_sink)
+ t = "audio-card";
+ else
+ t = "audio-input-microphone";
+ }
- if (s->get_hw_mute)
- s->get_hw_mute(s);
-
- return s->hw_muted;
- } else
- return s->sw_muted;
+ if ((profile = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
+ if (strstr(profile, "analog"))
+ s = "-analog";
+ else if (strstr(profile, "iec958"))
+ s = "-iec958";
+ else if (strstr(profile, "hdmi"))
+ s = "-hdmi";
+ }
+
+ bus = pa_proplist_gets(p, PA_PROP_DEVICE_BUS);
+
+ pa_proplist_setf(p, PA_PROP_DEVICE_ICON_NAME, "%s%s%s%s", t, pa_strempty(s), bus ? "-" : "", pa_strempty(bus));
+
+ return TRUE;
+}
+
+/* Fills in PA_PROP_DEVICE_DESCRIPTION from form factor, class, product
+ * name and profile description, unless already set. Returns FALSE when no
+ * description could be derived. */
+pa_bool_t pa_device_init_description(pa_proplist *p) {
+ const char *s, *d = NULL, *k;
+ pa_assert(p);
+
+ if (pa_proplist_contains(p, PA_PROP_DEVICE_DESCRIPTION))
+ return TRUE;
+
+ if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
+ if (pa_streq(s, "internal"))
+ d = _("Internal Audio");
+
+ if (!d)
+ if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS)))
+ if (pa_streq(s, "modem"))
+ d = _("Modem");
+
+ if (!d)
+ d = pa_proplist_gets(p, PA_PROP_DEVICE_PRODUCT_NAME);
+
+ if (!d)
+ return FALSE;
+
+ k = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_DESCRIPTION);
+
+ if (d && k)
+ pa_proplist_setf(p, PA_PROP_DEVICE_DESCRIPTION, _("%s %s"), d, k);
+ else if (d)
+ pa_proplist_sets(p, PA_PROP_DEVICE_DESCRIPTION, d);
+
+ return TRUE;
+}
+
+/* Sets PA_PROP_DEVICE_INTENDED_ROLES to "phone" for phone-like form
+ * factors, unless already set. Returns TRUE if the property is set. */
+pa_bool_t pa_device_init_intended_roles(pa_proplist *p) {
+ const char *s;
+ pa_assert(p);
+
+ if (pa_proplist_contains(p, PA_PROP_DEVICE_INTENDED_ROLES))
+ return TRUE;
+
+ if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR)))
+ if (pa_streq(s, "handset") || pa_streq(s, "hands-free")
+ || pa_streq(s, "headset")) {
+ pa_proplist_sets(p, PA_PROP_DEVICE_INTENDED_ROLES, "phone");
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+unsigned pa_device_init_priority(pa_proplist *p) {
+ const char *s;
+ unsigned priority = 0;
+
+ pa_assert(p);
+
+ if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_CLASS))) {
+
+ if (pa_streq(s, "sound"))
+ priority += 9000;
+ else if (!pa_streq(s, "modem"))
+ priority += 1000;
+ }
+
+ if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_FORM_FACTOR))) {
+
+ if (pa_streq(s, "internal"))
+ priority += 900;
+ else if (pa_streq(s, "speaker"))
+ priority += 500;
+ else if (pa_streq(s, "headphone"))
+ priority += 400;
+ }
+
+ if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_BUS))) {
+
+ if (pa_streq(s, "pci"))
+ priority += 50;
+ else if (pa_streq(s, "usb"))
+ priority += 40;
+ else if (pa_streq(s, "bluetooth"))
+ priority += 30;
+ }
+
+ if ((s = pa_proplist_gets(p, PA_PROP_DEVICE_PROFILE_NAME))) {
+
+ if (pa_startswith(s, "analog-"))
+ priority += 9;
+ else if (pa_startswith(s, "iec958-"))
+ priority += 8;
+ }
+
+ return priority;
+}
+
/* Lock-free free list used to recycle pa_sink_volume_change entries;
 * entries that do not fit are released with pa_xfree. */
PA_STATIC_FLIST_DECLARE(pa_sink_volume_change, 0, pa_xfree);
+
+/* Called from the IO thread. */
+static pa_sink_volume_change *pa_sink_volume_change_new(pa_sink *s) {
+ pa_sink_volume_change *c;
+ if (!(c = pa_flist_pop(PA_STATIC_FLIST_GET(pa_sink_volume_change))))
+ c = pa_xnew(pa_sink_volume_change, 1);
+
+ PA_LLIST_INIT(pa_sink_volume_change, c);
+ c->at = 0;
+ pa_cvolume_reset(&c->hw_volume, s->sample_spec.channels);
+ return c;
+}
+
+/* Called from the IO thread. */
+static void pa_sink_volume_change_free(pa_sink_volume_change *c) {
+ pa_assert(c);
+ if (pa_flist_push(PA_STATIC_FLIST_GET(pa_sink_volume_change), c) < 0)
+ pa_xfree(c);
+}
+
+/* Called from the IO thread. */
+void pa_sink_volume_change_push(pa_sink *s) {
+ pa_sink_volume_change *c = NULL;
+ pa_sink_volume_change *nc = NULL;
+ uint32_t safety_margin = s->thread_info.volume_change_safety_margin;
+
+ const char *direction = NULL;
+
+ pa_assert(s);
+ nc = pa_sink_volume_change_new(s);
+
+ /* NOTE: There is already more different volumes in pa_sink that I can remember.
+ * Adding one more volume for HW would get us rid of this, but I am trying
+ * to survive with the ones we already have. */
+ pa_sw_cvolume_divide(&nc->hw_volume, &s->real_volume, &s->soft_volume);
+
+ if (!s->thread_info.volume_changes && pa_cvolume_equal(&nc->hw_volume, &s->thread_info.current_hw_volume)) {
+ pa_log_debug("Volume not changing");
+ pa_sink_volume_change_free(nc);
+ return;
+ }
+
+ nc->at = pa_sink_get_latency_within_thread(s);
+ nc->at += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;
+
+ if (s->thread_info.volume_changes_tail) {
+ for (c = s->thread_info.volume_changes_tail; c; c = c->prev) {
+ /* If volume is going up let's do it a bit late. If it is going
+ * down let's do it a bit early. */
+ if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&c->hw_volume)) {
+ if (nc->at + safety_margin > c->at) {
+ nc->at += safety_margin;
+ direction = "up";
+ break;
+ }
+ }
+ else if (nc->at - safety_margin > c->at) {
+ nc->at -= safety_margin;
+ direction = "down";
+ break;
+ }
+ }
+ }
+
+ if (c == NULL) {
+ if (pa_cvolume_avg(&nc->hw_volume) > pa_cvolume_avg(&s->thread_info.current_hw_volume)) {
+ nc->at += safety_margin;
+ direction = "up";
+ } else {
+ nc->at -= safety_margin;
+ direction = "down";
+ }
+ PA_LLIST_PREPEND(pa_sink_volume_change, s->thread_info.volume_changes, nc);
+ }
+ else {
+ PA_LLIST_INSERT_AFTER(pa_sink_volume_change, s->thread_info.volume_changes, c, nc);
+ }
+
+ pa_log_debug("Volume going %s to %d at %llu", direction, pa_cvolume_avg(&nc->hw_volume), (long long unsigned) nc->at);
+
+ /* We can ignore volume events that came earlier but should happen later than this. */
+ PA_LLIST_FOREACH(c, nc->next) {
+ pa_log_debug("Volume change to %d at %llu was dropped", pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at);
+ pa_sink_volume_change_free(c);
+ }
+ nc->next = NULL;
+ s->thread_info.volume_changes_tail = nc;
+}
+
+/* Called from the IO thread. */
+static void pa_sink_volume_change_flush(pa_sink *s) {
+ pa_sink_volume_change *c = s->thread_info.volume_changes;
+ pa_assert(s);
+ s->thread_info.volume_changes = NULL;
+ s->thread_info.volume_changes_tail = NULL;
+ while (c) {
+ pa_sink_volume_change *next = c->next;
+ pa_sink_volume_change_free(c);
+ c = next;
+ }
+}
+
+/* Called from the IO thread. */
+pa_bool_t pa_sink_volume_change_apply(pa_sink *s, pa_usec_t *usec_to_next) {
+ pa_usec_t now = pa_rtclock_now();
+ pa_bool_t ret = FALSE;
+
+ pa_assert(s);
+ pa_assert(s->write_volume);
+
+ while (s->thread_info.volume_changes && now >= s->thread_info.volume_changes->at) {
+ pa_sink_volume_change *c = s->thread_info.volume_changes;
+ PA_LLIST_REMOVE(pa_sink_volume_change, s->thread_info.volume_changes, c);
+ pa_log_debug("Volume change to %d at %llu was written %llu usec late",
+ pa_cvolume_avg(&c->hw_volume), (long long unsigned) c->at, (long long unsigned) (now - c->at));
+ ret = TRUE;
+ s->thread_info.current_hw_volume = c->hw_volume;
+ pa_sink_volume_change_free(c);
+ }
+
+ if (s->write_volume && ret)
+ s->write_volume(s);
+
+ if (s->thread_info.volume_changes) {
+ if (usec_to_next)
+ *usec_to_next = s->thread_info.volume_changes->at - now;
+ if (pa_log_ratelimit(PA_LOG_DEBUG))
+ pa_log_debug("Next volume change in %lld usec", (long long) (s->thread_info.volume_changes->at - now));
+ }
+ else {
+ if (usec_to_next)
+ *usec_to_next = 0;
+ s->thread_info.volume_changes_tail = NULL;
+ }
+ return ret;
+}
+
/* Called from the IO thread. */
/* Compensates the queued volume change schedule for a rewind of nbytes:
 * events scheduled safely beyond the current latency horizon are pulled
 * earlier by the rewound duration (clamped so they never cross that
 * horizon), then any changes that became due are applied immediately. */
static void pa_sink_volume_change_rewind(pa_sink *s, size_t nbytes) {
    /* All the queued volume events later than current latency are shifted to happen earlier. */
    pa_sink_volume_change *c;
    pa_volume_t prev_vol = pa_cvolume_avg(&s->thread_info.current_hw_volume);
    /* Duration of audio discarded by the rewind */
    pa_usec_t rewound = pa_bytes_to_usec(nbytes, &s->sample_spec);
    pa_usec_t limit = pa_sink_get_latency_within_thread(s);

    pa_log_debug("latency = %lld", (long long) limit);
    /* Absolute time before which queued events must not be moved */
    limit += pa_rtclock_now() + s->thread_info.volume_change_extra_delay;

    PA_LLIST_FOREACH(c, s->thread_info.volume_changes) {
        pa_usec_t modified_limit = limit;
        /* Same policy as pa_sink_volume_change_push(): volume drops may
         * fire a bit early, rises a bit late. */
        if (prev_vol > pa_cvolume_avg(&c->hw_volume))
            modified_limit -= s->thread_info.volume_change_safety_margin;
        else
            modified_limit += s->thread_info.volume_change_safety_margin;
        if (c->at > modified_limit) {
            c->at -= rewound;
            /* Never pull an event earlier than its adjusted horizon */
            if (c->at < modified_limit)
                c->at = modified_limit;
        }
        prev_vol = pa_cvolume_avg(&c->hw_volume);
    }
    pa_sink_volume_change_apply(s, NULL);
}
+
+/* Called from the main thread */
+/* Gets the list of formats supported by the sink. The members and idxset must
+ * be freed by the caller. */
+pa_idxset* pa_sink_get_formats(pa_sink *s) {
+ pa_idxset *ret;
+
+ pa_assert(s);
+
+ if (s->get_formats) {
+ /* Sink supports format query, all is good */
+ ret = s->get_formats(s);
+ } else {
+ /* Sink doesn't support format query, so assume it does PCM */
+ pa_format_info *f = pa_format_info_new();
+ f->encoding = PA_ENCODING_PCM;
+
+ ret = pa_idxset_new(NULL, NULL);
+ pa_idxset_put(ret, f, NULL);
+ }
+
+ return ret;
+}
+
+/* Called from the main thread */
+/* Checks if the sink can accept this format */
+pa_bool_t pa_sink_check_format(pa_sink *s, pa_format_info *f)
+{
+ pa_idxset *formats = NULL;
+ pa_bool_t ret = FALSE;
+
+ pa_assert(s);
+ pa_assert(f);
+
+ formats = pa_sink_get_formats(s);
+
+ if (formats) {
+ pa_format_info *finfo_device;
+ uint32_t i;
+
+ PA_IDXSET_FOREACH(finfo_device, formats, i) {
+ if (pa_format_info_is_compatible(finfo_device, f)) {
+ ret = TRUE;
+ break;
+ }
+ }
+
+ pa_idxset_free(formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
+ }
+
+ return ret;
+}
+
+/* Called from the main thread */
+/* Calculates the intersection between formats supported by the sink and
+ * in_formats, and returns these, in the order of the sink's formats. */
+pa_idxset* pa_sink_check_formats(pa_sink *s, pa_idxset *in_formats) {
+ pa_idxset *out_formats = pa_idxset_new(NULL, NULL), *sink_formats = NULL;
+ pa_format_info *f_sink, *f_in;
+ uint32_t i, j;
+
+ pa_assert(s);
+
+ if (!in_formats || pa_idxset_isempty(in_formats))
+ goto done;
+
+ sink_formats = pa_sink_get_formats(s);
+
+ PA_IDXSET_FOREACH(f_sink, sink_formats, i) {
+ PA_IDXSET_FOREACH(f_in, in_formats, j) {
+ if (pa_format_info_is_compatible(f_sink, f_in))
+ pa_idxset_put(out_formats, pa_format_info_copy(f_in), NULL);
+ }
+ }
+
+done:
+ if (sink_formats)
+ pa_idxset_free(sink_formats, (pa_free2_cb_t) pa_format_info_free2, NULL);
+
+ return out_formats;
}