From c1b6a87b27b569cda135da05b53cc98aa9ca37cb Mon Sep 17 00:00:00 2001
From: Wim Taymans
Date: Thu, 20 Aug 2009 13:40:27 +0200
Subject: alsa-sink: reduce the amount of smoother updates

Exponentially increase the amount of time between smoother updates. We
start with a 2ms interval and increase up to 200ms intervals. Smoother
updates and the resulting linear regression take a fair amount of CPU so
we want to reduce the amount of updates.
---
 src/modules/alsa/alsa-sink.c | 26 ++++++++++++++++++++++----
 1 file changed, 22 insertions(+), 4 deletions(-)

(limited to 'src/modules/alsa')

diff --git a/src/modules/alsa/alsa-sink.c b/src/modules/alsa/alsa-sink.c
index e3707ae7..c3694729 100644
--- a/src/modules/alsa/alsa-sink.c
+++ b/src/modules/alsa/alsa-sink.c
@@ -68,6 +68,9 @@
 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)      /* 10ms -- Sleep at least 10ms on each iteration */
 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)      /* 4ms -- Wakeup at least this long before the buffer runs empty*/
 
+#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)       /* 2ms -- min smoother update interval */
+#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)     /* 200ms -- max smoother update inteval */
+
 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)  /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
 
 struct userdata {
@@ -115,6 +118,8 @@ struct userdata {
     pa_smoother *smoother;
     uint64_t write_count;
     uint64_t since_start;
+    pa_usec_t smoother_interval;
+    pa_usec_t last_smoother_update;
 
     pa_reserve_wrapper *reserve;
     pa_hook_slot *reserve_slot;
@@ -723,17 +728,27 @@ static void update_smoother(struct userdata *u) {
         now1 = pa_timespec_load(&htstamp);
     }
 
+    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
+    if (now1 <= 0)
+        now1 = pa_rtclock_now();
+
+    /* check if the time since the last update is bigger than the interval */
+    if (u->last_smoother_update > 0) {
+        if (u->last_smoother_update + u->smoother_interval > now1)
+            return;
+    }
+
     position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
 
     if (PA_UNLIKELY(position < 0))
         position = 0;
 
-    /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
-    if (now1 <= 0)
-        now1 = pa_rtclock_now();
-
     now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
 
+    u->last_smoother_update = now1;
+    /* exponentially increase the update interval up to the MAX limit */
+    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
+
     pa_smoother_put(u->smoother, now1, now2);
 }
 
@@ -906,6 +921,8 @@ static int unsuspend(struct userdata *u) {
 
     u->write_count = 0;
     pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
+    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
+    u->last_smoother_update = 0;
 
     u->first = TRUE;
     u->since_start = 0;
@@ -1622,6 +1639,7 @@ pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_ca
             5,
             pa_rtclock_now(),
             TRUE);
+    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
 
     dev_id = pa_modargs_get_value(
             ma, "device_id",
-- cgit

From 8a2a6b2004cd299467de1955f7f99e25033faa63 Mon Sep 17 00:00:00 2001
From: Lennart Poettering
Date: Fri, 21 Aug 2009 03:43:53 +0200
Subject: adjust various data/library paths automatically if we are run from a build tree
---
 src/modules/alsa/alsa-mixer.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

(limited to 'src/modules/alsa')

diff --git a/src/modules/alsa/alsa-mixer.c b/src/modules/alsa/alsa-mixer.c
index a4c2ee0f..61c92cd0 100644
--- a/src/modules/alsa/alsa-mixer.c
+++ b/src/modules/alsa/alsa-mixer.c
@@ -929,7 +929,7 @@ static int element_zero_volume(pa_alsa_element *e, snd_mixer_t *m) {
 
 int pa_alsa_path_select(pa_alsa_path *p, snd_mixer_t *m) {
     pa_alsa_element *e;
-    int r;
+    int r = 0;
 
     pa_assert(m);
     pa_assert(p);
@@ -1849,7 +1849,12 @@ pa_alsa_path* pa_alsa_path_new(const char *fname, pa_alsa_direction_t direction)
     items[1].data = &p->description;
     items[2].data = &p->name;
 
-    fn = pa_maybe_prefix_path(fname, PA_ALSA_PATHS_DIR);
+    fn = pa_maybe_prefix_path(fname,
+#if defined(__linux__) && !defined(__OPTIMIZE__)
+                              pa_run_from_build_tree() ? PA_BUILDDIR "/modules/alsa/mixer/paths/" :
+#endif
+                              PA_ALSA_PATHS_DIR);
+
     r = pa_config_parse(fn, NULL, items, p);
     pa_xfree(fn);
 
@@ -3110,7 +3115,12 @@ pa_alsa_profile_set* pa_alsa_profile_set_new(const char *fname, const pa_channel
     if (!fname)
         fname = "default.conf";
 
-    fn = pa_maybe_prefix_path(fname, PA_ALSA_PROFILE_SETS_DIR);
+    fn = pa_maybe_prefix_path(fname,
+#if defined(__linux__) && !defined(__OPTIMIZE__)
+                              pa_run_from_build_tree() ? PA_BUILDDIR "/modules/alsa/mixer/profile-sets/" :
+#endif
+                              PA_ALSA_PROFILE_SETS_DIR);
+
     r = pa_config_parse(fn, NULL, items, ps);
     pa_xfree(fn);
 
-- cgit

From fe9a577cf2799c0864a05a08c785161ba3738d88 Mon Sep 17 00:00:00 2001
From: Lennart Poettering
Date: Fri, 21 Aug 2009 03:45:58 +0200
Subject: alsa: leave headphone jack enabled in normal mixer paths
---
 src/modules/alsa/mixer/paths/analog-output-lfe-on-mono.conf | 7 +++++--
 src/modules/alsa/mixer/paths/analog-output-mono.conf        | 7 +++++--
 src/modules/alsa/mixer/paths/analog-output.conf             | 7 +++++--
 3 files changed, 15 insertions(+), 6 deletions(-)

(limited to 'src/modules/alsa')

diff --git a/src/modules/alsa/mixer/paths/analog-output-lfe-on-mono.conf b/src/modules/alsa/mixer/paths/analog-output-lfe-on-mono.conf
index 2db976a5..3457d4a2 100644
--- a/src/modules/alsa/mixer/paths/analog-output-lfe-on-mono.conf
+++ b/src/modules/alsa/mixer/paths/analog-output-lfe-on-mono.conf
@@ -41,9 +41,12 @@ volume = merge
 override-map.1 = lfe
 override-map.2 = lfe,lfe
 
+; This profile path is intended to control the speaker, not the
+; headphones. But it should not hurt if we leave the headphone jack
+; enabled nonetheless.
 [Element Headphone]
-switch = off
-volume = off
+switch = mute
+volume = zero
 
 [Element Speaker]
 switch = mute
diff --git a/src/modules/alsa/mixer/paths/analog-output-mono.conf b/src/modules/alsa/mixer/paths/analog-output-mono.conf
index a58cc970..dc270cfe 100644
--- a/src/modules/alsa/mixer/paths/analog-output-mono.conf
+++ b/src/modules/alsa/mixer/paths/analog-output-mono.conf
@@ -38,9 +38,12 @@ volume = merge
 override-map.1 = all
 override-map.2 = all-left,all-right
 
+; This profile path is intended to control the speaker, not the
+; headphones. But it should not hurt if we leave the headphone jack
+; enabled nonetheless.
 [Element Headphone]
-switch = off
-volume = off
+switch = mute
+volume = zero
 
 [Element Speaker]
 switch = mute
diff --git a/src/modules/alsa/mixer/paths/analog-output.conf b/src/modules/alsa/mixer/paths/analog-output.conf
index b412a437..f71a05a1 100644
--- a/src/modules/alsa/mixer/paths/analog-output.conf
+++ b/src/modules/alsa/mixer/paths/analog-output.conf
@@ -37,9 +37,12 @@ override-map.2 = all-left,all-right
 switch = off
 volume = off
 
+; This profile path is intended to control the speaker, not the
+; headphones. But it should not hurt if we leave the headphone jack
+; enabled nonetheless.
 [Element Headphone]
-switch = off
-volume = off
+switch = mute
+volume = zero
 
 [Element Speaker]
 switch = mute
-- cgit

From 80c693730365c1a375a5c0e781f38e7f165b37bf Mon Sep 17 00:00:00 2001
From: Lennart Poettering
Date: Sun, 23 Aug 2009 22:34:42 +0200
Subject: alsa: increase interval between smoother updates exponentially for alsa sources, following the scheme for sinks
---
 src/modules/alsa/alsa-sink.c   |  8 +++-----
 src/modules/alsa/alsa-source.c | 20 ++++++++++++++++++--
 2 files changed, 21 insertions(+), 7 deletions(-)

(limited to 'src/modules/alsa')

diff --git a/src/modules/alsa/alsa-sink.c b/src/modules/alsa/alsa-sink.c
index c3694729..b99ed782 100644
--- a/src/modules/alsa/alsa-sink.c
+++ b/src/modules/alsa/alsa-sink.c
@@ -733,10 +733,9 @@ static void update_smoother(struct userdata *u) {
         now1 = pa_rtclock_now();
 
     /* check if the time since the last update is bigger than the interval */
-    if (u->last_smoother_update > 0) {
+    if (u->last_smoother_update > 0)
         if (u->last_smoother_update + u->smoother_interval > now1)
             return;
-    }
 
     position = (int64_t) u->write_count - ((int64_t) delay * (int64_t) u->frame_size);
 
@@ -745,11 +744,11 @@ static void update_smoother(struct userdata *u) {
 
     now2 = pa_bytes_to_usec((uint64_t) position, &u->sink->sample_spec);
 
+    pa_smoother_put(u->smoother, now1, now2);
+
     u->last_smoother_update = now1;
     /* exponentially increase the update interval up to the MAX limit */
     u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
-
-    pa_smoother_put(u->smoother, now1, now2);
 }
 
 static pa_usec_t sink_get_latency(struct userdata *u) {
@@ -927,7 +926,6 @@ static int unsuspend(struct userdata *u) {
 
     u->first = TRUE;
     u->since_start = 0;
-
     pa_log_info("Resumed successfully...");
 
     return 0;
diff --git a/src/modules/alsa/alsa-source.c b/src/modules/alsa/alsa-source.c
index 7da37553..336027a2 100644
--- a/src/modules/alsa/alsa-source.c
+++ b/src/modules/alsa/alsa-source.c
@@ -65,6 +65,9 @@
 #define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)    /* 10ms */
 #define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)    /* 4ms */
 
+#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)     /* 2ms */
+#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)   /* 200ms */
+
 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
 
 struct userdata {
@@ -108,6 +111,8 @@ struct userdata {
 
     pa_smoother *smoother;
     uint64_t read_count;
+    pa_usec_t smoother_interval;
+    pa_usec_t last_smoother_update;
 
     pa_reserve_wrapper *reserve;
     pa_hook_slot *reserve_slot;
@@ -691,15 +696,23 @@ static void update_smoother(struct userdata *u) {
         now1 = pa_timespec_load(&htstamp);
     }
 
-    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
-
     /* Hmm, if the timestamp is 0, then it wasn't set and we take the current time */
     if (now1 <= 0)
         now1 = pa_rtclock_now();
 
+    /* check if the time since the last update is bigger than the interval */
+    if (u->last_smoother_update > 0)
+        if (u->last_smoother_update + u->smoother_interval > now1)
+            return;
+
+    position = u->read_count + ((uint64_t) delay * (uint64_t) u->frame_size);
     now2 = pa_bytes_to_usec(position, &u->source->sample_spec);
 
     pa_smoother_put(u->smoother, now1, now2);
+
+    u->last_smoother_update = now1;
+    /* exponentially increase the update interval up to the MAX limit */
+    u->smoother_interval = PA_MIN (u->smoother_interval * 2, SMOOTHER_MAX_INTERVAL);
 }
 
 static pa_usec_t source_get_latency(struct userdata *u) {
@@ -862,6 +875,8 @@ static int unsuspend(struct userdata *u) {
 
     u->read_count = 0;
     pa_smoother_reset(u->smoother, pa_rtclock_now(), TRUE);
+    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
+    u->last_smoother_update = 0;
 
     pa_log_info("Resumed successfully...");
 
@@ -1469,6 +1484,7 @@ pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, p
             5,
             pa_rtclock_now(),
             FALSE);
+    u->smoother_interval = SMOOTHER_MIN_INTERVAL;
 
     dev_id = pa_modargs_get_value(
             ma, "device_id",
-- cgit

From 050a3a99e1d151b4f55c89f82073ef33f3399646 Mon Sep 17 00:00:00 2001
From: Lennart Poettering
Date: Mon, 24 Aug 2009 03:26:56 +0200
Subject: alsa: automatically decrease watermark after a time of stability
---
 src/modules/alsa/alsa-sink.c   | 146 +++++++++++++++++++++++++++++++----------
 src/modules/alsa/alsa-source.c | 145 ++++++++++++++++++++++++++++++----------
 2 files changed, 223 insertions(+), 68 deletions(-)

(limited to 'src/modules/alsa')

diff --git a/src/modules/alsa/alsa-sink.c b/src/modules/alsa/alsa-sink.c
index b99ed782..07d53880 100644
--- a/src/modules/alsa/alsa-sink.c
+++ b/src/modules/alsa/alsa-sink.c
@@ -62,14 +62,21 @@
 /* #define DEBUG_TIMING */
 
 #define DEFAULT_DEVICE "default"
 
-#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)       /* 2s -- Overall buffer size */
-#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)  /* 20ms -- Fill up when only this much is left in the buffer */
-#define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)     /* 10ms -- On underrun, increase watermark by this */
-#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)          /* 10ms -- Sleep at least 10ms on each iteration */
-#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)          /* 4ms -- Wakeup at least this long before the buffer runs empty*/
-#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)           /* 2ms -- min smoother update interval */
-#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)         /* 200ms -- max smoother update inteval */
+#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)              /* 2s -- Overall buffer size */
+#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)         /* 20ms -- Fill up when only this much is left in the buffer */
+
+#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)        /* 10ms -- On underrun, increase watermark by this */
+#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)         /* 5ms -- When everything's great, decrease watermark by this */
+#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)     /* 20s -- How long after a drop out recheck if things are good now */
+#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (1*PA_USEC_PER_MSEC)    /* 3ms -- If the buffer level ever below this theshold, increase the watermark */
+#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC)  /* 100ms -- If the buffer level didn't drop below this theshold in the verification time, decrease the watermark */
+
+#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                 /* 10ms -- Sleep at least 10ms on each iteration */
+#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                 /* 4ms -- Wakeup at least this long before the buffer runs empty*/
+
+#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                  /* 2ms -- min smoother update interval */
+#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)                /* 200ms -- max smoother update inteval */
 
 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)  /* don't require volume adjustments to be perfectly correct. don't necessarily extend granularity in software unless the differences get greater than this level */
 
 struct userdata {
@@ -99,7 +106,12 @@ struct userdata {
         hwbuf_unused,
         min_sleep,
         min_wakeup,
-        watermark_step;
+        watermark_inc_step,
+        watermark_dec_step,
+        watermark_inc_threshold,
+        watermark_dec_threshold;
+
+    pa_usec_t watermark_dec_not_before;
 
     unsigned nfragments;
     pa_memchunk memchunk;
@@ -248,6 +260,7 @@ static void fix_min_sleep_wakeup(struct userdata *u) {
     size_t max_use, max_use_2;
 
     pa_assert(u);
+    pa_assert(u->use_tsched);
 
     max_use = u->hwbuf_size - u->hwbuf_unused;
     max_use_2 = pa_frame_align(max_use/2, &u->sink->sample_spec);
@@ -262,6 +275,7 @@ static void fix_min_sleep_wakeup(struct userdata *u) {
 static void fix_tsched_watermark(struct userdata *u) {
     size_t max_use;
     pa_assert(u);
+    pa_assert(u->use_tsched);
 
     max_use = u->hwbuf_size - u->hwbuf_unused;
 
@@ -272,7 +286,7 @@ static void fix_tsched_watermark(struct userdata *u) {
         u->tsched_watermark = u->min_wakeup;
 }
 
-static void adjust_after_underrun(struct userdata *u) {
+static void increase_watermark(struct userdata *u) {
     size_t old_watermark;
     pa_usec_t old_min_latency, new_min_latency;
 
@@ -281,31 +295,64 @@ static void adjust_after_underrun(struct userdata *u) {
 
     /* First, just try to increase the watermark */
     old_watermark = u->tsched_watermark;
-    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
+    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
     fix_tsched_watermark(u);
 
     if (old_watermark != u->tsched_watermark) {
-        pa_log_notice("Increasing wakeup watermark to %0.2f ms",
-                      (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
+        pa_log_info("Increasing wakeup watermark to %0.2f ms",
+                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
         return;
     }
 
     /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
     old_min_latency = u->sink->thread_info.min_latency;
-    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
+    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
     new_min_latency = PA_MIN(new_min_latency, u->sink->thread_info.max_latency);
 
     if (old_min_latency != new_min_latency) {
-        pa_log_notice("Increasing minimal latency to %0.2f ms",
-                      (double) new_min_latency / PA_USEC_PER_MSEC);
+        pa_log_info("Increasing minimal latency to %0.2f ms",
+                    (double) new_min_latency / PA_USEC_PER_MSEC);
 
         pa_sink_set_latency_range_within_thread(u->sink, new_min_latency, u->sink->thread_info.max_latency);
-        return;
     }
 
     /* When we reach this we're officialy fucked! */
 }
 
+static void decrease_watermark(struct userdata *u) {
+    size_t old_watermark;
+    pa_usec_t now;
+
+    pa_assert(u);
+    pa_assert(u->use_tsched);
+
+    now = pa_rtclock_now();
+
+    if (u->watermark_dec_not_before <= 0)
+        goto restart;
+
+    if (u->watermark_dec_not_before > now)
+        return;
+
+    old_watermark = u->tsched_watermark;
+
+    if (u->tsched_watermark < u->watermark_dec_step)
+        u->tsched_watermark = u->tsched_watermark / 2;
+    else
+        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
+
+    fix_tsched_watermark(u);
+
+    if (old_watermark != u->tsched_watermark)
+        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
+                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
+
+    /* We don't change the latency range*/
+
+restart:
+    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
+}
+
 static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
     pa_usec_t usec, wm;
 
@@ -313,6 +360,7 @@ static void hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*p
     pa_assert(process_usec);
     pa_assert(u);
+    pa_assert(u->use_tsched);
 
     usec = pa_sink_get_requested_latency_within_thread(u->sink);
 
@@ -360,7 +408,7 @@ static int try_recover(struct userdata *u, const char *call, int err) {
     return 0;
 }
 
-static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
+static size_t check_left_to_play(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
     size_t left_to_play;
 
     /* We use <= instead of < for this check here because an underrun
@@ -368,34 +416,55 @@ static size_t check_left_to_play(struct userdata *u, size_t n_bytes) {
      * it is removed from the buffer. This is particularly important
      * when block transfer is used. */
 
-    if (n_bytes <= u->hwbuf_size) {
+    if (n_bytes <= u->hwbuf_size)
         left_to_play = u->hwbuf_size - n_bytes;
+    else {
+
+        /* We got a dropout. What a mess! */
+        left_to_play = 0;
 
 #ifdef DEBUG_TIMING
-        pa_log_debug("%0.2f ms left to play", (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
+        PA_DEBUG_TRAP;
 #endif
 
-    } else {
-        left_to_play = 0;
+        if (!u->first && !u->after_rewind)
+            if (pa_log_ratelimit())
+                pa_log_info("Underrun!");
+    }
 
 #ifdef DEBUG_TIMING
-        PA_DEBUG_TRAP;
+    pa_log_debug("%0.2f ms left to play; inc threshold = %0.2f ms; dec threshold = %0.2f ms",
+                 (double) pa_bytes_to_usec(left_to_play, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
+                 (double) pa_bytes_to_usec(u->watermark_inc_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC,
+                 (double) pa_bytes_to_usec(u->watermark_dec_threshold, &u->sink->sample_spec) / PA_USEC_PER_MSEC);
 #endif
 
+    if (u->use_tsched) {
+        pa_bool_t reset_not_before = TRUE;
+
         if (!u->first && !u->after_rewind) {
 
-            if (pa_log_ratelimit())
-                pa_log_info("Underrun!");
+            if (left_to_play < u->watermark_inc_threshold)
+                increase_watermark(u);
+            else if (left_to_play > u->watermark_dec_threshold) {
+                reset_not_before = FALSE;
 
-            if (u->use_tsched)
-                adjust_after_underrun(u);
+                /* We decrease the watermark only if have actually
+                 * been woken up by a timeout. If something else woke
+                 * us up it's too easy to fulfill the deadlines... */
+
+                if (on_timeout)
+                    decrease_watermark(u);
+            }
         }
+
+        if (reset_not_before)
+            u->watermark_dec_not_before = 0;
     }
 
     return left_to_play;
 }
 
-static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
+static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
     pa_bool_t work_done = TRUE;
     pa_usec_t max_sleep_usec = 0, process_usec = 0;
     size_t left_to_play;
@@ -430,7 +499,8 @@ static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polle
         pa_log_debug("avail: %lu", (unsigned long) n_bytes);
 #endif
 
-        left_to_play = check_left_to_play(u, n_bytes);
+        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
+        on_timeout = FALSE;
 
         if (u->use_tsched)
@@ -565,7 +635,7 @@ static int mmap_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polle
     return work_done ? 1 : 0;
 }
 
-static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
+static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
     pa_bool_t work_done = FALSE;
     pa_usec_t max_sleep_usec = 0, process_usec = 0;
     size_t left_to_play;
@@ -591,7 +661,8 @@ static int unix_write(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polle
         }
 
         n_bytes = (size_t) n * u->frame_size;
-        left_to_play = check_left_to_play(u, n_bytes);
+        left_to_play = check_left_to_play(u, n_bytes, on_timeout);
+        on_timeout = FALSE;
 
         if (u->use_tsched)
@@ -1278,15 +1349,16 @@ static void thread_func(void *userdata) {
         if (PA_SINK_IS_OPENED(u->sink->thread_info.state)) {
             int work_done;
             pa_usec_t sleep_usec = 0;
+            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
 
             if (PA_UNLIKELY(u->sink->thread_info.rewind_requested))
                 if (process_rewind(u) < 0)
                     goto fail;
 
             if (u->use_mmap)
-                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT);
+                work_done = mmap_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
             else
-                work_done = unix_write(u, &sleep_usec, revents & POLLOUT);
+                work_done = unix_write(u, &sleep_usec, revents & POLLOUT, on_timeout);
 
             if (work_done < 0)
                 goto fail;
@@ -1787,7 +1859,6 @@ pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_ca
     u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
     u->nfragments = nfrags;
     u->hwbuf_size = u->fragment_size * nfrags;
-    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
     pa_cvolume_mute(&u->hardware_volume, u->sink->sample_spec.channels);
 
     pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
@@ -1798,7 +1869,13 @@ pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_ca
     pa_sink_set_max_rewind(u->sink, u->hwbuf_size);
 
     if (u->use_tsched) {
-        u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->sink->sample_spec);
+        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->sink->sample_spec);
+
+        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->sink->sample_spec);
+        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->sink->sample_spec);
+
+        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->sink->sample_spec);
+        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->sink->sample_spec);
 
         fix_min_sleep_wakeup(u);
         fix_tsched_watermark(u);
@@ -1812,6 +1889,7 @@ pa_sink *pa_alsa_sink_new(pa_module *m, pa_modargs *ma, const char*driver, pa_ca
     } else
         pa_sink_set_fixed_latency(u->sink, pa_bytes_to_usec(u->hwbuf_size, &ss));
 
+
     reserve_update(u);
 
     if (update_sw_params(u) < 0)
diff --git a/src/modules/alsa/alsa-source.c b/src/modules/alsa/alsa-source.c
index 336027a2..165b2e3b 100644
--- a/src/modules/alsa/alsa-source.c
+++ b/src/modules/alsa/alsa-source.c
@@ -59,14 +59,22 @@
 /* #define DEBUG_TIMING */
 
 #define DEFAULT_DEVICE "default"
 
-#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)       /* 2s */
-#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)  /* 20ms */
-#define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)     /* 10ms */
-#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)          /* 10ms */
-#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)          /* 4ms */
-#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)           /* 2ms */
-#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)         /* 200ms */
+#define DEFAULT_TSCHED_BUFFER_USEC (2*PA_USEC_PER_SEC)              /* 2s */
+#define DEFAULT_TSCHED_WATERMARK_USEC (20*PA_USEC_PER_MSEC)         /* 20ms */
+
+#define TSCHED_WATERMARK_INC_STEP_USEC (10*PA_USEC_PER_MSEC)        /* 10ms */
+#define TSCHED_WATERMARK_DEC_STEP_USEC (5*PA_USEC_PER_MSEC)         /* 5ms */
+#define TSCHED_WATERMARK_VERIFY_AFTER_USEC (20*PA_USEC_PER_SEC)     /* 20s */
+#define TSCHED_WATERMARK_INC_THRESHOLD_USEC (1*PA_USEC_PER_MSEC)    /* 3ms */
+#define TSCHED_WATERMARK_DEC_THRESHOLD_USEC (100*PA_USEC_PER_MSEC)  /* 100ms */
+#define TSCHED_WATERMARK_STEP_USEC (10*PA_USEC_PER_MSEC)            /* 10ms */
+
+#define TSCHED_MIN_SLEEP_USEC (10*PA_USEC_PER_MSEC)                 /* 10ms */
+#define TSCHED_MIN_WAKEUP_USEC (4*PA_USEC_PER_MSEC)                 /* 4ms */
+
+#define SMOOTHER_MIN_INTERVAL (2*PA_USEC_PER_MSEC)                  /* 2ms */
+#define SMOOTHER_MAX_INTERVAL (200*PA_USEC_PER_MSEC)                /* 200ms */
 
 #define VOLUME_ACCURACY (PA_VOLUME_NORM/100)
 
@@ -96,7 +104,12 @@ struct userdata {
         hwbuf_unused,
         min_sleep,
         min_wakeup,
-        watermark_step;
+        watermark_inc_step,
+        watermark_dec_step,
+        watermark_inc_threshold,
+        watermark_dec_threshold;
+
+    pa_usec_t watermark_dec_not_before;
 
     unsigned nfragments;
 
@@ -241,6 +254,7 @@ static int reserve_monitor_init(struct userdata *u, const char *dname) {
 static void fix_min_sleep_wakeup(struct userdata *u) {
     size_t max_use, max_use_2;
     pa_assert(u);
+    pa_assert(u->use_tsched);
 
     max_use = u->hwbuf_size - u->hwbuf_unused;
     max_use_2 = pa_frame_align(max_use/2, &u->source->sample_spec);
@@ -255,6 +269,7 @@ static void fix_min_sleep_wakeup(struct userdata *u) {
 static void fix_tsched_watermark(struct userdata *u) {
     size_t max_use;
     pa_assert(u);
+    pa_assert(u->use_tsched);
 
     max_use = u->hwbuf_size - u->hwbuf_unused;
 
@@ -265,7 +280,7 @@ static void fix_tsched_watermark(struct userdata *u) {
         u->tsched_watermark = u->min_wakeup;
 }
 
-static void adjust_after_overrun(struct userdata *u) {
+static void increase_watermark(struct userdata *u) {
     size_t old_watermark;
     pa_usec_t old_min_latency, new_min_latency;
 
@@ -274,36 +289,72 @@ static void adjust_after_overrun(struct userdata *u) {
 
     /* First, just try to increase the watermark */
     old_watermark = u->tsched_watermark;
-    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_step);
-
+    u->tsched_watermark = PA_MIN(u->tsched_watermark * 2, u->tsched_watermark + u->watermark_inc_step);
     fix_tsched_watermark(u);
 
     if (old_watermark != u->tsched_watermark) {
-        pa_log_notice("Increasing wakeup watermark to %0.2f ms",
-                      (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
+        pa_log_info("Increasing wakeup watermark to %0.2f ms",
+                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
         return;
     }
 
     /* Hmm, we cannot increase the watermark any further, hence let's raise the latency */
     old_min_latency = u->source->thread_info.min_latency;
-    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_STEP_USEC);
+    new_min_latency = PA_MIN(old_min_latency * 2, old_min_latency + TSCHED_WATERMARK_INC_STEP_USEC);
     new_min_latency = PA_MIN(new_min_latency, u->source->thread_info.max_latency);
 
     if (old_min_latency != new_min_latency) {
-        pa_log_notice("Increasing minimal latency to %0.2f ms",
-                      (double) new_min_latency / PA_USEC_PER_MSEC);
+        pa_log_info("Increasing minimal latency to %0.2f ms",
+                    (double) new_min_latency / PA_USEC_PER_MSEC);
 
         pa_source_set_latency_range_within_thread(u->source, new_min_latency, u->source->thread_info.max_latency);
-        return;
     }
 
     /* When we reach this we're officialy fucked! */
 }
 
+static void decrease_watermark(struct userdata *u) {
+    size_t old_watermark;
+    pa_usec_t now;
+
+    pa_assert(u);
+    pa_assert(u->use_tsched);
+
+    now = pa_rtclock_now();
+
+    if (u->watermark_dec_not_before <= 0)
+        goto restart;
+
+    if (u->watermark_dec_not_before > now)
+        return;
+
+    old_watermark = u->tsched_watermark;
+
+    if (u->tsched_watermark < u->watermark_dec_step)
+        u->tsched_watermark = u->tsched_watermark / 2;
+    else
+        u->tsched_watermark = PA_MAX(u->tsched_watermark / 2, u->tsched_watermark - u->watermark_dec_step);
+
+    fix_tsched_watermark(u);
+
+    if (old_watermark != u->tsched_watermark)
+        pa_log_info("Decreasing wakeup watermark to %0.2f ms",
+                    (double) pa_bytes_to_usec(u->tsched_watermark, &u->source->sample_spec) / PA_USEC_PER_MSEC);
+
+    /* We don't change the latency range*/
+
+restart:
+    u->watermark_dec_not_before = now + TSCHED_WATERMARK_VERIFY_AFTER_USEC;
+}
+
 static pa_usec_t hw_sleep_time(struct userdata *u, pa_usec_t *sleep_usec, pa_usec_t*process_usec) {
     pa_usec_t wm, usec;
 
+    pa_assert(sleep_usec);
+    pa_assert(process_usec);
+    pa_assert(u);
+    pa_assert(u->use_tsched);
 
     usec = pa_source_get_requested_latency_within_thread(u->source);
 
@@ -352,7 +403,7 @@ static int try_recover(struct userdata *u, const char *call, int err) {
     return 0;
 }
 
-static size_t check_left_to_record(struct userdata *u, size_t n_bytes) {
+static size_t check_left_to_record(struct userdata *u, size_t n_bytes, pa_bool_t on_timeout) {
     size_t left_to_record;
     size_t rec_space = u->hwbuf_size - u->hwbuf_unused;
 
@@ -361,14 +412,11 @@ static size_t check_left_to_record(struct userdata *u, size_t n_bytes) {
      * it is removed from the buffer. This is particularly important
     * when block transfer is used. */
 
-    if (n_bytes <= rec_space) {
+    if (n_bytes <= rec_space)
         left_to_record = rec_space - n_bytes;
+    else {
 
-#ifdef DEBUG_TIMING
-        pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
-#endif
-
-    } else {
+        /* We got a dropout. What a mess! */
         left_to_record = 0;
 
 #ifdef DEBUG_TIMING
@@ -377,15 +425,36 @@ static size_t check_left_to_record(struct userdata *u, size_t n_bytes) {
 
         if (pa_log_ratelimit())
             pa_log_info("Overrun!");
+    }
 
-        if (u->use_tsched)
-            adjust_after_overrun(u);
+#ifdef DEBUG_TIMING
+    pa_log_debug("%0.2f ms left to record", (double) pa_bytes_to_usec(left_to_record, &u->source->sample_spec) / PA_USEC_PER_MSEC);
+#endif
+
+    if (u->use_tsched) {
+        pa_bool_t reset_not_before = TRUE;
+
+        if (left_to_record < u->watermark_inc_threshold)
+            increase_watermark(u);
+        else if (left_to_record > u->watermark_dec_threshold) {
+            reset_not_before = FALSE;
+
+            /* We decrease the watermark only if have actually been
+             * woken up by a timeout. If something else woke us up
+             * it's too easy to fulfill the deadlines... */
+
+            if (on_timeout)
+                decrease_watermark(u);
+        }
+
+        if (reset_not_before)
+            u->watermark_dec_not_before = 0;
     }
 
     return left_to_record;
 }
 
-static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
+static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
     pa_bool_t work_done = FALSE;
     pa_usec_t max_sleep_usec = 0, process_usec = 0;
     size_t left_to_record;
@@ -417,7 +486,8 @@ static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled
         pa_log_debug("avail: %lu", (unsigned long) n_bytes);
 #endif
 
-        left_to_record = check_left_to_record(u, n_bytes);
+        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
+        on_timeout = FALSE;
 
         if (u->use_tsched)
             if (!polled &&
@@ -543,7 +613,7 @@ static int mmap_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled
     return work_done ? 1 : 0;
 }
 
-static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled) {
+static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled, pa_bool_t on_timeout) {
     int work_done = FALSE;
     pa_usec_t max_sleep_usec = 0, process_usec = 0;
     size_t left_to_record;
@@ -570,7 +640,8 @@ static int unix_read(struct userdata *u, pa_usec_t *sleep_usec, pa_bool_t polled
         }
 
         n_bytes = (size_t) n * u->frame_size;
-        left_to_record = check_left_to_record(u, n_bytes);
+        left_to_record = check_left_to_record(u, n_bytes, on_timeout);
+        on_timeout = FALSE;
 
         if (u->use_tsched)
             if (!polled &&
@@ -1158,11 +1229,12 @@ static void thread_func(void *userdata) {
         if (PA_SOURCE_IS_OPENED(u->source->thread_info.state)) {
             int work_done;
             pa_usec_t sleep_usec = 0;
+            pa_bool_t on_timeout = pa_rtpoll_timer_elapsed(u->rtpoll);
 
             if (u->use_mmap)
-                work_done = mmap_read(u, &sleep_usec, revents & POLLIN);
+                work_done = mmap_read(u, &sleep_usec, revents & POLLIN, on_timeout);
             else
-                work_done = unix_read(u, &sleep_usec, revents & POLLIN);
+                work_done = unix_read(u, &sleep_usec, revents & POLLIN, on_timeout);
 
             if (work_done < 0)
                 goto fail;
@@ -1632,7 +1704,6 @@ pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, p
     u->fragment_size = frag_size = (uint32_t) (period_frames * frame_size);
     u->nfragments = nfrags;
     u->hwbuf_size = u->fragment_size * nfrags;
-    u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);
     pa_cvolume_mute(&u->hardware_volume, u->source->sample_spec.channels);
 
     pa_log_info("Using %u fragments of size %lu bytes, buffer time is %0.2fms",
@@ -1640,7 +1711,13 @@ pa_source *pa_alsa_source_new(pa_module *m, pa_modargs *ma, const char*driver, p
                 (double) pa_bytes_to_usec(u->hwbuf_size, &ss) / PA_USEC_PER_MSEC);
 
     if (u->use_tsched) {
-        u->watermark_step = pa_usec_to_bytes(TSCHED_WATERMARK_STEP_USEC, &u->source->sample_spec);
+        u->tsched_watermark = pa_usec_to_bytes_round_up(pa_bytes_to_usec_round_up(tsched_watermark, &requested_ss), &u->source->sample_spec);
+
+        u->watermark_inc_step = pa_usec_to_bytes(TSCHED_WATERMARK_INC_STEP_USEC, &u->source->sample_spec);
+        u->watermark_dec_step = pa_usec_to_bytes(TSCHED_WATERMARK_DEC_STEP_USEC, &u->source->sample_spec);
+
+        u->watermark_inc_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_INC_THRESHOLD_USEC, &u->source->sample_spec);
+        u->watermark_dec_threshold = pa_usec_to_bytes_round_up(TSCHED_WATERMARK_DEC_THRESHOLD_USEC, &u->source->sample_spec);
 
         fix_min_sleep_wakeup(u);
         fix_tsched_watermark(u);
-- cgit
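The first and fourth patches above implement a simple rate limiter for smoother updates: an update is only accepted if at least smoother_interval microseconds have passed since the last accepted one, and the interval then doubles, from 2ms up to a 200ms cap, resetting to the minimum whenever the device is unsuspended. The standalone C sketch below illustrates that back-off pattern outside of PulseAudio; the timing_state struct, timing_maybe_update() and the simulated millisecond loop are invented for this illustration and are not part of the patches.

#include <stdint.h>
#include <stdio.h>

#define USEC_PER_MSEC          1000ULL
#define SMOOTHER_MIN_INTERVAL  (2 * USEC_PER_MSEC)    /* 2ms  -- initial spacing between updates */
#define SMOOTHER_MAX_INTERVAL  (200 * USEC_PER_MSEC)  /* 200ms -- spacing cap */

typedef uint64_t usec_t;

struct timing_state {
    usec_t interval;     /* current minimum spacing between accepted updates */
    usec_t last_update;  /* timestamp of the last accepted update, 0 = never */
};

static void timing_reset(struct timing_state *s) {
    s->interval = SMOOTHER_MIN_INTERVAL;
    s->last_update = 0;
}

/* Returns 1 if the caller should feed a new sample to the smoother now,
 * 0 if the update arrived too soon and should be skipped. */
static int timing_maybe_update(struct timing_state *s, usec_t now) {
    if (s->last_update > 0 && s->last_update + s->interval > now)
        return 0; /* too soon -- skip this update to save CPU */

    s->last_update = now;

    /* Exponentially widen the spacing, capped at SMOOTHER_MAX_INTERVAL,
     * mirroring u->smoother_interval = PA_MIN(u->smoother_interval * 2, ...) */
    s->interval = s->interval * 2 < SMOOTHER_MAX_INTERVAL ? s->interval * 2 : SMOOTHER_MAX_INTERVAL;
    return 1;
}

int main(void) {
    struct timing_state s;
    timing_reset(&s);

    /* Simulate one wakeup per millisecond for 50ms: only a handful of
     * updates get through, at roughly exponentially growing distances. */
    for (usec_t t = 1 * USEC_PER_MSEC; t <= 50 * USEC_PER_MSEC; t += USEC_PER_MSEC)
        if (timing_maybe_update(&s, t))
            printf("update accepted at %2llu ms, next interval %llu ms\n",
                   (unsigned long long) (t / USEC_PER_MSEC),
                   (unsigned long long) (s.interval / USEC_PER_MSEC));

    return 0;
}

With the assumed 1ms wakeup cadence this accepts updates at roughly 1, 5, 13 and 29 ms and then waits for the next 32ms window, which is the CPU-saving behaviour the commit messages describe for the real smoother.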