ladybird/Userland/Libraries/LibAudio/ConnectionToServer.cpp
kleines Filmröllchen 03fac609ee AudioServer+Userland: Separate audio IPC into normal client and manager
This is a sensible separation of concerns that mirrors the WindowServer
IPC split. On the one hand, there is the "normal" audio interface, used
by clients that play audio, which is the primary service of
AudioServer. On the other hand, there is the management interface,
which, like the WindowManager endpoint, provides higher-level control
over clients and the server itself.
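As a rough illustration (class and message names on the manager side are
assumptions for this sketch, not taken from the patch itself), a playback
client and a management client would now talk to AudioServer over two
separate connections:

    // Sketch only: ConnectionToManagerServer and its messages are assumed names.
    #include <AK/Error.h>
    #include <AK/FixedArray.h>
    #include <LibAudio/ConnectionToServer.h>

    ErrorOr<void> play(FixedArray<Audio::Sample> samples)
    {
        // Playback clients keep using the regular endpoint, which owns a sample queue.
        auto playback = TRY(Audio::ConnectionToServer::try_create());
        TRY(playback->async_enqueue(move(samples)));
        return {};
    }

    ErrorOr<void> adjust_mixer()
    {
        // A mixer or settings app would use the management endpoint instead,
        // which never gets a sample queue.
        auto manager = TRY(Audio::ConnectionToManagerServer::try_create());
        manager->async_set_main_mix_volume(0.8);
        return {};
    }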

The reasons for this split are manifold; as mentioned, we are mirroring
the WindowServer split. Another indication of the sensibility of the
split is that no single audio client used the APIs of both interfaces.
Also, useless audio queues are no longer created for managing clients
(they don't need one, just as there is no window backing bitmap for
window-managing clients), eliminating the bugs that have occurred there
in the past.

Implementation-wise, we simply move the management APIs and their
implementations from the old AudioServer endpoint into the new
AudioManagerServer endpoint (and into the respective clients, of
course). There is one point of duplication, namely the hardware sample
rate. This will be fixed together with per-client sample rates, which
will eliminate client-side resampling and the related update bugs. For
now, we keep one legacy API to simplify the transition.

The new AudioManagerServer also gains a hardware sample rate change
callback, so that the main server parameters have exact symmetry
(getter, setter, and callback).
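Client-side, that symmetry might look roughly like this (the member and
message names below are illustrative assumptions, not the exact ones
from this patch):

    // Hypothetical client-side surface of the hardware sample rate parameter.
    auto manager = TRY(Audio::ConnectionToManagerServer::try_create());
    u32 rate = manager->get_hardware_sample_rate();               // getter
    manager->async_set_hardware_sample_rate(48000);               // setter
    manager->on_hardware_sample_rate_change = [](u32 new_rate) {  // callback
        dbgln("Hardware sample rate is now {} Hz", new_rate);
    };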
2023-06-25 00:16:44 +02:00

/*
 * Copyright (c) 2018-2020, Andreas Kling <kling@serenityos.org>
 * Copyright (c) 2022, kleines Filmröllchen <filmroellchen@serenityos.org>
 *
 * SPDX-License-Identifier: BSD-2-Clause
 */

#include <AK/Atomic.h>
#include <AK/Debug.h>
#include <AK/Format.h>
#include <AK/OwnPtr.h>
#include <AK/Time.h>
#include <AK/Types.h>
#include <LibAudio/ConnectionToServer.h>
#include <LibAudio/Queue.h>
#include <LibAudio/UserSampleQueue.h>
#include <LibCore/Event.h>
#include <LibCore/EventLoop.h>
#include <LibThreading/Mutex.h>
#include <sched.h>
#include <time.h>
#include <unistd.h>

namespace Audio {

ConnectionToServer::ConnectionToServer(NonnullOwnPtr<Core::LocalSocket> socket)
    : IPC::ConnectionToServer<AudioClientEndpoint, AudioServerEndpoint>(*this, move(socket))
    , m_buffer(make<AudioQueue>(MUST(AudioQueue::create())))
    , m_user_queue(make<UserSampleQueue>())
    , m_background_audio_enqueuer(Threading::Thread::construct([this]() {
        // All the background thread does is run an event loop.
        Core::EventLoop enqueuer_loop;
        m_enqueuer_loop = &enqueuer_loop;
        enqueuer_loop.exec();
        {
            Threading::MutexLocker const locker(m_enqueuer_loop_destruction);
            m_enqueuer_loop = nullptr;
        }
        return (intptr_t) nullptr;
    }))
{
    // Stay paused until the client actually provides samples; async_enqueue() resumes playback.
    async_pause_playback();
    // Share our sample queue with the server.
    set_buffer(*m_buffer);
}

ConnectionToServer::~ConnectionToServer()
{
    die();
}

void ConnectionToServer::die()
{
    {
        Threading::MutexLocker const locker(m_enqueuer_loop_destruction);
        // We sometimes get here after the other thread has already exited and its event loop no longer exists.
        if (m_enqueuer_loop != nullptr) {
            m_enqueuer_loop->wake();
            m_enqueuer_loop->quit(0);
        }
    }

    if (m_background_audio_enqueuer->is_started())
        (void)m_background_audio_enqueuer->join();
}

ErrorOr<void> ConnectionToServer::async_enqueue(FixedArray<Sample>&& samples)
{
    if (!m_background_audio_enqueuer->is_started()) {
        m_background_audio_enqueuer->start();
        // Busy-wait until the background thread has published its event loop, so that we can post events to it below.
        while (!m_enqueuer_loop)
            usleep(1);
        TRY(m_background_audio_enqueuer->set_priority(THREAD_PRIORITY_MAX));
    }

    update_good_sleep_time();
    m_user_queue->append(move(samples));
    // Wake the background thread to make sure it starts enqueuing audio.
    m_enqueuer_loop->post_event(*this, make<Core::CustomEvent>(0));
    m_enqueuer_loop->wake();
    async_start_playback();

    return {};
}

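// Drops samples that are still queued client-side; anything already written to the shared buffer is unaffected.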
void ConnectionToServer::clear_client_buffer()
{
    m_user_queue->clear();
}

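// Computes how long one server buffer takes to play at the current hardware sample rate.
// The enqueuer thread sleeps for roughly this long while it waits for space in the shared queue.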
void ConnectionToServer::update_good_sleep_time()
{
    auto sample_rate = static_cast<double>(get_sample_rate());
    auto buffer_play_time_ns = 1'000'000'000.0 / (sample_rate / static_cast<double>(AUDIO_BUFFER_SIZE));
    // A factor of 1 should be good for now.
    m_good_sleep_time = Duration::from_nanoseconds(static_cast<unsigned>(buffer_play_time_ns)).to_timespec();
}

// Non-realtime audio writing loop
void ConnectionToServer::custom_event(Core::CustomEvent&)
{
    Array<Sample, AUDIO_BUFFER_SIZE> next_chunk;
    while (true) {
        if (m_user_queue->is_empty()) {
            dbgln_if(AUDIO_DEBUG, "Reached end of provided audio data, going to sleep");
            break;
        }
        // Copy the next chunk out of the user queue; pad a partial final chunk with silence
        // so we never hand stale samples from a previous iteration to the server.
        auto available_samples = min(AUDIO_BUFFER_SIZE, m_user_queue->size());
        for (size_t i = 0; i < available_samples; ++i)
            next_chunk[i] = (*m_user_queue)[i];
        for (size_t i = available_samples; i < next_chunk.size(); ++i)
            next_chunk[i] = {};
        m_user_queue->discard_samples(available_samples);
        // FIXME: Could we receive interrupts in a good non-IPC way instead?
        auto result = m_buffer->blocking_enqueue(next_chunk, [this]() {
            nanosleep(&m_good_sleep_time, nullptr);
        });
        if (result.is_error())
            dbgln("Error while writing samples to shared buffer: {}", result.error());
    }
}

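// The realtime variants below bypass the user-side queue and the background thread,
// writing straight into the shared sample queue.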
ErrorOr<void, AudioQueue::QueueStatus> ConnectionToServer::realtime_enqueue(Array<Sample, AUDIO_BUFFER_SIZE> samples)
{
    return m_buffer->enqueue(samples);
}

ErrorOr<void> ConnectionToServer::blocking_realtime_enqueue(Array<Sample, AUDIO_BUFFER_SIZE> samples, Function<void()> wait_function)
{
    return m_buffer->blocking_enqueue(samples, move(wait_function));
}

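// Based on a weak (unsynchronized) read of the queue tail, so this may slightly lag behind the server.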
unsigned ConnectionToServer::total_played_samples() const
{
    return m_buffer->weak_tail() * AUDIO_BUFFER_SIZE;
}

unsigned ConnectionToServer::remaining_samples()
{
    return static_cast<unsigned>(m_user_queue->remaining_samples());
}

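// Number of whole buffers still sitting in the shared queue, again derived from weak (unsynchronized) queue state.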
size_t ConnectionToServer::remaining_buffers() const
{
    return m_buffer->size() - m_buffer->weak_remaining_capacity();
}

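// IPC notification handler: the server informs us that our per-client volume changed.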
void ConnectionToServer::client_volume_changed(double volume)
{
    if (on_client_volume_change)
        on_client_volume_change(volume);
}

}