Merge pull request #842 from bunnei/audio-core
Initial implementation of Audio Core
commit e1d66ea794

src/CMakeLists.txt
@@ -3,6 +3,7 @@ include_directories(.)
 
 add_subdirectory(common)
 add_subdirectory(core)
+add_subdirectory(audio_core)
 add_subdirectory(video_core)
 add_subdirectory(input_common)
 add_subdirectory(tests)

src/audio_core/CMakeLists.txt (new file)
@@ -0,0 +1,11 @@
+add_library(audio_core STATIC
+    audio_out.cpp
+    audio_out.h
+    buffer.h
+    stream.cpp
+    stream.h
+)
+
+create_target_directory_groups(audio_core)
+
+target_link_libraries(audio_core PUBLIC common core)

src/audio_core/audio_out.cpp (new file)
@@ -0,0 +1,50 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "audio_core/audio_out.h"
+#include "common/assert.h"
+#include "common/logging/log.h"
+
+namespace AudioCore {
+
+/// Returns the stream format from the specified number of channels
+static Stream::Format ChannelsToStreamFormat(int num_channels) {
+    switch (num_channels) {
+    case 1:
+        return Stream::Format::Mono16;
+    case 2:
+        return Stream::Format::Stereo16;
+    case 6:
+        return Stream::Format::Multi51Channel16;
+    }
+
+    LOG_CRITICAL(Audio, "Unimplemented num_channels={}", num_channels);
+    UNREACHABLE();
+    return {};
+}
+
+StreamPtr AudioOut::OpenStream(int sample_rate, int num_channels,
+                               Stream::ReleaseCallback&& release_callback) {
+    streams.push_back(std::make_shared<Stream>(sample_rate, ChannelsToStreamFormat(num_channels),
+                                               std::move(release_callback)));
+    return streams.back();
+}
+
+std::vector<u64> AudioOut::GetTagsAndReleaseBuffers(StreamPtr stream, size_t max_count) {
+    return stream->GetTagsAndReleaseBuffers(max_count);
+}
+
+void AudioOut::StartStream(StreamPtr stream) {
+    stream->Play();
+}
+
+void AudioOut::StopStream(StreamPtr stream) {
+    stream->Stop();
+}
+
+bool AudioOut::QueueBuffer(StreamPtr stream, Buffer::Tag tag, std::vector<u8>&& data) {
+    return stream->QueueBuffer(std::make_shared<Buffer>(tag, std::move(data)));
+}
+
+} // namespace AudioCore

src/audio_core/audio_out.h (new file)
@@ -0,0 +1,44 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <memory>
+#include <vector>
+
+#include "audio_core/buffer.h"
+#include "audio_core/stream.h"
+#include "common/common_types.h"
+
+namespace AudioCore {
+
+using StreamPtr = std::shared_ptr<Stream>;
+
+/**
+ * Represents an audio playback interface, used to open and play audio streams
+ */
+class AudioOut {
+public:
+    /// Opens a new audio stream
+    StreamPtr OpenStream(int sample_rate, int num_channels,
+                         Stream::ReleaseCallback&& release_callback);
+
+    /// Returns a vector of recently released buffers specified by tag for the specified stream
+    std::vector<u64> GetTagsAndReleaseBuffers(StreamPtr stream, size_t max_count);
+
+    /// Starts an audio stream for playback
+    void StartStream(StreamPtr stream);
+
+    /// Stops an audio stream that is currently playing
+    void StopStream(StreamPtr stream);
+
+    /// Queues a buffer into the specified audio stream, returns true on success
+    bool QueueBuffer(StreamPtr stream, Buffer::Tag tag, std::vector<u8>&& data);
+
+private:
+    /// Active audio streams on the interface
+    std::vector<StreamPtr> streams;
+};
+
+} // namespace AudioCore
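
Not part of the commit: a minimal usage sketch of the AudioOut interface above, assuming the audio_core headers are available and core timing is running so the scheduled release event can fire. The function name, tag value, and buffer size are illustrative only.

#include <utility>
#include <vector>

#include "audio_core/audio_out.h"

// Open a stereo 48 kHz stream, queue one tagged buffer, and start playback.
// The release callback runs once the core timing event scheduled by the stream
// fires, i.e. after the buffer's playback duration has elapsed.
void ExampleAudioOutUsage(AudioCore::AudioOut& audio_out) {
    AudioCore::StreamPtr stream = audio_out.OpenStream(
        48000, 2, []() { /* e.g. signal a guest event that a buffer was released */ });

    std::vector<u8> samples(4096); // 1024 zeroed Stereo16 frames
    const AudioCore::Buffer::Tag tag{0xCAFE};
    if (audio_out.QueueBuffer(stream, tag, std::move(samples))) {
        audio_out.StartStream(stream);
    }

    // Later, once the release callback has fired, the queued tag becomes retrievable:
    const std::vector<u64> released = audio_out.GetTagsAndReleaseBuffers(stream, 1);
    // released would then contain {0xCAFE}.
}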

src/audio_core/buffer.h (new file)
@@ -0,0 +1,37 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <vector>
+
+#include "common/common_types.h"
+
+namespace AudioCore {
+
+/**
+ * Represents a buffer of audio samples to be played in an audio stream
+ */
+class Buffer {
+public:
+    using Tag = u64;
+
+    Buffer(Tag tag, std::vector<u8>&& data) : tag{tag}, data{std::move(data)} {}
+
+    /// Returns the raw audio data for the buffer
+    const std::vector<u8>& GetData() const {
+        return data;
+    }
+
+    /// Returns the buffer tag, this is provided by the game to the audout service
+    Tag GetTag() const {
+        return tag;
+    }
+
+private:
+    Tag tag;
+    std::vector<u8> data;
+};
+
+} // namespace AudioCore

src/audio_core/stream.cpp (new file)
@@ -0,0 +1,103 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+#include "common/logging/log.h"
+#include "core/core_timing.h"
+#include "core/core_timing_util.h"
+
+#include "audio_core/stream.h"
+
+namespace AudioCore {
+
+constexpr size_t MaxAudioBufferCount{32};
+
+/// Returns the sample size for the specified audio stream format
+static size_t SampleSizeFromFormat(Stream::Format format) {
+    switch (format) {
+    case Stream::Format::Mono16:
+        return 2;
+    case Stream::Format::Stereo16:
+        return 4;
+    case Stream::Format::Multi51Channel16:
+        return 12;
+    };
+
+    LOG_CRITICAL(Audio, "Unimplemented format={}", static_cast<u32>(format));
+    UNREACHABLE();
+    return {};
+}
+
+Stream::Stream(int sample_rate, Format format, ReleaseCallback&& release_callback)
+    : sample_rate{sample_rate}, format{format}, release_callback{std::move(release_callback)} {
+    release_event = CoreTiming::RegisterEvent(
+        "Stream::Release", [this](u64 userdata, int cycles_late) { ReleaseActiveBuffer(); });
+}
+
+void Stream::Play() {
+    state = State::Playing;
+    PlayNextBuffer();
+}
+
+void Stream::Stop() {
+    ASSERT_MSG(false, "Unimplemented");
+}
+
+s64 Stream::GetBufferReleaseCycles(const Buffer& buffer) const {
+    const size_t num_samples{buffer.GetData().size() / SampleSizeFromFormat(format)};
+    return CoreTiming::usToCycles((static_cast<u64>(num_samples) * 1000000) / sample_rate);
+}
+
+void Stream::PlayNextBuffer() {
+    if (!IsPlaying()) {
+        // Ensure we are in playing state before playing the next buffer
+        return;
+    }
+
+    if (active_buffer) {
+        // Do not queue a new buffer if we are already playing a buffer
+        return;
+    }
+
+    if (queued_buffers.empty()) {
+        // No queued buffers - we are effectively paused
+        return;
+    }
+
+    active_buffer = queued_buffers.front();
+    queued_buffers.pop();
+
+    CoreTiming::ScheduleEventThreadsafe(GetBufferReleaseCycles(*active_buffer), release_event, {});
+}
+
+void Stream::ReleaseActiveBuffer() {
+    released_buffers.push(std::move(active_buffer));
+    release_callback();
+    PlayNextBuffer();
+}
+
+bool Stream::QueueBuffer(BufferPtr&& buffer) {
+    if (queued_buffers.size() < MaxAudioBufferCount) {
+        queued_buffers.push(std::move(buffer));
+        PlayNextBuffer();
+        return true;
+    }
+    return false;
+}
+
+bool Stream::ContainsBuffer(Buffer::Tag tag) const {
+    ASSERT_MSG(false, "Unimplemented");
+    return {};
+}
+
+std::vector<Buffer::Tag> Stream::GetTagsAndReleaseBuffers(size_t max_count) {
+    std::vector<Buffer::Tag> tags;
+    for (size_t count = 0; count < max_count && !released_buffers.empty(); ++count) {
+        tags.push_back(released_buffers.front()->GetTag());
+        released_buffers.pop();
+    }
+    return tags;
+}
+
+} // namespace AudioCore
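
For reference (not part of the commit), the release scheduling in Stream::GetBufferReleaseCycles reduces to a samples-to-microseconds conversion before CoreTiming::usToCycles is applied. A standalone mirror of that arithmetic, using an illustrative buffer size:

#include <cstdint>
#include <cstdio>

int main() {
    // Stream::Format::Stereo16 -> 4 bytes per sample frame (see SampleSizeFromFormat).
    const std::uint64_t buffer_bytes = 8192; // illustrative buffer size
    const std::uint64_t sample_size = 4;
    const std::uint64_t sample_rate = 48000; // audout default sample rate
    const std::uint64_t num_samples = buffer_bytes / sample_size;   // 2048 frames
    const std::uint64_t us = (num_samples * 1000000) / sample_rate; // 42666 microseconds
    std::printf("buffer plays for %llu us before the release event fires\n",
                static_cast<unsigned long long>(us));
    return 0;
}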

src/audio_core/stream.h (new file)
@@ -0,0 +1,89 @@
+// Copyright 2018 yuzu Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <functional>
+#include <memory>
+#include <vector>
+#include <queue>
+
+#include "audio_core/buffer.h"
+#include "common/assert.h"
+#include "common/common_types.h"
+#include "core/core_timing.h"
+
+namespace AudioCore {
+
+using BufferPtr = std::shared_ptr<Buffer>;
+
+/**
+ * Represents an audio stream, which is a sequence of queued buffers, to be outputed by AudioOut
+ */
+class Stream {
+public:
+    /// Audio format of the stream
+    enum class Format {
+        Mono16,
+        Stereo16,
+        Multi51Channel16,
+    };
+
+    /// Callback function type, used to change guest state on a buffer being released
+    using ReleaseCallback = std::function<void()>;
+
+    Stream(int sample_rate, Format format, ReleaseCallback&& release_callback);
+
+    /// Plays the audio stream
+    void Play();
+
+    /// Stops the audio stream
+    void Stop();
+
+    /// Queues a buffer into the audio stream, returns true on success
+    bool QueueBuffer(BufferPtr&& buffer);
+
+    /// Returns true if the audio stream contains a buffer with the specified tag
+    bool ContainsBuffer(Buffer::Tag tag) const;
+
+    /// Returns a vector of recently released buffers specified by tag
+    std::vector<Buffer::Tag> GetTagsAndReleaseBuffers(size_t max_count);
+
+    /// Returns true if the stream is currently playing
+    bool IsPlaying() const {
+        return state == State::Playing;
+    }
+
+    /// Returns the number of queued buffers
+    size_t GetQueueSize() const {
+        return queued_buffers.size();
+    }
+
+private:
+    /// Current state of the stream
+    enum class State {
+        Stopped,
+        Playing,
+    };
+
+    /// Plays the next queued buffer in the audio stream, starting playback if necessary
+    void PlayNextBuffer();
+
+    /// Releases the actively playing buffer, signalling that it has been completed
+    void ReleaseActiveBuffer();
+
+    /// Gets the number of core cycles when the specified buffer will be released
+    s64 GetBufferReleaseCycles(const Buffer& buffer) const;
+
+    int sample_rate;                        ///< Sample rate of the stream
+    Format format;                          ///< Format of the stream
+    ReleaseCallback release_callback;       ///< Buffer release callback for the stream
+    State state{State::Stopped};            ///< Playback state of the stream
+    CoreTiming::EventType* release_event{}; ///< Core timing release event for the stream
+    BufferPtr active_buffer;                ///< Actively playing buffer in the stream
+    std::queue<BufferPtr> queued_buffers;   ///< Buffers queued to be played in the stream
+    std::queue<BufferPtr> released_buffers; ///< Buffers recently released from the stream
+};
+
+} // namespace AudioCore
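
Not part of the commit: a sketch of the buffer lifecycle this class implements, again assuming core timing is running so the scheduled release event can fire. The function name and tag value are illustrative.

#include <memory>
#include <utility>
#include <vector>

#include "audio_core/buffer.h"
#include "audio_core/stream.h"

// QueueBuffer() -> PlayNextBuffer() schedules the release event ->
// ReleaseActiveBuffer() moves the buffer to released_buffers and invokes the
// release callback -> GetTagsAndReleaseBuffers() hands the tag back to the caller.
void StreamLifecycleSketch(AudioCore::Stream& stream) {
    std::vector<u8> pcm(4096); // one Stereo16 buffer of zeroed samples
    stream.QueueBuffer(std::make_shared<AudioCore::Buffer>(1, std::move(pcm)));
    stream.Play();

    // After the core timing event has fired, tag 1 is returned here:
    const std::vector<AudioCore::Buffer::Tag> tags = stream.GetTagsAndReleaseBuffers(8);
}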

src/core/CMakeLists.txt
@@ -309,7 +309,7 @@ add_library(core STATIC
 
 create_target_directory_groups(core)
 
-target_link_libraries(core PUBLIC common PRIVATE video_core)
+target_link_libraries(core PUBLIC common PRIVATE audio_core video_core)
 target_link_libraries(core PUBLIC Boost::boost PRIVATE fmt lz4_static unicorn)
 
 if (ARCHITECTURE_x86_64)

src/core/core.cpp
@@ -177,6 +177,7 @@ System::ResultStatus System::Init(EmuWindow* emu_window, u32 system_mode) {
     }
 
     gpu_core = std::make_unique<Tegra::GPU>();
+    audio_core = std::make_unique<AudioCore::AudioOut>();
     telemetry_session = std::make_unique<Core::TelemetrySession>();
     service_manager = std::make_shared<Service::SM::ServiceManager>();
 
@@ -228,6 +229,7 @@ void System::Shutdown() {
     service_manager.reset();
     telemetry_session.reset();
     gpu_core.reset();
+    audio_core.reset();
 
     // Close all CPU/threading state
     cpu_barrier->NotifyEnd();

src/core/core.h
@@ -8,6 +8,7 @@
 #include <memory>
 #include <string>
 #include <thread>
+#include "audio_core/audio_out.h"
 #include "common/common_types.h"
 #include "core/arm/exclusive_monitor.h"
 #include "core/core_cpu.h"
@@ -131,6 +132,11 @@ public:
         return *gpu_core;
     }
 
+    /// Gets the AudioCore interface
+    AudioCore::AudioOut& AudioCore() {
+        return *audio_core;
+    }
+
     /// Gets the scheduler for the CPU core that is currently running
     Kernel::Scheduler& CurrentScheduler() {
         return *CurrentCpuCore().Scheduler();
@@ -195,6 +201,7 @@ private:
     /// AppLoader used to load the current executing application
     std::unique_ptr<Loader::AppLoader> app_loader;
     std::unique_ptr<Tegra::GPU> gpu_core;
+    std::unique_ptr<AudioCore::AudioOut> audio_core;
     std::shared_ptr<Tegra::DebugContext> debug_context;
     Kernel::SharedPtr<Kernel::Process> current_process;
     std::shared_ptr<ExclusiveMonitor> cpu_exclusive_monitor;

src/core/hle/service/audio/audout_u.cpp
@@ -5,8 +5,7 @@
 #include <array>
 #include <vector>
 #include "common/logging/log.h"
-#include "core/core_timing.h"
-#include "core/core_timing_util.h"
+#include "core/core.h"
 #include "core/hle/ipc_helpers.h"
 #include "core/hle/kernel/event.h"
 #include "core/hle/kernel/hle_ipc.h"
@@ -14,17 +13,22 @@
 namespace Service::Audio {
 
-/// Switch sample rate frequency
-constexpr u32 sample_rate{48000};
-/// TODO(st4rk): dynamic number of channels, as I think Switch has support
-/// to more audio channels (probably when Docked I guess)
-constexpr u32 audio_channels{2};
-/// TODO(st4rk): find a proper value for the audio_ticks
-constexpr u64 audio_ticks{static_cast<u64>(CoreTiming::BASE_CLOCK_RATE / 500)};
+namespace ErrCodes {
+enum {
+    ErrorUnknown = 2,
+    BufferCountExceeded = 8,
+};
+}
+
+constexpr std::array<char, 10> DefaultDevice{{"DeviceOut"}};
+constexpr int DefaultSampleRate{48000};
 
 class IAudioOut final : public ServiceFramework<IAudioOut> {
 public:
-    IAudioOut() : ServiceFramework("IAudioOut"), audio_out_state(AudioState::Stopped) {
+    IAudioOut(AudoutParams audio_params)
+        : ServiceFramework("IAudioOut"), audio_params(audio_params),
+          audio_core(Core::System::GetInstance().AudioCore()) {
 
         static const FunctionInfo functions[] = {
             {0, &IAudioOut::GetAudioOutState, "GetAudioOutState"},
             {1, &IAudioOut::StartAudioOut, "StartAudioOut"},
@@ -32,66 +36,65 @@ public:
             {3, &IAudioOut::AppendAudioOutBufferImpl, "AppendAudioOutBuffer"},
             {4, &IAudioOut::RegisterBufferEvent, "RegisterBufferEvent"},
             {5, &IAudioOut::GetReleasedAudioOutBufferImpl, "GetReleasedAudioOutBuffer"},
-            {6, nullptr, "ContainsAudioOutBuffer"},
+            {6, &IAudioOut::ContainsAudioOutBuffer, "ContainsAudioOutBuffer"},
             {7, &IAudioOut::AppendAudioOutBufferImpl, "AppendAudioOutBufferAuto"},
             {8, &IAudioOut::GetReleasedAudioOutBufferImpl, "GetReleasedAudioOutBufferAuto"},
-            {9, nullptr, "GetAudioOutBufferCount"},
+            {9, &IAudioOut::GetAudioOutBufferCount, "GetAudioOutBufferCount"},
             {10, nullptr, "GetAudioOutPlayedSampleCount"},
             {11, nullptr, "FlushAudioOutBuffers"},
         };
         RegisterHandlers(functions);
 
         // This is the event handle used to check if the audio buffer was released
-        buffer_event =
-            Kernel::Event::Create(Kernel::ResetType::OneShot, "IAudioOutBufferReleasedEvent");
+        buffer_event = Kernel::Event::Create(Kernel::ResetType::Sticky, "IAudioOutBufferReleased");
 
-        // Register event callback to update the Audio Buffer
-        audio_event = CoreTiming::RegisterEvent(
-            "IAudioOut::UpdateAudioBuffersCallback", [this](u64 userdata, int cycles_late) {
-                UpdateAudioBuffersCallback();
-                CoreTiming::ScheduleEvent(audio_ticks - cycles_late, audio_event);
-            });
-
-        // Start the audio event
-        CoreTiming::ScheduleEvent(audio_ticks, audio_event);
-    }
-
-    ~IAudioOut() {
-        CoreTiming::UnscheduleEvent(audio_event, 0);
+        stream = audio_core.OpenStream(audio_params.sample_rate, audio_params.channel_count,
+                                       [=]() { buffer_event->Signal(); });
     }
 
 private:
+    struct AudioBuffer {
+        u64_le next;
+        u64_le buffer;
+        u64_le buffer_capacity;
+        u64_le buffer_size;
+        u64_le offset;
+    };
+    static_assert(sizeof(AudioBuffer) == 0x28, "AudioBuffer is an invalid size");
+
     void GetAudioOutState(Kernel::HLERequestContext& ctx) {
         LOG_DEBUG(Service_Audio, "called");
         IPC::ResponseBuilder rb{ctx, 3};
         rb.Push(RESULT_SUCCESS);
-        rb.Push(static_cast<u32>(audio_out_state));
+        rb.Push(static_cast<u32>(stream->IsPlaying() ? AudioState::Started : AudioState::Stopped));
     }
 
     void StartAudioOut(Kernel::HLERequestContext& ctx) {
-        LOG_WARNING(Service_Audio, "(STUBBED) called");
+        LOG_DEBUG(Service_Audio, "called");
 
-        // Start audio
-        audio_out_state = AudioState::Started;
+        if (stream->IsPlaying()) {
+            IPC::ResponseBuilder rb{ctx, 2};
+            rb.Push(ResultCode(ErrorModule::Audio, ErrCodes::ErrorUnknown));
+            return;
+        }
+
+        audio_core.StartStream(stream);
 
         IPC::ResponseBuilder rb{ctx, 2};
         rb.Push(RESULT_SUCCESS);
     }
 
     void StopAudioOut(Kernel::HLERequestContext& ctx) {
-        LOG_WARNING(Service_Audio, "(STUBBED) called");
+        LOG_DEBUG(Service_Audio, "called");
 
-        // Stop audio
-        audio_out_state = AudioState::Stopped;
-
-        queue_keys.clear();
+        audio_core.StopStream(stream);
 
         IPC::ResponseBuilder rb{ctx, 2};
         rb.Push(RESULT_SUCCESS);
     }
 
     void RegisterBufferEvent(Kernel::HLERequestContext& ctx) {
-        LOG_WARNING(Service_Audio, "(STUBBED) called");
+        LOG_DEBUG(Service_Audio, "called");
 
         IPC::ResponseBuilder rb{ctx, 2, 1};
         rb.Push(RESULT_SUCCESS);
@@ -99,101 +102,107 @@ private:
     }
 
     void AppendAudioOutBufferImpl(Kernel::HLERequestContext& ctx) {
-        LOG_WARNING(Service_Audio, "(STUBBED) called");
+        LOG_DEBUG(Service_Audio, "(STUBBED) called {}", ctx.Description());
         IPC::RequestParser rp{ctx};
 
-        const u64 key{rp.Pop<u64>()};
-        queue_keys.insert(queue_keys.begin(), key);
+        const auto& input_buffer{ctx.ReadBuffer()};
+        ASSERT_MSG(input_buffer.size() == sizeof(AudioBuffer),
+                   "AudioBuffer input is an invalid size!");
+        AudioBuffer audio_buffer{};
+        std::memcpy(&audio_buffer, input_buffer.data(), sizeof(AudioBuffer));
+        const u64 tag{rp.Pop<u64>()};
+
+        std::vector<u8> data(audio_buffer.buffer_size);
+        Memory::ReadBlock(audio_buffer.buffer, data.data(), data.size());
+
+        if (!audio_core.QueueBuffer(stream, tag, std::move(data))) {
+            IPC::ResponseBuilder rb{ctx, 2};
+            rb.Push(ResultCode(ErrorModule::Audio, ErrCodes::BufferCountExceeded));
+        }
 
         IPC::ResponseBuilder rb{ctx, 2};
         rb.Push(RESULT_SUCCESS);
     }
 
     void GetReleasedAudioOutBufferImpl(Kernel::HLERequestContext& ctx) {
-        LOG_WARNING(Service_Audio, "(STUBBED) called");
+        LOG_DEBUG(Service_Audio, "called {}", ctx.Description());
+        IPC::RequestParser rp{ctx};
+        const u64 max_count{ctx.GetWriteBufferSize() / sizeof(u64)};
+        const auto released_buffers{audio_core.GetTagsAndReleaseBuffers(stream, max_count)};
 
-        // TODO(st4rk): This is how libtransistor currently implements the
-        // GetReleasedAudioOutBuffer, it should return the key (a VAddr) to the app and this address
-        // is used to know which buffer should be filled with data and send again to the service
-        // through AppendAudioOutBuffer. Check if this is the proper way to do it.
-        u64 key{0};
-
-        if (queue_keys.size()) {
-            key = queue_keys.back();
-            queue_keys.pop_back();
-        }
-
-        ctx.WriteBuffer(&key, sizeof(u64));
+        std::vector<u64> tags{released_buffers};
+        tags.resize(max_count);
+        ctx.WriteBuffer(tags);
 
         IPC::ResponseBuilder rb{ctx, 3};
         rb.Push(RESULT_SUCCESS);
-        // TODO(st4rk): This might be the total of released buffers, needs to be verified on
-        // hardware
-        rb.Push<u32>(static_cast<u32>(queue_keys.size()));
+        rb.Push<u32>(static_cast<u32>(released_buffers.size()));
    }
 
-    void UpdateAudioBuffersCallback() {
-        if (audio_out_state != AudioState::Started) {
-            return;
-        }
-
-        if (queue_keys.empty()) {
-            return;
-        }
-
-        buffer_event->Signal();
+    void ContainsAudioOutBuffer(Kernel::HLERequestContext& ctx) {
+        LOG_DEBUG(Service_Audio, "called");
+        IPC::RequestParser rp{ctx};
+        const u64 tag{rp.Pop<u64>()};
+        IPC::ResponseBuilder rb{ctx, 3};
+        rb.Push(RESULT_SUCCESS);
+        rb.Push(stream->ContainsBuffer(tag));
     }
 
-    enum class AudioState : u32 {
-        Started,
-        Stopped,
-    };
+    void GetAudioOutBufferCount(Kernel::HLERequestContext& ctx) {
+        LOG_DEBUG(Service_Audio, "called");
+        IPC::ResponseBuilder rb{ctx, 3};
+        rb.Push(RESULT_SUCCESS);
+        rb.Push(static_cast<u32>(stream->GetQueueSize()));
+    }
 
-    /// This is used to trigger the audio event callback that is going to read the samples from the
-    /// audio_buffer list and enqueue the samples using the sink (audio_core).
-    CoreTiming::EventType* audio_event;
+    AudioCore::AudioOut& audio_core;
+    AudioCore::StreamPtr stream;
+
+    AudoutParams audio_params{};
 
     /// This is the evend handle used to check if the audio buffer was released
     Kernel::SharedPtr<Kernel::Event> buffer_event;
 
-    /// (st4rk): This is just a temporary workaround for the future implementation. Libtransistor
-    /// uses the key as an address in the App, so we need to return when the
-    /// GetReleasedAudioOutBuffer_1 is called, otherwise we'll run in problems, because
-    /// libtransistor uses the key returned as an pointer.
-    std::vector<u64> queue_keys;
-
-    AudioState audio_out_state;
 };
 
 void AudOutU::ListAudioOutsImpl(Kernel::HLERequestContext& ctx) {
-    LOG_WARNING(Service_Audio, "(STUBBED) called");
+    LOG_DEBUG(Service_Audio, "called");
     IPC::RequestParser rp{ctx};
 
-    constexpr std::array<char, 15> audio_interface{{"AudioInterface"}};
-    ctx.WriteBuffer(audio_interface);
+    ctx.WriteBuffer(DefaultDevice);
 
     IPC::ResponseBuilder rb = rp.MakeBuilder(3, 0, 0);
 
     rb.Push(RESULT_SUCCESS);
-    // TODO(st4rk): We're currently returning only one audio interface (stringlist size). However,
-    // it's highly possible to have more than one interface (despite that libtransistor requires
-    // only one).
-    rb.Push<u32>(1);
+    rb.Push<u32>(1); // Amount of audio devices
 }
 
 void AudOutU::OpenAudioOutImpl(Kernel::HLERequestContext& ctx) {
-    LOG_WARNING(Service_Audio, "(STUBBED) called");
+    LOG_DEBUG(Service_Audio, "called");
 
-    if (!audio_out_interface) {
-        audio_out_interface = std::make_shared<IAudioOut>();
+    ctx.WriteBuffer(DefaultDevice);
+    IPC::RequestParser rp{ctx};
+    auto params{rp.PopRaw<AudoutParams>()};
+    if (params.channel_count <= 2) {
+        // Mono does not exist for audout
+        params.channel_count = 2;
+    } else {
+        params.channel_count = 6;
     }
+    if (!params.sample_rate) {
+        params.sample_rate = DefaultSampleRate;
+    }
+
+    // TODO(bunnei): Support more than one IAudioOut interface. When we add this, ListAudioOutsImpl
+    // will likely need to be updated as well.
+    ASSERT_MSG(!audio_out_interface, "Unimplemented");
+    audio_out_interface = std::make_shared<IAudioOut>(std::move(params));
 
     IPC::ResponseBuilder rb{ctx, 6, 0, 1};
     rb.Push(RESULT_SUCCESS);
-    rb.Push<u32>(sample_rate);
-    rb.Push<u32>(audio_channels);
+    rb.Push<u32>(DefaultSampleRate);
+    rb.Push<u32>(params.channel_count);
     rb.Push<u32>(static_cast<u32>(PcmFormat::Int16));
-    rb.Push<u32>(0); // This field is unknown
+    rb.Push<u32>(static_cast<u32>(AudioState::Stopped));
     rb.PushIpcInterface<Audio::IAudioOut>(audio_out_interface);
 }

src/core/hle/service/audio/audout_u.h
@@ -12,6 +12,18 @@ class HLERequestContext;
 namespace Service::Audio {
 
+struct AudoutParams {
+    s32_le sample_rate;
+    u16_le channel_count;
+    INSERT_PADDING_BYTES(2);
+};
+static_assert(sizeof(AudoutParams) == 0x8, "AudoutParams is an invalid size");
+
+enum class AudioState : u32 {
+    Started,
+    Stopped,
+};
+
 class IAudioOut;
 
 class AudOutU final : public ServiceFramework<AudOutU> {
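
Not part of the commit: a standalone mirror of the AudoutParams layout for checking the size arithmetic. Plain fixed-width types stand in for yuzu's s32_le/u16_le little-endian aliases, which is only equivalent on a little-endian host; the struct name is illustrative.

#include <cstdint>

struct AudoutParamsMirror {
    std::int32_t sample_rate;    // 4 bytes
    std::uint16_t channel_count; // 2 bytes
    std::uint8_t padding[2];     // INSERT_PADDING_BYTES(2)
};
// 4 + 2 + 2 = 0x8, matching the static_assert in audout_u.h and the raw struct
// that OpenAudioOutImpl pops from the IPC request.
static_assert(sizeof(AudoutParamsMirror) == 0x8, "layout mismatch");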