yuzu-emu/yuzu

Merge branch 'master' into fullscreen-enum

This commit is contained in:
lat9nq 2021-07-25 15:31:33 -04:00 committed by GitHub
commit 09d6cc9943
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
453 changed files with 49820 additions and 27394 deletions


@ -48,69 +48,6 @@ if (BUILD_REPOSITORY)
endif()
endif()
# The variable SRC_DIR must be passed into the script (since it uses the current build directory for all values of CMAKE_*_DIR)
set(VIDEO_CORE "${SRC_DIR}/src/video_core")
set(HASH_FILES
"${VIDEO_CORE}/renderer_opengl/gl_arb_decompiler.cpp"
"${VIDEO_CORE}/renderer_opengl/gl_arb_decompiler.h"
"${VIDEO_CORE}/renderer_opengl/gl_shader_cache.cpp"
"${VIDEO_CORE}/renderer_opengl/gl_shader_cache.h"
"${VIDEO_CORE}/renderer_opengl/gl_shader_decompiler.cpp"
"${VIDEO_CORE}/renderer_opengl/gl_shader_decompiler.h"
"${VIDEO_CORE}/renderer_opengl/gl_shader_disk_cache.cpp"
"${VIDEO_CORE}/renderer_opengl/gl_shader_disk_cache.h"
"${VIDEO_CORE}/shader/decode/arithmetic.cpp"
"${VIDEO_CORE}/shader/decode/arithmetic_half.cpp"
"${VIDEO_CORE}/shader/decode/arithmetic_half_immediate.cpp"
"${VIDEO_CORE}/shader/decode/arithmetic_immediate.cpp"
"${VIDEO_CORE}/shader/decode/arithmetic_integer.cpp"
"${VIDEO_CORE}/shader/decode/arithmetic_integer_immediate.cpp"
"${VIDEO_CORE}/shader/decode/bfe.cpp"
"${VIDEO_CORE}/shader/decode/bfi.cpp"
"${VIDEO_CORE}/shader/decode/conversion.cpp"
"${VIDEO_CORE}/shader/decode/ffma.cpp"
"${VIDEO_CORE}/shader/decode/float_set.cpp"
"${VIDEO_CORE}/shader/decode/float_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/half_set.cpp"
"${VIDEO_CORE}/shader/decode/half_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/hfma2.cpp"
"${VIDEO_CORE}/shader/decode/image.cpp"
"${VIDEO_CORE}/shader/decode/integer_set.cpp"
"${VIDEO_CORE}/shader/decode/integer_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/memory.cpp"
"${VIDEO_CORE}/shader/decode/texture.cpp"
"${VIDEO_CORE}/shader/decode/other.cpp"
"${VIDEO_CORE}/shader/decode/predicate_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/predicate_set_register.cpp"
"${VIDEO_CORE}/shader/decode/register_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/shift.cpp"
"${VIDEO_CORE}/shader/decode/video.cpp"
"${VIDEO_CORE}/shader/decode/warp.cpp"
"${VIDEO_CORE}/shader/decode/xmad.cpp"
"${VIDEO_CORE}/shader/ast.cpp"
"${VIDEO_CORE}/shader/ast.h"
"${VIDEO_CORE}/shader/compiler_settings.cpp"
"${VIDEO_CORE}/shader/compiler_settings.h"
"${VIDEO_CORE}/shader/control_flow.cpp"
"${VIDEO_CORE}/shader/control_flow.h"
"${VIDEO_CORE}/shader/decode.cpp"
"${VIDEO_CORE}/shader/expr.cpp"
"${VIDEO_CORE}/shader/expr.h"
"${VIDEO_CORE}/shader/node.h"
"${VIDEO_CORE}/shader/node_helper.cpp"
"${VIDEO_CORE}/shader/node_helper.h"
"${VIDEO_CORE}/shader/registry.cpp"
"${VIDEO_CORE}/shader/registry.h"
"${VIDEO_CORE}/shader/shader_ir.cpp"
"${VIDEO_CORE}/shader/shader_ir.h"
"${VIDEO_CORE}/shader/track.cpp"
"${VIDEO_CORE}/shader/transform_feedback.cpp"
"${VIDEO_CORE}/shader/transform_feedback.h"
)
set(COMBINED "")
foreach (F IN LISTS HASH_FILES)
file(READ ${F} TMP)
set(COMBINED "${COMBINED}${TMP}")
endforeach()
string(MD5 SHADER_CACHE_VERSION "${COMBINED}")
# The variable SRC_DIR must be passed into the script
# (since it uses the current build directory for all values of CMAKE_*_DIR)
configure_file("${SRC_DIR}/src/common/scm_rev.cpp.in" "scm_rev.cpp" @ONLY)


@ -35,7 +35,7 @@ It is written in C++ with portability in mind, and we actively maintain builds f
The emulator is capable of running most commercial games at full speed, provided you meet the [necessary hardware requirements](https://yuzu-emu.org/help/quickstart/#hardware-requirements).
For a full list of games yuzu supports, please visit our [Compatibility page](https://yuzu-emu.org/game/).
Check out our [website](https://yuzu-emu.org/) for the latest news on exciting features, monthly progress reports, and more!
@ -43,7 +43,7 @@ Check out our [website](https://yuzu-emu.org/) for the latest news on exciting f
Most of the development happens on GitHub. It's also where [our central repository](https://github.com/yuzu-emu/yuzu) is hosted. For development discussion, please join us on [Discord](https://discord.com/invite/u77vRWY).
If you want to contribute, please take a look at the [Contributor's Guide](https://github.com/yuzu-emu/yuzu/wiki/Contributing) and [Developer Information](https://github.com/yuzu-emu/yuzu/wiki/Developer-Information).
You can also contact any of the developers on Discord to learn about the current state of the emulator.
If you want to contribute to the user interface translation project, please check out the [yuzu project on transifex](https://www.transifex.com/yuzu-emulator/yuzu). We centralize translation work there, and periodically upstream translations.
@ -78,3 +78,5 @@ If you wish to support us a different way, please join our [Discord](https://dis
## License
yuzu is licensed under the GPLv2 (or any later version). Refer to the [license.txt](https://github.com/yuzu-emu/yuzu/blob/master/license.txt) file.
The [Skyline-Emulator Team](https://github.com/skyline-emu/skyline) is exempt from GPLv2 for the contributions from the following contributors: [FernandoS27](https://github.com/FernandoS27), [lioncash](https://github.com/lioncash), [bunnei](https://github.com/bunnei), [ReinUsesLisp](https://github.com/ReinUsesLisp), [Morph1984](https://github.com/Morph1984), [ogniK5377](https://github.com/ogniK5377), [german77](https://github.com/german77), [ameerj](https://github.com/ameerj), [Kelebek1](https://github.com/Kelebek1) and [lat9nq](https://github.com/lat9nq). They may only use the code from these contributors under the Mozilla Public License, version 2.0.


@ -38,6 +38,26 @@ QPushButton#RendererStatusBarButton:!checked {
color: #0066ff;
}
QPushButton#GPUStatusBarButton {
color: #656565;
border: 1px solid transparent;
background-color: transparent;
padding: 0px 3px 0px 3px;
text-align: center;
}
QPushButton#GPUStatusBarButton:hover {
border: 1px solid #76797C;
}
QPushButton#GPUStatusBarButton:checked {
color: #ff8040;
}
QPushButton#GPUStatusBarButton:!checked {
color: #40dd40;
}
QPushButton#buttonRefreshDevices {
min-width: 21px;
min-height: 21px;
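These QSS rules match on Qt object names, so they only take effect for buttons whose objectName is set accordingly. A minimal sketch of the widget-side wiring (hypothetical helper, not part of this commit):

#include <QPushButton>

// Hypothetical setup: the stylesheet above selects QPushButton#GPUStatusBarButton,
// so the widget must carry that objectName; making it checkable enables the
// :checked / :!checked color rules.
QPushButton* MakeGpuStatusButton(QWidget* parent) {
    auto* button = new QPushButton(QStringLiteral("VULKAN"), parent);
    button->setObjectName(QStringLiteral("GPUStatusBarButton"));
    button->setCheckable(true);
    return button;
}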


@ -1283,6 +1283,27 @@ QPushButton#RendererStatusBarButton:!checked {
color: #00ccdd;
}
QPushButton#GPUStatusBarButton {
min-width: 0px;
color: #656565;
border: 1px solid transparent;
background-color: transparent;
padding: 0px 3px 0px 3px;
text-align: center;
}
QPushButton#GPUStatusBarButton:hover {
border: 1px solid #76797C;
}
QPushButton#GPUStatusBarButton:checked {
color: #ff8040;
}
QPushButton#GPUStatusBarButton:!checked {
color: #40dd40;
}
QPushButton#buttonRefreshDevices {
min-width: 23px;
min-height: 23px;


@ -2186,6 +2186,27 @@ QPushButton#RendererStatusBarButton:!checked {
color: #00ccdd;
}
QPushButton#GPUStatusBarButton {
min-width: 0px;
color: #656565;
border: 1px solid transparent;
background-color: transparent;
padding: 0px 3px 0px 3px;
text-align: center;
}
QPushButton#GPUStatusBarButton:hover {
border: 1px solid #76797C;
}
QPushButton#GPUStatusBarButton:checked {
color: #ff8040;
}
QPushButton#GPUStatusBarButton:!checked {
color: #40dd40;
}
QPushButton#buttonRefreshDevices {
min-width: 19px;
min-height: 19px;

@ -1 +1 @@
Subproject commit 8188e3fbbc105591064093440f88081fb957d4f0
Subproject commit 07c4a37bcf41ea50aef6e98236abdfe8089fb4c6

externals/sirit vendored

@ -1 +1 @@
Subproject commit eefca56afd49379bdebc97ded8b480839f930881
Subproject commit a39596358a3a5488c06554c0c15184a6af71e433


@ -142,6 +142,7 @@ add_subdirectory(core)
add_subdirectory(audio_core)
add_subdirectory(video_core)
add_subdirectory(input_common)
add_subdirectory(shader_recompiler)
add_subdirectory(tests)
if (ENABLE_SDL2)


@ -1,8 +1,3 @@
# Add a custom command to generate a new shader_cache_version hash when any of the following files change
# NOTE: This is an approximation of what files affect shader generation; it's possible something else
# could affect the result, but that is far less likely than the following files. Keeping a list of files
# like this allows for much better caching since it doesn't force the user to recompile binary shaders every update
set(VIDEO_CORE "${CMAKE_SOURCE_DIR}/src/video_core")
if (DEFINED ENV{AZURECIREPO})
set(BUILD_REPOSITORY $ENV{AZURECIREPO})
endif()
@ -30,64 +25,7 @@ add_custom_command(OUTPUT scm_rev.cpp
-DGIT_EXECUTABLE=${GIT_EXECUTABLE}
-P ${CMAKE_SOURCE_DIR}/CMakeModules/GenerateSCMRev.cmake
DEPENDS
# WARNING! It was too much work to try and make a common location for this list,
# so if you need to change it, please update CMakeModules/GenerateSCMRev.cmake as well
"${VIDEO_CORE}/renderer_opengl/gl_arb_decompiler.cpp"
"${VIDEO_CORE}/renderer_opengl/gl_arb_decompiler.h"
"${VIDEO_CORE}/renderer_opengl/gl_shader_cache.cpp"
"${VIDEO_CORE}/renderer_opengl/gl_shader_cache.h"
"${VIDEO_CORE}/renderer_opengl/gl_shader_decompiler.cpp"
"${VIDEO_CORE}/renderer_opengl/gl_shader_decompiler.h"
"${VIDEO_CORE}/renderer_opengl/gl_shader_disk_cache.cpp"
"${VIDEO_CORE}/renderer_opengl/gl_shader_disk_cache.h"
"${VIDEO_CORE}/shader/decode/arithmetic.cpp"
"${VIDEO_CORE}/shader/decode/arithmetic_half.cpp"
"${VIDEO_CORE}/shader/decode/arithmetic_half_immediate.cpp"
"${VIDEO_CORE}/shader/decode/arithmetic_immediate.cpp"
"${VIDEO_CORE}/shader/decode/arithmetic_integer.cpp"
"${VIDEO_CORE}/shader/decode/arithmetic_integer_immediate.cpp"
"${VIDEO_CORE}/shader/decode/bfe.cpp"
"${VIDEO_CORE}/shader/decode/bfi.cpp"
"${VIDEO_CORE}/shader/decode/conversion.cpp"
"${VIDEO_CORE}/shader/decode/ffma.cpp"
"${VIDEO_CORE}/shader/decode/float_set.cpp"
"${VIDEO_CORE}/shader/decode/float_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/half_set.cpp"
"${VIDEO_CORE}/shader/decode/half_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/hfma2.cpp"
"${VIDEO_CORE}/shader/decode/image.cpp"
"${VIDEO_CORE}/shader/decode/integer_set.cpp"
"${VIDEO_CORE}/shader/decode/integer_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/memory.cpp"
"${VIDEO_CORE}/shader/decode/texture.cpp"
"${VIDEO_CORE}/shader/decode/other.cpp"
"${VIDEO_CORE}/shader/decode/predicate_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/predicate_set_register.cpp"
"${VIDEO_CORE}/shader/decode/register_set_predicate.cpp"
"${VIDEO_CORE}/shader/decode/shift.cpp"
"${VIDEO_CORE}/shader/decode/video.cpp"
"${VIDEO_CORE}/shader/decode/warp.cpp"
"${VIDEO_CORE}/shader/decode/xmad.cpp"
"${VIDEO_CORE}/shader/ast.cpp"
"${VIDEO_CORE}/shader/ast.h"
"${VIDEO_CORE}/shader/compiler_settings.cpp"
"${VIDEO_CORE}/shader/compiler_settings.h"
"${VIDEO_CORE}/shader/control_flow.cpp"
"${VIDEO_CORE}/shader/control_flow.h"
"${VIDEO_CORE}/shader/decode.cpp"
"${VIDEO_CORE}/shader/expr.cpp"
"${VIDEO_CORE}/shader/expr.h"
"${VIDEO_CORE}/shader/node.h"
"${VIDEO_CORE}/shader/node_helper.cpp"
"${VIDEO_CORE}/shader/node_helper.h"
"${VIDEO_CORE}/shader/registry.cpp"
"${VIDEO_CORE}/shader/registry.h"
"${VIDEO_CORE}/shader/shader_ir.cpp"
"${VIDEO_CORE}/shader/shader_ir.h"
"${VIDEO_CORE}/shader/track.cpp"
"${VIDEO_CORE}/shader/transform_feedback.cpp"
"${VIDEO_CORE}/shader/transform_feedback.h"
# and also check that the scm_rev files haven't changed
# Check that the scm_rev files haven't changed
"${CMAKE_CURRENT_SOURCE_DIR}/scm_rev.cpp.in"
"${CMAKE_CURRENT_SOURCE_DIR}/scm_rev.h"
# technically we should regenerate if the git version changed, but it's not worth the effort imo
@ -231,7 +169,7 @@ endif()
create_target_directory_groups(common)
target_link_libraries(common PUBLIC ${Boost_LIBRARIES} fmt::fmt microprofile)
target_link_libraries(common PUBLIC ${Boost_LIBRARIES} fmt::fmt microprofile Threads::Threads)
target_link_libraries(common PRIVATE lz4::lz4 xbyak)
if (MSVC)
target_link_libraries(common PRIVATE zstd::zstd)


@ -144,6 +144,10 @@ bool ParseFilterRule(Filter& instance, Iterator begin, Iterator end) {
SUB(Render, Software) \
SUB(Render, OpenGL) \
SUB(Render, Vulkan) \
CLS(Shader) \
SUB(Shader, SPIRV) \
SUB(Shader, GLASM) \
SUB(Shader, GLSL) \
CLS(Audio) \
SUB(Audio, DSP) \
SUB(Audio, Sink) \


@ -114,6 +114,10 @@ enum class Class : u8 {
Render_Software, ///< Software renderer backend
Render_OpenGL, ///< OpenGL backend
Render_Vulkan, ///< Vulkan backend
Shader, ///< Shader recompiler
Shader_SPIRV, ///< Shader SPIR-V code generation
Shader_GLASM, ///< Shader GLASM code generation
Shader_GLSL, ///< Shader GLSL code generation
Audio, ///< Audio emulation
Audio_DSP, ///< The HLE implementation of the DSP
Audio_Sink, ///< Emulator audio output backend
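The matching CLS/SUB entries in filter.cpp (above) make these classes filterable. Hypothetical log sites using the new classes, following the LOG_* macro style visible elsewhere in this diff (the macros take the Class name unqualified, as in LOG_WARNING(Kernel, ...)):

#include "common/common_types.h"
#include "common/logging/log.h"

// Hypothetical function and message text, purely illustrative.
void LogBackendActivity(u64 shader_hash) {
    LOG_DEBUG(Shader_SPIRV, "emitting SPIR-V code for shader {:#018x}", shader_hash);
    LOG_WARNING(Shader_GLSL, "GLSL backend selected");
}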


@ -14,7 +14,6 @@
#define BUILD_ID "@BUILD_ID@"
#define TITLE_BAR_FORMAT_IDLE "@TITLE_BAR_FORMAT_IDLE@"
#define TITLE_BAR_FORMAT_RUNNING "@TITLE_BAR_FORMAT_RUNNING@"
#define SHADER_CACHE_VERSION "@SHADER_CACHE_VERSION@"
namespace Common {
@ -28,7 +27,6 @@ const char g_build_version[] = BUILD_VERSION;
const char g_build_id[] = BUILD_ID;
const char g_title_bar_format_idle[] = TITLE_BAR_FORMAT_IDLE;
const char g_title_bar_format_running[] = TITLE_BAR_FORMAT_RUNNING;
const char g_shader_cache_version[] = SHADER_CACHE_VERSION;
} // namespace


@ -57,7 +57,7 @@ void LogSettings() {
log_setting("Renderer_UseNvdecEmulation", values.use_nvdec_emulation.GetValue());
log_setting("Renderer_AccelerateASTC", values.accelerate_astc.GetValue());
log_setting("Renderer_UseVsync", values.use_vsync.GetValue());
log_setting("Renderer_UseAssemblyShaders", values.use_assembly_shaders.GetValue());
log_setting("Renderer_ShaderBackend", values.shader_backend.GetValue());
log_setting("Renderer_UseAsynchronousShaders", values.use_asynchronous_shaders.GetValue());
log_setting("Renderer_UseGarbageCollection", values.use_caches_gc.GetValue());
log_setting("Renderer_AnisotropicFilteringLevel", values.max_anisotropy.GetValue());
@ -140,7 +140,7 @@ void RestoreGlobalState(bool is_powered_on) {
values.use_nvdec_emulation.SetGlobal(true);
values.accelerate_astc.SetGlobal(true);
values.use_vsync.SetGlobal(true);
values.use_assembly_shaders.SetGlobal(true);
values.shader_backend.SetGlobal(true);
values.use_asynchronous_shaders.SetGlobal(true);
values.use_fast_gpu_time.SetGlobal(true);
values.use_caches_gc.SetGlobal(true);


@ -24,6 +24,12 @@ enum class RendererBackend : u32 {
Vulkan = 1,
};
enum class ShaderBackend : u32 {
GLSL = 0,
GLASM = 1,
SPIRV = 2,
};
enum class GPUAccuracy : u32 {
Normal = 0,
High = 1,
@ -313,6 +319,9 @@ struct Values {
// Renderer
Setting<RendererBackend> renderer_backend{RendererBackend::OpenGL, "backend"};
BasicSetting<bool> renderer_debug{false, "debug"};
BasicSetting<bool> enable_nsight_aftermath{false, "nsight_aftermath"};
BasicSetting<bool> disable_shader_loop_safety_checks{false,
"disable_shader_loop_safety_checks"};
Setting<int> vulkan_device{0, "vulkan_device"};
Setting<u16> resolution_factor{1, "resolution_factor"};
@ -336,7 +345,7 @@ struct Values {
Setting<bool> accelerate_astc{true, "accelerate_astc"};
Setting<bool> use_vsync{true, "use_vsync"};
BasicSetting<bool> disable_fps_limit{false, "disable_fps_limit"};
Setting<bool> use_assembly_shaders{false, "use_assembly_shaders"};
Setting<ShaderBackend> shader_backend{ShaderBackend::GLASM, "shader_backend"};
Setting<bool> use_asynchronous_shaders{false, "use_asynchronous_shaders"};
Setting<bool> use_fast_gpu_time{true, "use_fast_gpu_time"};
Setting<bool> use_caches_gc{false, "use_caches_gc"};
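With use_assembly_shaders collapsed into the three-valued shader_backend, call sites that used to branch on a bool now dispatch on the enum. A minimal sketch (hypothetical helper; assumes ShaderBackend sits in the Settings namespace alongside Values, as the hunk above suggests):

#include "common/settings.h"

// Hypothetical helper dispatching on the new setting.
const char* ShaderBackendName() {
    switch (Settings::values.shader_backend.GetValue()) {
    case Settings::ShaderBackend::GLSL:
        return "GLSL";
    case Settings::ShaderBackend::GLASM:
        return "GLASM";
    case Settings::ShaderBackend::SPIRV:
        return "SPIR-V";
    }
    return "unknown";
}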


@ -5,6 +5,7 @@
#pragma once
#include <atomic>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <stop_token>
@ -39,7 +40,7 @@ public:
const auto lambda = [this, func](std::stop_token stop_token) {
Common::SetCurrentThreadName(thread_name.c_str());
{
std::conditional_t<with_state, StateType, int> state{func()};
[[maybe_unused]] std::conditional_t<with_state, StateType, int> state{func()};
while (!stop_token.stop_requested()) {
Task task;
{
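The attribute is needed because when with_state is false the conditional type degenerates to a dummy int that nothing reads, which would otherwise trip -Wunused-variable under -Werror. A standalone sketch of the pattern (hypothetical names):

#include <type_traits>

template <bool with_state, typename StateType, typename F>
void RunWorker(F&& make_state) {
    // With with_state == false this is a throwaway int; [[maybe_unused]]
    // keeps the declaration from triggering unused-variable warnings.
    [[maybe_unused]] std::conditional_t<with_state, StateType, int> state{make_state()};
}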


@ -517,6 +517,8 @@ add_library(core STATIC
hle/service/psc/psc.h
hle/service/ptm/psm.cpp
hle/service/ptm/psm.h
hle/service/kernel_helpers.cpp
hle/service/kernel_helpers.h
hle/service/service.cpp
hle/service/service.h
hle/service/set/set.cpp


@ -58,6 +58,9 @@ bool SessionRequestManager::HasSessionRequestHandler(const HLERequestContext& co
void SessionRequestHandler::ClientConnected(KServerSession* session) {
session->ClientConnected(shared_from_this());
// Ensure our server session is tracked globally.
kernel.RegisterServerSession(session);
}
void SessionRequestHandler::ClientDisconnected(KServerSession* session) {


@ -3,6 +3,7 @@
// Refer to the license.txt file included.
#include "core/hle/kernel/k_auto_object.h"
#include "core/hle/kernel/kernel.h"
namespace Kernel {
@ -11,4 +12,12 @@ KAutoObject* KAutoObject::Create(KAutoObject* obj) {
return obj;
}
void KAutoObject::RegisterWithKernel() {
kernel.RegisterKernelObject(this);
}
void KAutoObject::UnregisterWithKernel() {
kernel.UnregisterKernelObject(this);
}
} // namespace Kernel


@ -85,8 +85,12 @@ private:
KERNEL_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject);
public:
explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {}
virtual ~KAutoObject() = default;
explicit KAutoObject(KernelCore& kernel_) : kernel(kernel_) {
RegisterWithKernel();
}
virtual ~KAutoObject() {
UnregisterWithKernel();
}
static KAutoObject* Create(KAutoObject* ptr);
@ -166,6 +170,10 @@ public:
}
}
private:
void RegisterWithKernel();
void UnregisterWithKernel();
protected:
KernelCore& kernel;
std::string name;
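Leak tracking now piggybacks on object lifetime: every KAutoObject registers itself on construction and unregisters on destruction, so whatever remains in the kernel's set at shutdown is, by definition, dangling. A reduced standalone model of the scheme (hypothetical names, not the actual kernel types):

#include <cstddef>
#include <mutex>
#include <unordered_set>

// Reduced model: constructor registers, destructor unregisters, so the
// set's contents at shutdown are exactly the leaked objects.
class ObjectRegistry {
public:
    void Register(void* obj) {
        std::lock_guard lk{mutex};
        objects.insert(obj);
    }
    void Unregister(void* obj) {
        std::lock_guard lk{mutex};
        objects.erase(obj);
    }
    std::size_t DanglingCount() {
        std::lock_guard lk{mutex};
        return objects.size();
    }

private:
    std::mutex mutex;
    std::unordered_set<void*> objects;
};

class TrackedObject {
public:
    explicit TrackedObject(ObjectRegistry& registry_) : registry{registry_} {
        registry.Register(this);
    }
    ~TrackedObject() {
        registry.Unregister(this);
    }

private:
    ObjectRegistry& registry;
};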


@ -10,6 +10,7 @@
#include "common/alignment.h"
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/scope_exit.h"
#include "common/settings.h"
#include "core/core.h"
#include "core/device_memory.h"
@ -43,6 +44,8 @@ void SetupMainThread(Core::System& system, KProcess& owner_process, u32 priority
ASSERT(owner_process.GetResourceLimit()->Reserve(LimitableResource::Threads, 1));
KThread* thread = KThread::Create(system.Kernel());
SCOPE_EXIT({ thread->Close(); });
ASSERT(KThread::InitializeUserThread(system, thread, entry_point, 0, stack_top, priority,
owner_process.GetIdealCoreId(), &owner_process)
.IsSuccess());
@ -162,7 +165,7 @@ void KProcess::DecrementThreadCount() {
ASSERT(num_threads > 0);
if (const auto count = --num_threads; count == 0) {
UNIMPLEMENTED_MSG("Process termination is not implemented!");
LOG_WARNING(Kernel, "Process termination is not fully implemented.");
}
}
@ -406,6 +409,9 @@ void KProcess::Finalize() {
resource_limit->Close();
}
// Finalize the handle table and close any open handles.
handle_table.Finalize();
// Perform inherited finalization.
KAutoObjectWithSlabHeapAndContainer<KProcess, KSynchronizationObject>::Finalize();
}


@ -28,7 +28,10 @@ namespace Kernel {
KServerSession::KServerSession(KernelCore& kernel_) : KSynchronizationObject{kernel_} {}
KServerSession::~KServerSession() {}
KServerSession::~KServerSession() {
// Ensure that the global list tracking server sessions does not hold on to a reference.
kernel.UnregisterServerSession(this);
}
void KServerSession::Initialize(KSession* parent_session_, std::string&& name_,
std::shared_ptr<SessionRequestManager> manager_) {


@ -61,6 +61,7 @@ struct KernelCore::Impl {
void Initialize(KernelCore& kernel) {
global_scheduler_context = std::make_unique<Kernel::GlobalSchedulerContext>(kernel);
global_handle_table = std::make_unique<Kernel::KHandleTable>(kernel);
global_handle_table->Initialize(KHandleTable::MaxTableSize);
is_phantom_mode_for_singlecore = false;
@ -90,9 +91,39 @@ struct KernelCore::Impl {
}
void Shutdown() {
// Shutdown all processes.
if (current_process) {
current_process->Finalize();
current_process->Close();
current_process = nullptr;
}
process_list.clear();
// Ensures all service threads gracefully shutdown
// Close all open server ports.
std::unordered_set<KServerPort*> server_ports_;
{
std::lock_guard lk(server_ports_lock);
server_ports_ = server_ports;
server_ports.clear();
}
for (auto* server_port : server_ports_) {
server_port->Close();
}
// Close all open server sessions.
std::unordered_set<KServerSession*> server_sessions_;
{
std::lock_guard lk(server_sessions_lock);
server_sessions_ = server_sessions;
server_sessions.clear();
}
for (auto* server_session : server_sessions_) {
server_session->Close();
}
// Ensure that the object list container is finalized and properly shutdown.
object_list_container.Finalize();
// Ensures all service threads gracefully shut down.
service_threads.clear();
next_object_id = 0;
@ -111,11 +142,7 @@ struct KernelCore::Impl {
cores.clear();
if (current_process) {
current_process->Close();
current_process = nullptr;
}
global_handle_table->Finalize();
global_handle_table.reset();
preemption_event = nullptr;
@ -142,6 +169,16 @@ struct KernelCore::Impl {
// Next host thread ID to use, 0-3 IDs represent core threads, >3 represent others
next_host_thread_id = Core::Hardware::NUM_CPU_CORES;
// Track kernel objects that were not freed on shutdown
{
std::lock_guard lk(registered_objects_lock);
if (registered_objects.size()) {
LOG_WARNING(Kernel, "{} kernel objects were dangling on shutdown!",
registered_objects.size());
registered_objects.clear();
}
}
}
void InitializePhysicalCores() {
@ -630,6 +667,21 @@ struct KernelCore::Impl {
user_slab_heap_size);
}
KClientPort* CreateNamedServicePort(std::string name) {
auto search = service_interface_factory.find(name);
if (search == service_interface_factory.end()) {
UNIMPLEMENTED();
return {};
}
KClientPort* port = &search->second(system.ServiceManager(), system);
{
std::lock_guard lk(server_ports_lock);
server_ports.insert(&port->GetParent()->GetServerPort());
}
return port;
}
std::atomic<u32> next_object_id{0};
std::atomic<u64> next_kernel_process_id{KProcess::InitialKIPIDMin};
std::atomic<u64> next_user_process_id{KProcess::ProcessIDMin};
@ -656,6 +708,12 @@ struct KernelCore::Impl {
/// the ConnectToPort SVC.
std::unordered_map<std::string, ServiceInterfaceFactory> service_interface_factory;
NamedPortTable named_ports;
std::unordered_set<KServerPort*> server_ports;
std::unordered_set<KServerSession*> server_sessions;
std::unordered_set<KAutoObject*> registered_objects;
std::mutex server_ports_lock;
std::mutex server_sessions_lock;
std::mutex registered_objects_lock;
std::unique_ptr<Core::ExclusiveMonitor> exclusive_monitor;
std::vector<Kernel::PhysicalCore> cores;
@ -844,12 +902,27 @@ void KernelCore::RegisterNamedService(std::string name, ServiceInterfaceFactory&
}
KClientPort* KernelCore::CreateNamedServicePort(std::string name) {
auto search = impl->service_interface_factory.find(name);
if (search == impl->service_interface_factory.end()) {
UNIMPLEMENTED();
return {};
}
return &search->second(impl->system.ServiceManager(), impl->system);
return impl->CreateNamedServicePort(std::move(name));
}
void KernelCore::RegisterServerSession(KServerSession* server_session) {
std::lock_guard lk(impl->server_sessions_lock);
impl->server_sessions.insert(server_session);
}
void KernelCore::UnregisterServerSession(KServerSession* server_session) {
std::lock_guard lk(impl->server_sessions_lock);
impl->server_sessions.erase(server_session);
}
void KernelCore::RegisterKernelObject(KAutoObject* object) {
std::lock_guard lk(impl->registered_objects_lock);
impl->registered_objects.insert(object);
}
void KernelCore::UnregisterKernelObject(KAutoObject* object) {
std::lock_guard lk(impl->registered_objects_lock);
impl->registered_objects.erase(object);
}
bool KernelCore::IsValidNamedPort(NamedPortTable::const_iterator port) const {


@ -45,6 +45,7 @@ class KPort;
class KProcess;
class KResourceLimit;
class KScheduler;
class KServerSession;
class KSession;
class KSharedMemory;
class KThread;
@ -185,6 +186,22 @@ public:
/// Opens a port to a service previously registered with RegisterNamedService.
KClientPort* CreateNamedServicePort(std::string name);
/// Registers a server session with the global emulation state, to be freed on shutdown. This is
/// necessary because we do not emulate processes for HLE sessions.
void RegisterServerSession(KServerSession* server_session);
/// Unregisters a server session previously registered with RegisterServerSession when it was
/// destroyed during the current emulation session.
void UnregisterServerSession(KServerSession* server_session);
/// Registers all kernel objects with the global emulation state; this is purely for tracking
/// leaks after emulation has been shut down.
void RegisterKernelObject(KAutoObject* object);
/// Unregisters a kernel object previously registered with RegisterKernelObject when it was
/// destroyed during the current emulation session.
void UnregisterKernelObject(KAutoObject* object);
/// Determines whether or not the given port is a valid named port.
bool IsValidNamedPort(NamedPortTable::const_iterator port) const;


@ -298,6 +298,7 @@ static ResultCode ConnectToNamedPort(Core::System& system, Handle* out, VAddr po
// Create a session.
KClientSession* session{};
R_TRY(port->CreateSession(std::addressof(session)));
port->Close();
// Register the session in the table, close the extra reference.
handle_table.Register(*out, session);
@ -1439,11 +1440,6 @@ static void ExitProcess(Core::System& system) {
LOG_INFO(Kernel_SVC, "Process {} exiting", current_process->GetProcessID());
ASSERT_MSG(current_process->GetStatus() == ProcessStatus::Running,
"Process has already exited");
current_process->PrepareForTermination();
// Kill the current thread
system.Kernel().CurrentScheduler()->GetCurrentThread()->Exit();
}
static void ExitProcess32(Core::System& system) {


@ -18,6 +18,7 @@
#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/kernel/kernel.h"
#include "core/hle/service/hid/controllers/npad.h"
#include "core/hle/service/kernel_helpers.h"
namespace Service::HID {
constexpr s32 HID_JOYSTICK_MAX = 0x7fff;
@ -147,7 +148,9 @@ bool Controller_NPad::IsDeviceHandleValid(const DeviceHandle& device_handle) {
device_handle.device_index < DeviceIndex::MaxDeviceIndex;
}
Controller_NPad::Controller_NPad(Core::System& system_) : ControllerBase{system_} {
Controller_NPad::Controller_NPad(Core::System& system_,
KernelHelpers::ServiceContext& service_context_)
: ControllerBase{system_}, service_context{service_context_} {
latest_vibration_values.fill({DEFAULT_VIBRATION_VALUE, DEFAULT_VIBRATION_VALUE});
}
@ -251,10 +254,9 @@ void Controller_NPad::InitNewlyAddedController(std::size_t controller_idx) {
}
void Controller_NPad::OnInit() {
auto& kernel = system.Kernel();
for (std::size_t i = 0; i < styleset_changed_events.size(); ++i) {
styleset_changed_events[i] = Kernel::KEvent::Create(kernel);
styleset_changed_events[i]->Initialize(fmt::format("npad:NpadStyleSetChanged_{}", i));
styleset_changed_events[i] =
service_context.CreateEvent(fmt::format("npad:NpadStyleSetChanged_{}", i));
}
if (!IsControllerActivated()) {
@ -344,8 +346,7 @@ void Controller_NPad::OnRelease() {
}
for (std::size_t i = 0; i < styleset_changed_events.size(); ++i) {
styleset_changed_events[i]->Close();
styleset_changed_events[i] = nullptr;
service_context.CloseEvent(styleset_changed_events[i]);
}
}


@ -20,6 +20,10 @@ class KEvent;
class KReadableEvent;
} // namespace Kernel
namespace Service::KernelHelpers {
class ServiceContext;
}
namespace Service::HID {
constexpr u32 NPAD_HANDHELD = 32;
@ -27,7 +31,8 @@ constexpr u32 NPAD_UNKNOWN = 16; // TODO(ogniK): What is this?
class Controller_NPad final : public ControllerBase {
public:
explicit Controller_NPad(Core::System& system_);
explicit Controller_NPad(Core::System& system_,
KernelHelpers::ServiceContext& service_context_);
~Controller_NPad() override;
// Called when the controller is initialized
@ -566,6 +571,7 @@ private:
std::array<std::unique_ptr<Input::MotionDevice>, Settings::NativeMotion::NUM_MOTIONS_HID>,
10>;
KernelHelpers::ServiceContext& service_context;
std::mutex mutex;
ButtonArray buttons;
StickArray sticks;


@ -46,8 +46,9 @@ constexpr auto pad_update_ns = std::chrono::nanoseconds{1000 * 1000}; //
constexpr auto motion_update_ns = std::chrono::nanoseconds{15 * 1000 * 1000}; // (15ms, 66.666Hz)
constexpr std::size_t SHARED_MEMORY_SIZE = 0x40000;
IAppletResource::IAppletResource(Core::System& system_)
: ServiceFramework{system_, "IAppletResource"} {
IAppletResource::IAppletResource(Core::System& system_,
KernelHelpers::ServiceContext& service_context_)
: ServiceFramework{system_, "IAppletResource"}, service_context{service_context_} {
static const FunctionInfo functions[] = {
{0, &IAppletResource::GetSharedMemoryHandle, "GetSharedMemoryHandle"},
};
@ -63,7 +64,7 @@ IAppletResource::IAppletResource(Core::System& system_)
MakeController<Controller_Stubbed>(HidController::CaptureButton);
MakeController<Controller_Stubbed>(HidController::InputDetector);
MakeController<Controller_Stubbed>(HidController::UniquePad);
MakeController<Controller_NPad>(HidController::NPad);
MakeControllerWithServiceContext<Controller_NPad>(HidController::NPad);
MakeController<Controller_Gesture>(HidController::Gesture);
MakeController<Controller_ConsoleSixAxis>(HidController::ConsoleSixAxisSensor);
@ -191,13 +192,14 @@ private:
std::shared_ptr<IAppletResource> Hid::GetAppletResource() {
if (applet_resource == nullptr) {
applet_resource = std::make_shared<IAppletResource>(system);
applet_resource = std::make_shared<IAppletResource>(system, service_context);
}
return applet_resource;
}
Hid::Hid(Core::System& system_) : ServiceFramework{system_, "hid"} {
Hid::Hid(Core::System& system_)
: ServiceFramework{system_, "hid"}, service_context{system_, service_name} {
// clang-format off
static const FunctionInfo functions[] = {
{0, &Hid::CreateAppletResource, "CreateAppletResource"},
@ -347,7 +349,7 @@ void Hid::CreateAppletResource(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_HID, "called, applet_resource_user_id={}", applet_resource_user_id);
if (applet_resource == nullptr) {
applet_resource = std::make_shared<IAppletResource>(system);
applet_resource = std::make_shared<IAppletResource>(system, service_context);
}
IPC::ResponseBuilder rb{ctx, 2, 0, 1};


@ -7,6 +7,7 @@
#include <chrono>
#include "core/hle/service/hid/controllers/controller_base.h"
#include "core/hle/service/kernel_helpers.h"
#include "core/hle/service/service.h"
namespace Core::Timing {
@ -39,7 +40,8 @@ enum class HidController : std::size_t {
class IAppletResource final : public ServiceFramework<IAppletResource> {
public:
explicit IAppletResource(Core::System& system_);
explicit IAppletResource(Core::System& system_,
KernelHelpers::ServiceContext& service_context_);
~IAppletResource() override;
void ActivateController(HidController controller);
@ -60,11 +62,18 @@ private:
void MakeController(HidController controller) {
controllers[static_cast<std::size_t>(controller)] = std::make_unique<T>(system);
}
template <typename T>
void MakeControllerWithServiceContext(HidController controller) {
controllers[static_cast<std::size_t>(controller)] =
std::make_unique<T>(system, service_context);
}
void GetSharedMemoryHandle(Kernel::HLERequestContext& ctx);
void UpdateControllers(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
void UpdateMotion(std::uintptr_t user_data, std::chrono::nanoseconds ns_late);
KernelHelpers::ServiceContext& service_context;
std::shared_ptr<Core::Timing::EventType> pad_update_event;
std::shared_ptr<Core::Timing::EventType> motion_update_event;
@ -176,6 +185,8 @@ private:
static_assert(sizeof(VibrationDeviceInfo) == 0x8, "VibrationDeviceInfo has incorrect size.");
std::shared_ptr<IAppletResource> applet_resource;
KernelHelpers::ServiceContext service_context;
};
/// Reload input devices. Used when input configuration changed


@ -0,0 +1,62 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "core/core.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/kernel/k_readable_event.h"
#include "core/hle/kernel/k_resource_limit.h"
#include "core/hle/kernel/k_scoped_resource_reservation.h"
#include "core/hle/kernel/k_writable_event.h"
#include "core/hle/service/kernel_helpers.h"
namespace Service::KernelHelpers {
ServiceContext::ServiceContext(Core::System& system_, std::string name_)
: kernel(system_.Kernel()) {
process = Kernel::KProcess::Create(kernel);
ASSERT(Kernel::KProcess::Initialize(process, system_, std::move(name_),
Kernel::KProcess::ProcessType::Userland)
.IsSuccess());
}
ServiceContext::~ServiceContext() {
process->Close();
process = nullptr;
}
Kernel::KEvent* ServiceContext::CreateEvent(std::string&& name) {
// Reserve a new event from the process resource limit
Kernel::KScopedResourceReservation event_reservation(process,
Kernel::LimitableResource::Events);
if (!event_reservation.Succeeded()) {
LOG_CRITICAL(Service, "Resource limit reached!");
return {};
}
// Create a new event.
auto* event = Kernel::KEvent::Create(kernel);
if (!event) {
LOG_CRITICAL(Service, "Unable to create event!");
return {};
}
// Initialize the event.
event->Initialize(std::move(name));
// Commit the event reservation.
event_reservation.Commit();
// Register the event.
Kernel::KEvent::Register(kernel, event);
return event;
}
void ServiceContext::CloseEvent(Kernel::KEvent* event) {
event->GetReadableEvent().Close();
event->GetWritableEvent().Close();
}
} // namespace Service::KernelHelpers


@ -0,0 +1,35 @@
// Copyright 2021 yuzu emulator team
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
namespace Core {
class System;
}
namespace Kernel {
class KernelCore;
class KEvent;
class KProcess;
} // namespace Kernel
namespace Service::KernelHelpers {
class ServiceContext {
public:
ServiceContext(Core::System& system_, std::string name_);
~ServiceContext();
Kernel::KEvent* CreateEvent(std::string&& name);
void CloseEvent(Kernel::KEvent* event);
private:
Kernel::KernelCore& kernel;
Kernel::KProcess* process{};
};
} // namespace Service::KernelHelpers
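A hedged usage sketch of ServiceContext, mirroring the pattern this commit applies to hid and nvdrv (class and member names hypothetical): the service owns a context, creates its events through it, and closes them on teardown so the backing process cleans up:

#include "core/core.h"
#include "core/hle/kernel/k_event.h"
#include "core/hle/service/kernel_helpers.h"

// Hypothetical service following the hid/nvdrv pattern from this commit.
class ExampleService {
public:
    explicit ExampleService(Core::System& system) : service_context{system, "example"} {
        // Events are reserved against the context's process resource limit.
        update_event = service_context.CreateEvent("ExampleService:UpdateEvent");
    }
    ~ExampleService() {
        // Closes both the readable and writable sides of the event.
        service_context.CloseEvent(update_event);
    }

private:
    Service::KernelHelpers::ServiceContext service_context;
    Kernel::KEvent* update_event{};
};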


@ -39,11 +39,11 @@ void InstallInterfaces(SM::ServiceManager& service_manager, NVFlinger::NVFlinger
nvflinger.SetNVDrvInstance(module_);
}
Module::Module(Core::System& system) : syncpoint_manager{system.GPU()} {
auto& kernel = system.Kernel();
Module::Module(Core::System& system)
: syncpoint_manager{system.GPU()}, service_context{system, "nvdrv"} {
for (u32 i = 0; i < MaxNvEvents; i++) {
events_interface.events[i].event = Kernel::KEvent::Create(kernel);
events_interface.events[i].event->Initialize(fmt::format("NVDRV::NvEvent_{}", i));
events_interface.events[i].event =
service_context.CreateEvent(fmt::format("NVDRV::NvEvent_{}", i));
events_interface.status[i] = EventState::Free;
events_interface.registered[i] = false;
}
@ -65,8 +65,7 @@ Module::Module(Core::System& system) : syncpoint_manager{system.GPU()} {
Module::~Module() {
for (u32 i = 0; i < MaxNvEvents; i++) {
events_interface.events[i].event->Close();
events_interface.events[i].event = nullptr;
service_context.CloseEvent(events_interface.events[i].event);
}
}


@ -9,6 +9,7 @@
#include <vector>
#include "common/common_types.h"
#include "core/hle/service/kernel_helpers.h"
#include "core/hle/service/nvdrv/nvdata.h"
#include "core/hle/service/nvdrv/syncpoint_manager.h"
#include "core/hle/service/service.h"
@ -154,6 +155,8 @@ private:
std::unordered_map<std::string, std::shared_ptr<Devices::nvdevice>> devices;
EventInterface events_interface;
KernelHelpers::ServiceContext service_context;
};
/// Registers all NVDRV services with the specified service manager.


@ -104,23 +104,22 @@ ServiceFrameworkBase::~ServiceFrameworkBase() {
void ServiceFrameworkBase::InstallAsService(SM::ServiceManager& service_manager) {
const auto guard = LockService();
ASSERT(!port_installed);
ASSERT(!service_registered);
auto port = service_manager.RegisterService(service_name, max_sessions).Unwrap();
port->SetSessionHandler(shared_from_this());
port_installed = true;
service_manager.RegisterService(service_name, max_sessions, shared_from_this());
service_registered = true;
}
Kernel::KClientPort& ServiceFrameworkBase::CreatePort() {
const auto guard = LockService();
ASSERT(!port_installed);
ASSERT(!service_registered);
auto* port = Kernel::KPort::Create(kernel);
port->Initialize(max_sessions, false, service_name);
port->GetServerPort().SetSessionHandler(shared_from_this());
port_installed = true;
service_registered = true;
return port->GetClientPort();
}


@ -96,6 +96,9 @@ protected:
/// System context that the service operates under.
Core::System& system;
/// Identifier string used to connect to the service.
std::string service_name;
private:
template <typename T>
friend class ServiceFramework;
@ -117,14 +120,12 @@ private:
void RegisterHandlersBaseTipc(const FunctionInfoBase* functions, std::size_t n);
void ReportUnimplementedFunction(Kernel::HLERequestContext& ctx, const FunctionInfoBase* info);
/// Identifier string used to connect to the service.
std::string service_name;
/// Maximum number of concurrent sessions that this service can handle.
u32 max_sessions;
/// Flag to store if a port was already create/installed to detect multiple install attempts,
/// which is not supported.
bool port_installed = false;
bool service_registered = false;
/// Function used to safely up-cast pointers to the derived class before invoking a handler.
InvokerFn* handler_invoker;


@ -4,6 +4,7 @@
#include <tuple>
#include "common/assert.h"
#include "common/scope_exit.h"
#include "core/core.h"
#include "core/hle/ipc_helpers.h"
#include "core/hle/kernel/k_client_port.h"
@ -40,17 +41,13 @@ static ResultCode ValidateServiceName(const std::string& name) {
}
Kernel::KClientPort& ServiceManager::InterfaceFactory(ServiceManager& self, Core::System& system) {
ASSERT(self.sm_interface.expired());
auto sm = std::make_shared<SM>(self, system);
self.sm_interface = sm;
self.sm_interface = std::make_shared<SM>(self, system);
self.controller_interface = std::make_unique<Controller>(system);
return sm->CreatePort();
return self.sm_interface->CreatePort();
}
ResultVal<Kernel::KServerPort*> ServiceManager::RegisterService(std::string name,
u32 max_sessions) {
ResultCode ServiceManager::RegisterService(std::string name, u32 max_sessions,
Kernel::SessionRequestHandlerPtr handler) {
CASCADE_CODE(ValidateServiceName(name));
@ -59,12 +56,9 @@ ResultVal<Kernel::KServerPort*> ServiceManager::RegisterService(std::string name
return ERR_ALREADY_REGISTERED;
}
auto* port = Kernel::KPort::Create(kernel);
port->Initialize(max_sessions, false, name);
registered_services.emplace(std::move(name), handler);
registered_services.emplace(std::move(name), port);
return MakeResult(&port->GetServerPort());
return ResultSuccess;
}
ResultCode ServiceManager::UnregisterService(const std::string& name) {
@ -76,14 +70,11 @@ ResultCode ServiceManager::UnregisterService(const std::string& name) {
return ERR_SERVICE_NOT_REGISTERED;
}
iter->second->Close();
registered_services.erase(iter);
return ResultSuccess;
}
ResultVal<Kernel::KPort*> ServiceManager::GetServicePort(const std::string& name) {
CASCADE_CODE(ValidateServiceName(name));
auto it = registered_services.find(name);
if (it == registered_services.end()) {
@ -91,10 +82,13 @@ ResultVal<Kernel::KPort*> ServiceManager::GetServicePort(const std::string& name
return ERR_SERVICE_NOT_REGISTERED;
}
return MakeResult(it->second);
}
auto* port = Kernel::KPort::Create(kernel);
port->Initialize(ServerSessionCountMax, false, name);
auto handler = it->second;
port->GetServerPort().SetSessionHandler(std::move(handler));
SM::~SM() = default;
return MakeResult(port);
}
/**
* SM::Initialize service function
@ -156,11 +150,15 @@ ResultVal<Kernel::KClientSession*> SM::GetServiceImpl(Kernel::HLERequestContext&
LOG_ERROR(Service_SM, "called service={} -> error 0x{:08X}", name, port_result.Code().raw);
return port_result.Code();
}
auto& port = port_result.Unwrap()->GetClientPort();
auto& port = port_result.Unwrap();
SCOPE_EXIT({ port->GetClientPort().Close(); });
server_ports.emplace_back(&port->GetServerPort());
// Create a new session.
Kernel::KClientSession* session{};
if (const auto result = port.CreateSession(std::addressof(session)); result.IsError()) {
if (const auto result = port->GetClientPort().CreateSession(std::addressof(session));
result.IsError()) {
LOG_ERROR(Service_SM, "called service={} -> error 0x{:08X}", name, result.raw);
return result;
}
@ -180,20 +178,21 @@ void SM::RegisterService(Kernel::HLERequestContext& ctx) {
LOG_DEBUG(Service_SM, "called with name={}, max_session_count={}, is_light={}", name,
max_session_count, is_light);
auto handle = service_manager.RegisterService(name, max_session_count);
if (handle.Failed()) {
LOG_ERROR(Service_SM, "failed to register service with error_code={:08X}",
handle.Code().raw);
if (const auto result = service_manager.RegisterService(name, max_session_count, nullptr);
result.IsError()) {
LOG_ERROR(Service_SM, "failed to register service with error_code={:08X}", result.raw);
IPC::ResponseBuilder rb{ctx, 2};
rb.Push(handle.Code());
rb.Push(result);
return;
}
IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles};
rb.Push(handle.Code());
auto* port = Kernel::KPort::Create(kernel);
port->Initialize(ServerSessionCountMax, is_light, name);
SCOPE_EXIT({ port->GetClientPort().Close(); });
auto server_port = handle.Unwrap();
rb.PushMoveObjects(server_port);
IPC::ResponseBuilder rb{ctx, 2, 0, 1, IPC::ResponseBuilder::Flags::AlwaysMoveHandles};
rb.Push(ResultSuccess);
rb.PushMoveObjects(port->GetServerPort());
}
void SM::UnregisterService(Kernel::HLERequestContext& ctx) {
@ -225,4 +224,10 @@ SM::SM(ServiceManager& service_manager_, Core::System& system_)
});
}
SM::~SM() {
for (auto& server_port : server_ports) {
server_port->Close();
}
}
} // namespace Service::SM


@ -49,6 +49,7 @@ private:
ServiceManager& service_manager;
bool is_initialized{};
Kernel::KernelCore& kernel;
std::vector<Kernel::KServerPort*> server_ports;
};
class ServiceManager {
@ -58,7 +59,8 @@ public:
explicit ServiceManager(Kernel::KernelCore& kernel_);
~ServiceManager();
ResultVal<Kernel::KServerPort*> RegisterService(std::string name, u32 max_sessions);
ResultCode RegisterService(std::string name, u32 max_sessions,
Kernel::SessionRequestHandlerPtr handler);
ResultCode UnregisterService(const std::string& name);
ResultVal<Kernel::KPort*> GetServicePort(const std::string& name);
@ -69,21 +71,17 @@ public:
LOG_DEBUG(Service, "Can't find service: {}", service_name);
return nullptr;
}
auto* port = service->second;
if (port == nullptr) {
return nullptr;
}
return std::static_pointer_cast<T>(port->GetServerPort().GetSessionRequestHandler());
return std::static_pointer_cast<T>(service->second);
}
void InvokeControlRequest(Kernel::HLERequestContext& context);
private:
std::weak_ptr<SM> sm_interface;
std::shared_ptr<SM> sm_interface;
std::unique_ptr<Controller> controller_interface;
/// Map of registered services, retrieved using GetServicePort.
std::unordered_map<std::string, Kernel::KPort*> registered_services;
std::unordered_map<std::string, Kernel::SessionRequestHandlerPtr> registered_services;
/// Kernel context
Kernel::KernelCore& kernel;


@ -62,7 +62,6 @@ json GetYuzuVersionData() {
{"build_date", std::string(Common::g_build_date)},
{"build_fullname", std::string(Common::g_build_fullname)},
{"build_version", std::string(Common::g_build_version)},
{"shader_cache_version", std::string(Common::g_shader_cache_version)},
};
}


@ -233,8 +233,8 @@ void TelemetrySession::AddInitialInfo(Loader::AppLoader& app_loader,
Settings::values.use_nvdec_emulation.GetValue());
AddField(field_type, "Renderer_AccelerateASTC", Settings::values.accelerate_astc.GetValue());
AddField(field_type, "Renderer_UseVsync", Settings::values.use_vsync.GetValue());
AddField(field_type, "Renderer_UseAssemblyShaders",
Settings::values.use_assembly_shaders.GetValue());
AddField(field_type, "Renderer_ShaderBackend",
static_cast<u32>(Settings::values.shader_backend.GetValue()));
AddField(field_type, "Renderer_UseAsynchronousShaders",
Settings::values.use_asynchronous_shaders.GetValue());
AddField(field_type, "System_UseDockedMode", Settings::values.use_docked_mode.GetValue());


@ -0,0 +1,268 @@
add_library(shader_recompiler STATIC
backend/bindings.h
backend/glasm/emit_context.cpp
backend/glasm/emit_context.h
backend/glasm/emit_glasm.cpp
backend/glasm/emit_glasm.h
backend/glasm/emit_glasm_barriers.cpp
backend/glasm/emit_glasm_bitwise_conversion.cpp
backend/glasm/emit_glasm_composite.cpp
backend/glasm/emit_glasm_context_get_set.cpp
backend/glasm/emit_glasm_control_flow.cpp
backend/glasm/emit_glasm_convert.cpp
backend/glasm/emit_glasm_floating_point.cpp
backend/glasm/emit_glasm_image.cpp
backend/glasm/emit_glasm_instructions.h
backend/glasm/emit_glasm_integer.cpp
backend/glasm/emit_glasm_logical.cpp
backend/glasm/emit_glasm_memory.cpp
backend/glasm/emit_glasm_not_implemented.cpp
backend/glasm/emit_glasm_select.cpp
backend/glasm/emit_glasm_shared_memory.cpp
backend/glasm/emit_glasm_special.cpp
backend/glasm/emit_glasm_undefined.cpp
backend/glasm/emit_glasm_warp.cpp
backend/glasm/reg_alloc.cpp
backend/glasm/reg_alloc.h
backend/glsl/emit_context.cpp
backend/glsl/emit_context.h
backend/glsl/emit_glsl.cpp
backend/glsl/emit_glsl.h
backend/glsl/emit_glsl_atomic.cpp
backend/glsl/emit_glsl_barriers.cpp
backend/glsl/emit_glsl_bitwise_conversion.cpp
backend/glsl/emit_glsl_composite.cpp
backend/glsl/emit_glsl_context_get_set.cpp
backend/glsl/emit_glsl_control_flow.cpp
backend/glsl/emit_glsl_convert.cpp
backend/glsl/emit_glsl_floating_point.cpp
backend/glsl/emit_glsl_image.cpp
backend/glsl/emit_glsl_instructions.h
backend/glsl/emit_glsl_integer.cpp
backend/glsl/emit_glsl_logical.cpp
backend/glsl/emit_glsl_memory.cpp
backend/glsl/emit_glsl_not_implemented.cpp
backend/glsl/emit_glsl_select.cpp
backend/glsl/emit_glsl_shared_memory.cpp
backend/glsl/emit_glsl_special.cpp
backend/glsl/emit_glsl_undefined.cpp
backend/glsl/emit_glsl_warp.cpp
backend/glsl/var_alloc.cpp
backend/glsl/var_alloc.h
backend/spirv/emit_context.cpp
backend/spirv/emit_context.h
backend/spirv/emit_spirv.cpp
backend/spirv/emit_spirv.h
backend/spirv/emit_spirv_atomic.cpp
backend/spirv/emit_spirv_barriers.cpp
backend/spirv/emit_spirv_bitwise_conversion.cpp
backend/spirv/emit_spirv_composite.cpp
backend/spirv/emit_spirv_context_get_set.cpp
backend/spirv/emit_spirv_control_flow.cpp
backend/spirv/emit_spirv_convert.cpp
backend/spirv/emit_spirv_floating_point.cpp
backend/spirv/emit_spirv_image.cpp
backend/spirv/emit_spirv_image_atomic.cpp
backend/spirv/emit_spirv_instructions.h
backend/spirv/emit_spirv_integer.cpp
backend/spirv/emit_spirv_logical.cpp
backend/spirv/emit_spirv_memory.cpp
backend/spirv/emit_spirv_select.cpp
backend/spirv/emit_spirv_shared_memory.cpp
backend/spirv/emit_spirv_special.cpp
backend/spirv/emit_spirv_undefined.cpp
backend/spirv/emit_spirv_warp.cpp
environment.h
exception.h
frontend/ir/abstract_syntax_list.h
frontend/ir/attribute.cpp
frontend/ir/attribute.h
frontend/ir/basic_block.cpp
frontend/ir/basic_block.h
frontend/ir/breadth_first_search.h
frontend/ir/condition.cpp
frontend/ir/condition.h
frontend/ir/flow_test.cpp
frontend/ir/flow_test.h
frontend/ir/ir_emitter.cpp
frontend/ir/ir_emitter.h
frontend/ir/microinstruction.cpp
frontend/ir/modifiers.h
frontend/ir/opcodes.cpp
frontend/ir/opcodes.h
frontend/ir/opcodes.inc
frontend/ir/patch.cpp
frontend/ir/patch.h
frontend/ir/post_order.cpp
frontend/ir/post_order.h
frontend/ir/pred.h
frontend/ir/program.cpp
frontend/ir/program.h
frontend/ir/reg.h
frontend/ir/type.cpp
frontend/ir/type.h
frontend/ir/value.cpp
frontend/ir/value.h
frontend/maxwell/control_flow.cpp
frontend/maxwell/control_flow.h
frontend/maxwell/decode.cpp
frontend/maxwell/decode.h
frontend/maxwell/indirect_branch_table_track.cpp
frontend/maxwell/indirect_branch_table_track.h
frontend/maxwell/instruction.h
frontend/maxwell/location.h
frontend/maxwell/maxwell.inc
frontend/maxwell/opcodes.cpp
frontend/maxwell/opcodes.h
frontend/maxwell/structured_control_flow.cpp
frontend/maxwell/structured_control_flow.h
frontend/maxwell/translate/impl/atomic_operations_global_memory.cpp
frontend/maxwell/translate/impl/atomic_operations_shared_memory.cpp
frontend/maxwell/translate/impl/attribute_memory_to_physical.cpp
frontend/maxwell/translate/impl/barrier_operations.cpp
frontend/maxwell/translate/impl/bitfield_extract.cpp
frontend/maxwell/translate/impl/bitfield_insert.cpp
frontend/maxwell/translate/impl/branch_indirect.cpp
frontend/maxwell/translate/impl/common_encoding.h
frontend/maxwell/translate/impl/common_funcs.cpp
frontend/maxwell/translate/impl/common_funcs.h
frontend/maxwell/translate/impl/condition_code_set.cpp
frontend/maxwell/translate/impl/double_add.cpp
frontend/maxwell/translate/impl/double_compare_and_set.cpp
frontend/maxwell/translate/impl/double_fused_multiply_add.cpp
frontend/maxwell/translate/impl/double_min_max.cpp
frontend/maxwell/translate/impl/double_multiply.cpp
frontend/maxwell/translate/impl/double_set_predicate.cpp
frontend/maxwell/translate/impl/exit_program.cpp
frontend/maxwell/translate/impl/find_leading_one.cpp
frontend/maxwell/translate/impl/floating_point_add.cpp
frontend/maxwell/translate/impl/floating_point_compare.cpp
frontend/maxwell/translate/impl/floating_point_compare_and_set.cpp
frontend/maxwell/translate/impl/floating_point_conversion_floating_point.cpp
frontend/maxwell/translate/impl/floating_point_conversion_integer.cpp
frontend/maxwell/translate/impl/floating_point_fused_multiply_add.cpp
frontend/maxwell/translate/impl/floating_point_min_max.cpp
frontend/maxwell/translate/impl/floating_point_multi_function.cpp
frontend/maxwell/translate/impl/floating_point_multiply.cpp
frontend/maxwell/translate/impl/floating_point_range_reduction.cpp
frontend/maxwell/translate/impl/floating_point_set_predicate.cpp
frontend/maxwell/translate/impl/floating_point_swizzled_add.cpp
frontend/maxwell/translate/impl/half_floating_point_add.cpp
frontend/maxwell/translate/impl/half_floating_point_fused_multiply_add.cpp
frontend/maxwell/translate/impl/half_floating_point_helper.cpp
frontend/maxwell/translate/impl/half_floating_point_helper.h
frontend/maxwell/translate/impl/half_floating_point_multiply.cpp
frontend/maxwell/translate/impl/half_floating_point_set.cpp
frontend/maxwell/translate/impl/half_floating_point_set_predicate.cpp
frontend/maxwell/translate/impl/impl.cpp
frontend/maxwell/translate/impl/impl.h
frontend/maxwell/translate/impl/integer_add.cpp
frontend/maxwell/translate/impl/integer_add_three_input.cpp
frontend/maxwell/translate/impl/integer_compare.cpp
frontend/maxwell/translate/impl/integer_compare_and_set.cpp
frontend/maxwell/translate/impl/integer_floating_point_conversion.cpp
frontend/maxwell/translate/impl/integer_funnel_shift.cpp
frontend/maxwell/translate/impl/integer_minimum_maximum.cpp
frontend/maxwell/translate/impl/integer_popcount.cpp
frontend/maxwell/translate/impl/integer_scaled_add.cpp
frontend/maxwell/translate/impl/integer_set_predicate.cpp
frontend/maxwell/translate/impl/integer_shift_left.cpp
frontend/maxwell/translate/impl/integer_shift_right.cpp
frontend/maxwell/translate/impl/integer_short_multiply_add.cpp
frontend/maxwell/translate/impl/integer_to_integer_conversion.cpp
frontend/maxwell/translate/impl/internal_stage_buffer_entry_read.cpp
frontend/maxwell/translate/impl/load_constant.cpp
frontend/maxwell/translate/impl/load_constant.h
frontend/maxwell/translate/impl/load_effective_address.cpp
frontend/maxwell/translate/impl/load_store_attribute.cpp
frontend/maxwell/translate/impl/load_store_local_shared.cpp
frontend/maxwell/translate/impl/load_store_memory.cpp
frontend/maxwell/translate/impl/logic_operation.cpp
frontend/maxwell/translate/impl/logic_operation_three_input.cpp
frontend/maxwell/translate/impl/move_predicate_to_register.cpp
frontend/maxwell/translate/impl/move_register.cpp
frontend/maxwell/translate/impl/move_register_to_predicate.cpp
frontend/maxwell/translate/impl/move_special_register.cpp
frontend/maxwell/translate/impl/not_implemented.cpp
frontend/maxwell/translate/impl/output_geometry.cpp
frontend/maxwell/translate/impl/pixel_load.cpp
frontend/maxwell/translate/impl/predicate_set_predicate.cpp
frontend/maxwell/translate/impl/predicate_set_register.cpp
frontend/maxwell/translate/impl/select_source_with_predicate.cpp
frontend/maxwell/translate/impl/surface_atomic_operations.cpp
frontend/maxwell/translate/impl/surface_load_store.cpp
frontend/maxwell/translate/impl/texture_fetch.cpp
frontend/maxwell/translate/impl/texture_fetch_swizzled.cpp
frontend/maxwell/translate/impl/texture_gather.cpp
frontend/maxwell/translate/impl/texture_gather_swizzled.cpp
frontend/maxwell/translate/impl/texture_gradient.cpp
frontend/maxwell/translate/impl/texture_load.cpp
frontend/maxwell/translate/impl/texture_load_swizzled.cpp
frontend/maxwell/translate/impl/texture_mipmap_level.cpp
frontend/maxwell/translate/impl/texture_query.cpp
frontend/maxwell/translate/impl/video_helper.cpp
frontend/maxwell/translate/impl/video_helper.h
frontend/maxwell/translate/impl/video_minimum_maximum.cpp
frontend/maxwell/translate/impl/video_multiply_add.cpp
frontend/maxwell/translate/impl/video_set_predicate.cpp
frontend/maxwell/translate/impl/vote.cpp
frontend/maxwell/translate/impl/warp_shuffle.cpp
frontend/maxwell/translate/translate.cpp
frontend/maxwell/translate/translate.h
frontend/maxwell/translate_program.cpp
frontend/maxwell/translate_program.h
host_translate_info.h
ir_opt/collect_shader_info_pass.cpp
ir_opt/constant_propagation_pass.cpp
ir_opt/dead_code_elimination_pass.cpp
ir_opt/dual_vertex_pass.cpp
ir_opt/global_memory_to_storage_buffer_pass.cpp
ir_opt/identity_removal_pass.cpp
ir_opt/lower_fp16_to_fp32.cpp
ir_opt/lower_int64_to_int32.cpp
ir_opt/passes.h
ir_opt/ssa_rewrite_pass.cpp
ir_opt/texture_pass.cpp
ir_opt/verification_pass.cpp
object_pool.h
profile.h
program_header.h
runtime_info.h
shader_info.h
varying_state.h
)
target_link_libraries(shader_recompiler PUBLIC common fmt::fmt sirit)
if (MSVC)
target_compile_options(shader_recompiler PRIVATE
/W4
/WX
/we4018 # 'expression' : signed/unsigned mismatch
/we4244 # 'argument' : conversion from 'type1' to 'type2', possible loss of data (floating-point)
/we4245 # 'conversion' : conversion from 'type1' to 'type2', signed/unsigned mismatch
/we4254 # 'operator': conversion from 'type1:field_bits' to 'type2:field_bits', possible loss of data
/we4267 # 'var' : conversion from 'size_t' to 'type', possible loss of data
/we4305 # 'context' : truncation from 'type1' to 'type2'
/we4800 # Implicit conversion from 'type' to bool. Possible information loss
/we4826 # Conversion from 'type1' to 'type2' is sign-extended. This may cause unexpected runtime behavior.
)
else()
target_compile_options(shader_recompiler PRIVATE
-Werror
-Werror=conversion
-Werror=ignored-qualifiers
-Werror=implicit-fallthrough
-Werror=shadow
-Werror=sign-compare
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-parameter>
$<$<CXX_COMPILER_ID:GNU>:-Werror=unused-but-set-variable>
-Werror=unused-variable
# Bracket depth determines maximum size of a fold expression in Clang since 9c9974c3ccb6.
# And this in turn limits the size of a std::array.
$<$<CXX_COMPILER_ID:Clang>:-fbracket-depth=1024>
)
endif()
create_target_directory_groups(shader_recompiler)


@ -0,0 +1,19 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_types.h"
namespace Shader::Backend {
struct Bindings {
u32 unified{};
u32 uniform_buffer{};
u32 storage_buffer{};
u32 texture{};
u32 image{};
};
} // namespace Shader::Backend
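Bindings is a running cursor of the next free slot per resource type; each backend advances the relevant counter as it assigns bindings, exactly as the GLASM context below does for storage buffers and images. A tiny sketch (hypothetical helper):

#include "shader_recompiler/backend/bindings.h"

// Hypothetical allocator: reserves `count` consecutive texture slots and
// returns the first, advancing the shared cursor.
u32 AllocateTextures(Shader::Backend::Bindings& bindings, u32 count) {
    const u32 first{bindings.texture};
    bindings.texture += count;
    return first;
}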


@ -0,0 +1,154 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/bindings.h"
#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/frontend/ir/program.h"
#include "shader_recompiler/profile.h"
#include "shader_recompiler/runtime_info.h"
namespace Shader::Backend::GLASM {
namespace {
std::string_view InterpDecorator(Interpolation interp) {
switch (interp) {
case Interpolation::Smooth:
return "";
case Interpolation::Flat:
return "FLAT ";
case Interpolation::NoPerspective:
return "NOPERSPECTIVE ";
}
throw InvalidArgument("Invalid interpolation {}", interp);
}
bool IsInputArray(Stage stage) {
return stage == Stage::Geometry || stage == Stage::TessellationControl ||
stage == Stage::TessellationEval;
}
} // Anonymous namespace
EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile& profile_,
const RuntimeInfo& runtime_info_)
: info{program.info}, profile{profile_}, runtime_info{runtime_info_} {
// FIXME: Temporary partial implementation
u32 cbuf_index{};
for (const auto& desc : info.constant_buffer_descriptors) {
if (desc.count != 1) {
throw NotImplementedException("Constant buffer descriptor array");
}
Add("CBUFFER c{}[]={{program.buffer[{}]}};", desc.index, cbuf_index);
++cbuf_index;
}
u32 ssbo_index{};
for (const auto& desc : info.storage_buffers_descriptors) {
if (desc.count != 1) {
throw NotImplementedException("Storage buffer descriptor array");
}
if (runtime_info.glasm_use_storage_buffers) {
Add("STORAGE ssbo{}[]={{program.storage[{}]}};", ssbo_index, bindings.storage_buffer);
++bindings.storage_buffer;
++ssbo_index;
}
}
if (!runtime_info.glasm_use_storage_buffers) {
if (const size_t num = info.storage_buffers_descriptors.size(); num > 0) {
Add("PARAM c[{}]={{program.local[0..{}]}};", num, num - 1);
}
}
stage = program.stage;
switch (program.stage) {
case Stage::VertexA:
case Stage::VertexB:
stage_name = "vertex";
attrib_name = "vertex";
break;
case Stage::TessellationControl:
case Stage::TessellationEval:
stage_name = "primitive";
attrib_name = "primitive";
break;
case Stage::Geometry:
stage_name = "primitive";
attrib_name = "vertex";
break;
case Stage::Fragment:
stage_name = "fragment";
attrib_name = "fragment";
break;
case Stage::Compute:
stage_name = "invocation";
break;
}
const std::string_view attr_stage{stage == Stage::Fragment ? "fragment" : "vertex"};
const VaryingState loads{info.loads.mask | info.passthrough.mask};
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
if (loads.Generic(index)) {
Add("{}ATTRIB in_attr{}[]={{{}.attrib[{}..{}]}};",
InterpDecorator(info.interpolation[index]), index, attr_stage, index, index);
}
}
if (IsInputArray(stage) && loads.AnyComponent(IR::Attribute::PositionX)) {
Add("ATTRIB vertex_position=vertex.position;");
}
if (info.uses_invocation_id) {
Add("ATTRIB primitive_invocation=primitive.invocation;");
}
if (info.stores_tess_level_outer) {
Add("OUTPUT result_patch_tessouter[]={{result.patch.tessouter[0..3]}};");
}
if (info.stores_tess_level_inner) {
Add("OUTPUT result_patch_tessinner[]={{result.patch.tessinner[0..1]}};");
}
if (info.stores.ClipDistances()) {
Add("OUTPUT result_clip[]={{result.clip[0..7]}};");
}
for (size_t index = 0; index < info.uses_patches.size(); ++index) {
if (!info.uses_patches[index]) {
continue;
}
if (stage == Stage::TessellationControl) {
Add("OUTPUT result_patch_attrib{}[]={{result.patch.attrib[{}..{}]}};"
"ATTRIB primitive_out_patch_attrib{}[]={{primitive.out.patch.attrib[{}..{}]}};",
index, index, index, index, index, index);
} else {
Add("ATTRIB primitive_patch_attrib{}[]={{primitive.patch.attrib[{}..{}]}};", index,
index, index);
}
}
if (stage == Stage::Fragment) {
Add("OUTPUT frag_color0=result.color;");
for (size_t index = 1; index < info.stores_frag_color.size(); ++index) {
Add("OUTPUT frag_color{}=result.color[{}];", index, index);
}
}
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
if (info.stores.Generic(index)) {
Add("OUTPUT out_attr{}[]={{result.attrib[{}..{}]}};", index, index, index);
}
}
image_buffer_bindings.reserve(info.image_buffer_descriptors.size());
for (const auto& desc : info.image_buffer_descriptors) {
image_buffer_bindings.push_back(bindings.image);
bindings.image += desc.count;
}
image_bindings.reserve(info.image_descriptors.size());
for (const auto& desc : info.image_descriptors) {
image_bindings.push_back(bindings.image);
bindings.image += desc.count;
}
texture_buffer_bindings.reserve(info.texture_buffer_descriptors.size());
for (const auto& desc : info.texture_buffer_descriptors) {
texture_buffer_bindings.push_back(bindings.texture);
bindings.texture += desc.count;
}
texture_bindings.reserve(info.texture_descriptors.size());
for (const auto& desc : info.texture_descriptors) {
texture_bindings.push_back(bindings.texture);
bindings.texture += desc.count;
}
}
} // namespace Shader::Backend::GLASM
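To make the constructor above concrete, this is the flavor of preamble it emits (an illustrative sketch, not captured output) for a fragment shader that reads one smoothly interpolated generic attribute and uses a single constant buffer; texture and image descriptors only advance the binding counters here and emit no declarations:

CBUFFER c0[]={program.buffer[0]};
ATTRIB in_attr0[]={fragment.attrib[0..0]};
OUTPUT frag_color0=result.color;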


@@ -0,0 +1,80 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include <string_view>
#include <utility>
#include <vector>
#include <fmt/format.h>
#include "shader_recompiler/backend/glasm/reg_alloc.h"
#include "shader_recompiler/stage.h"
namespace Shader {
struct Info;
struct Profile;
struct RuntimeInfo;
} // namespace Shader
namespace Shader::Backend {
struct Bindings;
}
namespace Shader::IR {
class Inst;
struct Program;
} // namespace Shader::IR
namespace Shader::Backend::GLASM {
class EmitContext {
public:
explicit EmitContext(IR::Program& program, Bindings& bindings, const Profile& profile_,
const RuntimeInfo& runtime_info_);
template <typename... Args>
void Add(const char* format_str, IR::Inst& inst, Args&&... args) {
code += fmt::format(fmt::runtime(format_str), reg_alloc.Define(inst),
std::forward<Args>(args)...);
// TODO: Remove this
code += '\n';
}
template <typename... Args>
void LongAdd(const char* format_str, IR::Inst& inst, Args&&... args) {
code += fmt::format(fmt::runtime(format_str), reg_alloc.LongDefine(inst),
std::forward<Args>(args)...);
// TODO: Remove this
code += '\n';
}
template <typename... Args>
void Add(const char* format_str, Args&&... args) {
code += fmt::format(fmt::runtime(format_str), std::forward<Args>(args)...);
// TODO: Remove this
code += '\n';
}
std::string code;
RegAlloc reg_alloc{};
const Info& info;
const Profile& profile;
const RuntimeInfo& runtime_info;
std::vector<u32> texture_buffer_bindings;
std::vector<u32> image_buffer_bindings;
std::vector<u32> texture_bindings;
std::vector<u32> image_bindings;
Stage stage{};
std::string_view stage_name = "invalid";
std::string_view attrib_name = "invalid";
u32 num_safety_loop_vars{};
bool uses_y_direction{};
};
} // namespace Shader::Backend::GLASM
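Typical use of the Add overloads above, as an illustrative sketch (the emitter name is hypothetical): the IR::Inst& overloads allocate the destination register through the register allocator and splice it into the format string, while the plain overload appends raw statements.

// Sketch of an emitter built on EmitContext::Add:
void EmitIAdd32Sketch(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
    // Expands to something like "ADD.S R0.x,R1.x,R2.x;" with R0 newly defined for inst.
    ctx.Add("ADD.S {}.x,{},{};", inst, a, b);
}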


@@ -0,0 +1,492 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <string>
#include <string_view>
#include <tuple>
#include <type_traits>
#include <utility>
#include "common/div_ceil.h"
#include "common/settings.h"
#include "shader_recompiler/backend/bindings.h"
#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/frontend/ir/ir_emitter.h"
#include "shader_recompiler/frontend/ir/program.h"
#include "shader_recompiler/profile.h"
#include "shader_recompiler/runtime_info.h"
namespace Shader::Backend::GLASM {
namespace {
template <class Func>
struct FuncTraits {};
template <class ReturnType_, class... Args>
struct FuncTraits<ReturnType_ (*)(Args...)> {
using ReturnType = ReturnType_;
static constexpr size_t NUM_ARGS = sizeof...(Args);
template <size_t I>
using ArgType = std::tuple_element_t<I, std::tuple<Args...>>;
};
template <typename T>
struct Identity {
Identity(T data_) : data{data_} {}
T Extract() {
return data;
}
T data;
};
template <bool scalar>
class RegWrapper {
public:
RegWrapper(EmitContext& ctx, const IR::Value& ir_value) : reg_alloc{ctx.reg_alloc} {
const Value value{reg_alloc.Peek(ir_value)};
if (value.type == Type::Register) {
inst = ir_value.InstRecursive();
reg = Register{value};
} else {
reg = value.type == Type::U64 ? reg_alloc.AllocLongReg() : reg_alloc.AllocReg();
}
switch (value.type) {
case Type::Register:
case Type::Void:
break;
case Type::U32:
ctx.Add("MOV.U {}.x,{};", reg, value.imm_u32);
break;
case Type::U64:
ctx.Add("MOV.U64 {}.x,{};", reg, value.imm_u64);
break;
}
}
auto Extract() {
if (inst) {
reg_alloc.Unref(*inst);
} else {
reg_alloc.FreeReg(reg);
}
return std::conditional_t<scalar, ScalarRegister, Register>{Value{reg}};
}
private:
RegAlloc& reg_alloc;
IR::Inst* inst{};
Register reg{};
};
template <typename ArgType>
class ValueWrapper {
public:
ValueWrapper(EmitContext& ctx, const IR::Value& ir_value_)
: reg_alloc{ctx.reg_alloc}, ir_value{ir_value_}, value{reg_alloc.Peek(ir_value)} {}
ArgType Extract() {
if (!ir_value.IsImmediate()) {
reg_alloc.Unref(*ir_value.InstRecursive());
}
return value;
}
private:
RegAlloc& reg_alloc;
const IR::Value& ir_value;
ArgType value;
};
template <typename ArgType>
auto Arg(EmitContext& ctx, const IR::Value& arg) {
if constexpr (std::is_same_v<ArgType, Register>) {
return RegWrapper<false>{ctx, arg};
} else if constexpr (std::is_same_v<ArgType, ScalarRegister>) {
return RegWrapper<true>{ctx, arg};
} else if constexpr (std::is_base_of_v<Value, ArgType>) {
return ValueWrapper<ArgType>{ctx, arg};
} else if constexpr (std::is_same_v<ArgType, const IR::Value&>) {
return Identity<const IR::Value&>{arg};
} else if constexpr (std::is_same_v<ArgType, u32>) {
return Identity{arg.U32()};
} else if constexpr (std::is_same_v<ArgType, IR::Attribute>) {
return Identity{arg.Attribute()};
} else if constexpr (std::is_same_v<ArgType, IR::Patch>) {
return Identity{arg.Patch()};
} else if constexpr (std::is_same_v<ArgType, IR::Reg>) {
return Identity{arg.Reg()};
}
}
template <auto func, bool is_first_arg_inst>
struct InvokeCall {
template <typename... Args>
InvokeCall(EmitContext& ctx, IR::Inst* inst, Args&&... args) {
if constexpr (is_first_arg_inst) {
func(ctx, *inst, args.Extract()...);
} else {
func(ctx, args.Extract()...);
}
}
};
template <auto func, bool is_first_arg_inst, size_t... I>
void Invoke(EmitContext& ctx, IR::Inst* inst, std::index_sequence<I...>) {
using Traits = FuncTraits<decltype(func)>;
if constexpr (is_first_arg_inst) {
InvokeCall<func, is_first_arg_inst>{
ctx, inst, Arg<typename Traits::template ArgType<I + 2>>(ctx, inst->Arg(I))...};
} else {
InvokeCall<func, is_first_arg_inst>{
ctx, inst, Arg<typename Traits::template ArgType<I + 1>>(ctx, inst->Arg(I))...};
}
}
template <auto func>
void Invoke(EmitContext& ctx, IR::Inst* inst) {
using Traits = FuncTraits<decltype(func)>;
static_assert(Traits::NUM_ARGS >= 1, "Insufficient arguments");
if constexpr (Traits::NUM_ARGS == 1) {
Invoke<func, false>(ctx, inst, std::make_index_sequence<0>{});
} else {
using FirstArgType = typename Traits::template ArgType<1>;
static constexpr bool is_first_arg_inst = std::is_same_v<FirstArgType, IR::Inst&>;
using Indices = std::make_index_sequence<Traits::NUM_ARGS - (is_first_arg_inst ? 2 : 1)>;
Invoke<func, is_first_arg_inst>(ctx, inst, Indices{});
}
}
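// Worked example of the dispatch above (illustrative): given an emitter declared as
//   void EmitIAdd32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b);
// FuncTraits reports NUM_ARGS == 4 and ArgType<1> == IR::Inst&, so is_first_arg_inst
// is true, the two IR operands are wrapped (here as ScalarS32 values), and the call
// resolves to func(ctx, *inst, wrapped0.Extract(), wrapped1.Extract()).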
void EmitInst(EmitContext& ctx, IR::Inst* inst) {
switch (inst->GetOpcode()) {
#define OPCODE(name, result_type, ...) \
case IR::Opcode::name: \
return Invoke<&Emit##name>(ctx, inst);
#include "shader_recompiler/frontend/ir/opcodes.inc"
#undef OPCODE
}
throw LogicError("Invalid opcode {}", inst->GetOpcode());
}
bool IsReference(IR::Inst& inst) {
return inst.GetOpcode() == IR::Opcode::Reference;
}
void PrecolorInst(IR::Inst& phi) {
// Insert phi moves before references to avoid overwriting other phis
const size_t num_args{phi.NumArgs()};
for (size_t i = 0; i < num_args; ++i) {
IR::Block& phi_block{*phi.PhiBlock(i)};
auto it{std::find_if_not(phi_block.rbegin(), phi_block.rend(), IsReference).base()};
IR::IREmitter ir{phi_block, it};
const IR::Value arg{phi.Arg(i)};
if (arg.IsImmediate()) {
ir.PhiMove(phi, arg);
} else {
ir.PhiMove(phi, IR::Value{&RegAlloc::AliasInst(*arg.Inst())});
}
}
for (size_t i = 0; i < num_args; ++i) {
IR::IREmitter{*phi.PhiBlock(i)}.Reference(IR::Value{&phi});
}
}
void Precolor(const IR::Program& program) {
for (IR::Block* const block : program.blocks) {
for (IR::Inst& phi : block->Instructions()) {
if (!IR::IsPhi(phi)) {
break;
}
PrecolorInst(phi);
}
}
}
void EmitCode(EmitContext& ctx, const IR::Program& program) {
const auto eval{
[&](const IR::U1& cond) { return ScalarS32{ctx.reg_alloc.Consume(IR::Value{cond})}; }};
for (const IR::AbstractSyntaxNode& node : program.syntax_list) {
switch (node.type) {
case IR::AbstractSyntaxNode::Type::Block:
for (IR::Inst& inst : node.data.block->Instructions()) {
EmitInst(ctx, &inst);
}
break;
case IR::AbstractSyntaxNode::Type::If:
ctx.Add("MOV.S.CC RC,{};"
"IF NE.x;",
eval(node.data.if_node.cond));
break;
case IR::AbstractSyntaxNode::Type::EndIf:
ctx.Add("ENDIF;");
break;
case IR::AbstractSyntaxNode::Type::Loop:
ctx.Add("REP;");
break;
case IR::AbstractSyntaxNode::Type::Repeat:
if (!Settings::values.disable_shader_loop_safety_checks) {
const u32 loop_index{ctx.num_safety_loop_vars++};
const u32 vector_index{loop_index / 4};
const char component{"xyzw"[loop_index % 4]};
ctx.Add("SUB.S.CC loop{}.{},loop{}.{},1;"
"BRK(LT.{});",
vector_index, component, vector_index, component, component);
}
if (node.data.repeat.cond.IsImmediate()) {
if (node.data.repeat.cond.U1()) {
ctx.Add("ENDREP;");
} else {
ctx.Add("BRK;"
"ENDREP;");
}
} else {
ctx.Add("MOV.S.CC RC,{};"
"BRK(EQ.x);"
"ENDREP;",
eval(node.data.repeat.cond));
}
break;
case IR::AbstractSyntaxNode::Type::Break:
if (node.data.break_node.cond.IsImmediate()) {
if (node.data.break_node.cond.U1()) {
ctx.Add("BRK;");
}
} else {
ctx.Add("MOV.S.CC RC,{};"
"BRK (NE.x);",
eval(node.data.break_node.cond));
}
break;
case IR::AbstractSyntaxNode::Type::Return:
case IR::AbstractSyntaxNode::Type::Unreachable:
ctx.Add("RET;");
break;
}
}
if (!ctx.reg_alloc.IsEmpty()) {
LOG_WARNING(Shader_GLASM, "Register leak after generating code");
}
}
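// Shape of the emitted control flow (illustrative): an IR "if" lowers to
//   MOV.S.CC RC,R0.x;
//   IF NE.x;
//   ...block instructions...
//   ENDIF;
// and loops lower to REP;/ENDREP;, with the SUB.S.CC/BRK safety counter
// inserted per Repeat node when shader loop safety checks are enabled.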
void SetupOptions(const IR::Program& program, const Profile& profile,
const RuntimeInfo& runtime_info, std::string& header) {
const Info& info{program.info};
const Stage stage{program.stage};
// TODO: Track the shared atomic ops
header += "OPTION NV_internal;"
"OPTION NV_shader_storage_buffer;"
"OPTION NV_gpu_program_fp64;";
if (info.uses_int64_bit_atomics) {
header += "OPTION NV_shader_atomic_int64;";
}
if (info.uses_atomic_f32_add) {
header += "OPTION NV_shader_atomic_float;";
}
if (info.uses_atomic_f16x2_add || info.uses_atomic_f16x2_min || info.uses_atomic_f16x2_max) {
header += "OPTION NV_shader_atomic_fp16_vector;";
}
if (info.uses_subgroup_invocation_id || info.uses_subgroup_mask || info.uses_subgroup_vote ||
info.uses_fswzadd) {
header += "OPTION NV_shader_thread_group;";
}
if (info.uses_subgroup_shuffles) {
header += "OPTION NV_shader_thread_shuffle;";
}
if (info.uses_sparse_residency) {
header += "OPTION EXT_sparse_texture2;";
}
const bool stores_viewport_layer{info.stores[IR::Attribute::ViewportIndex] ||
info.stores[IR::Attribute::Layer]};
if ((stage != Stage::Geometry && stores_viewport_layer) ||
info.stores[IR::Attribute::ViewportMask]) {
if (profile.support_viewport_index_layer_non_geometry) {
header += "OPTION NV_viewport_array2;";
}
}
if (program.is_geometry_passthrough && profile.support_geometry_shader_passthrough) {
header += "OPTION NV_geometry_shader_passthrough;";
}
if (info.uses_typeless_image_reads && profile.support_typeless_image_loads) {
header += "OPTION EXT_shader_image_load_formatted;";
}
if (profile.support_derivative_control) {
header += "OPTION ARB_derivative_control;";
}
if (stage == Stage::Fragment && runtime_info.force_early_z != 0) {
header += "OPTION NV_early_fragment_tests;";
}
if (stage == Stage::Fragment) {
header += "OPTION ARB_draw_buffers;";
}
}
std::string_view StageHeader(Stage stage) {
switch (stage) {
case Stage::VertexA:
case Stage::VertexB:
return "!!NVvp5.0\n";
case Stage::TessellationControl:
return "!!NVtcp5.0\n";
case Stage::TessellationEval:
return "!!NVtep5.0\n";
case Stage::Geometry:
return "!!NVgp5.0\n";
case Stage::Fragment:
return "!!NVfp5.0\n";
case Stage::Compute:
return "!!NVcp5.0\n";
}
throw InvalidArgument("Invalid stage {}", stage);
}
std::string_view InputPrimitive(InputTopology topology) {
switch (topology) {
case InputTopology::Points:
return "POINTS";
case InputTopology::Lines:
return "LINES";
case InputTopology::LinesAdjacency:
return "LINESS_ADJACENCY";
case InputTopology::Triangles:
return "TRIANGLES";
case InputTopology::TrianglesAdjacency:
return "TRIANGLES_ADJACENCY";
}
throw InvalidArgument("Invalid input topology {}", topology);
}
std::string_view OutputPrimitive(OutputTopology topology) {
switch (topology) {
case OutputTopology::PointList:
return "POINTS";
case OutputTopology::LineStrip:
return "LINE_STRIP";
case OutputTopology::TriangleStrip:
return "TRIANGLE_STRIP";
}
throw InvalidArgument("Invalid output topology {}", topology);
}
std::string_view GetTessMode(TessPrimitive primitive) {
switch (primitive) {
case TessPrimitive::Triangles:
return "TRIANGLES";
case TessPrimitive::Quads:
return "QUADS";
case TessPrimitive::Isolines:
return "ISOLINES";
}
throw InvalidArgument("Invalid tessellation primitive {}", primitive);
}
std::string_view GetTessSpacing(TessSpacing spacing) {
switch (spacing) {
case TessSpacing::Equal:
return "EQUAL";
case TessSpacing::FractionalOdd:
return "FRACTIONAL_ODD";
case TessSpacing::FractionalEven:
return "FRACTIONAL_EVEN";
}
throw InvalidArgument("Invalid tessellation spacing {}", spacing);
}
} // Anonymous namespace
std::string EmitGLASM(const Profile& profile, const RuntimeInfo& runtime_info, IR::Program& program,
Bindings& bindings) {
EmitContext ctx{program, bindings, profile, runtime_info};
Precolor(program);
EmitCode(ctx, program);
std::string header{StageHeader(program.stage)};
SetupOptions(program, profile, runtime_info, header);
switch (program.stage) {
case Stage::TessellationControl:
header += fmt::format("VERTICES_OUT {};", program.invocations);
break;
case Stage::TessellationEval:
header += fmt::format("TESS_MODE {};"
"TESS_SPACING {};"
"TESS_VERTEX_ORDER {};",
GetTessMode(runtime_info.tess_primitive),
GetTessSpacing(runtime_info.tess_spacing),
runtime_info.tess_clockwise ? "CW" : "CCW");
break;
case Stage::Geometry:
header += fmt::format("PRIMITIVE_IN {};", InputPrimitive(runtime_info.input_topology));
if (program.is_geometry_passthrough) {
if (profile.support_geometry_shader_passthrough) {
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
if (program.info.passthrough.Generic(index)) {
header += fmt::format("PASSTHROUGH result.attrib[{}];", index);
}
}
if (program.info.passthrough.AnyComponent(IR::Attribute::PositionX)) {
header += "PASSTHROUGH result.position;";
}
} else {
LOG_WARNING(Shader_GLASM, "Passthrough geometry program used but not supported");
}
} else {
header +=
fmt::format("VERTICES_OUT {};"
"PRIMITIVE_OUT {};",
program.output_vertices, OutputPrimitive(program.output_topology));
}
break;
case Stage::Compute:
header += fmt::format("GROUP_SIZE {} {} {};", program.workgroup_size[0],
program.workgroup_size[1], program.workgroup_size[2]);
break;
default:
break;
}
if (program.shared_memory_size > 0) {
header += fmt::format("SHARED_MEMORY {};", program.shared_memory_size);
header += fmt::format("SHARED shared_mem[]={{program.sharedmem}};");
}
header += "TEMP ";
for (size_t index = 0; index < ctx.reg_alloc.NumUsedRegisters(); ++index) {
header += fmt::format("R{},", index);
}
if (program.local_memory_size > 0) {
header += fmt::format("lmem[{}],", program.local_memory_size);
}
if (program.info.uses_fswzadd) {
header += "FSWZA[4],FSWZB[4],";
}
const u32 num_safety_loop_vectors{Common::DivCeil(ctx.num_safety_loop_vars, 4u)};
for (u32 index = 0; index < num_safety_loop_vectors; ++index) {
header += fmt::format("loop{},", index);
}
header += "RC;"
"LONG TEMP ";
for (size_t index = 0; index < ctx.reg_alloc.NumUsedLongRegisters(); ++index) {
header += fmt::format("D{},", index);
}
header += "DC;";
if (program.info.uses_fswzadd) {
header += "MOV.F FSWZA[0],-1;"
"MOV.F FSWZA[1],1;"
"MOV.F FSWZA[2],-1;"
"MOV.F FSWZA[3],0;"
"MOV.F FSWZB[0],-1;"
"MOV.F FSWZB[1],-1;"
"MOV.F FSWZB[2],1;"
"MOV.F FSWZB[3],-1;";
}
for (u32 index = 0; index < num_safety_loop_vectors; ++index) {
header += fmt::format("MOV.S loop{},{{0x2000,0x2000,0x2000,0x2000}};", index);
}
if (ctx.uses_y_direction) {
header += "PARAM y_direction[1]={state.material.front.ambient};";
}
ctx.code.insert(0, header);
ctx.code += "END";
return ctx.code;
}
} // namespace Shader::Backend::GLASM
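Putting the pieces together, EmitGLASM yields a program of roughly the following shape (an illustrative compute-stage skeleton, not captured output; the option and declaration strings are concatenated onto a single header line):

!!NVcp5.0
OPTION NV_internal;OPTION NV_shader_storage_buffer;OPTION NV_gpu_program_fp64;GROUP_SIZE 8 8 1;TEMP R0,R1,RC;LONG TEMP DC;
...translated instructions, one per line...
END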


@@ -0,0 +1,25 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include "shader_recompiler/backend/bindings.h"
#include "shader_recompiler/frontend/ir/program.h"
#include "shader_recompiler/profile.h"
#include "shader_recompiler/runtime_info.h"
namespace Shader::Backend::GLASM {
[[nodiscard]] std::string EmitGLASM(const Profile& profile, const RuntimeInfo& runtime_info,
IR::Program& program, Bindings& bindings);
[[nodiscard]] inline std::string EmitGLASM(const Profile& profile, const RuntimeInfo& runtime_info,
IR::Program& program) {
Bindings binding;
return EmitGLASM(profile, runtime_info, program, binding);
}
} // namespace Shader::Backend::GLASM
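Call-site sketch (illustrative; the program, profile, and runtime_info objects are assumed to exist): the two-argument overload is a convenience for callers that do not need to share binding counters across stages.

Shader::Backend::Bindings bindings{};
const std::string code{Shader::Backend::GLASM::EmitGLASM(profile, runtime_info, program, bindings)};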


@@ -0,0 +1,91 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
namespace Shader::Backend::GLASM {
static void Alias(IR::Inst& inst, const IR::Value& value) {
if (value.IsImmediate()) {
return;
}
IR::Inst& value_inst{RegAlloc::AliasInst(*value.Inst())};
value_inst.DestructiveAddUsage(inst.UseCount());
value_inst.DestructiveRemoveUsage();
inst.SetDefinition(value_inst.Definition<Id>());
}
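// Net effect of Alias (illustrative): a bit cast emits no GLASM at all. The
// result instruction simply adopts the source's register id, and the source's
// use count is extended so the register allocator keeps that register alive
// for the casting instruction's own users.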
void EmitIdentity(EmitContext&, IR::Inst& inst, const IR::Value& value) {
Alias(inst, value);
}
void EmitConditionRef(EmitContext& ctx, IR::Inst& inst, const IR::Value& value) {
// Fake one usage to get a real register out of the condition
inst.DestructiveAddUsage(1);
const Register ret{ctx.reg_alloc.Define(inst)};
const ScalarS32 input{ctx.reg_alloc.Consume(value)};
if (ret != input) {
ctx.Add("MOV.S {},{};", ret, input);
}
}
void EmitBitCastU16F16(EmitContext&, IR::Inst& inst, const IR::Value& value) {
Alias(inst, value);
}
void EmitBitCastU32F32(EmitContext&, IR::Inst& inst, const IR::Value& value) {
Alias(inst, value);
}
void EmitBitCastU64F64(EmitContext&, IR::Inst& inst, const IR::Value& value) {
Alias(inst, value);
}
void EmitBitCastF16U16(EmitContext&, IR::Inst& inst, const IR::Value& value) {
Alias(inst, value);
}
void EmitBitCastF32U32(EmitContext&, IR::Inst& inst, const IR::Value& value) {
Alias(inst, value);
}
void EmitBitCastF64U64(EmitContext&, IR::Inst& inst, const IR::Value& value) {
Alias(inst, value);
}
void EmitPackUint2x32(EmitContext& ctx, IR::Inst& inst, Register value) {
ctx.LongAdd("PK64.U {}.x,{};", inst, value);
}
void EmitUnpackUint2x32(EmitContext& ctx, IR::Inst& inst, Register value) {
ctx.Add("UP64.U {}.xy,{}.x;", inst, value);
}
void EmitPackFloat2x16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register value) {
throw NotImplementedException("GLASM instruction");
}
void EmitUnpackFloat2x16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register value) {
throw NotImplementedException("GLASM instruction");
}
void EmitPackHalf2x16(EmitContext& ctx, IR::Inst& inst, Register value) {
ctx.Add("PK2H {}.x,{};", inst, value);
}
void EmitUnpackHalf2x16(EmitContext& ctx, IR::Inst& inst, Register value) {
ctx.Add("UP2H {}.xy,{}.x;", inst, value);
}
void EmitPackDouble2x32(EmitContext& ctx, IR::Inst& inst, Register value) {
ctx.LongAdd("PK64 {}.x,{};", inst, value);
}
void EmitUnpackDouble2x32(EmitContext& ctx, IR::Inst& inst, Register value) {
ctx.Add("UP64 {}.xy,{}.x;", inst, value);
}
} // namespace Shader::Backend::GLASM


@@ -0,0 +1,244 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
namespace Shader::Backend::GLASM {
namespace {
template <auto read_imm, char type, typename... Values>
void CompositeConstruct(EmitContext& ctx, IR::Inst& inst, Values&&... elements) {
const Register ret{ctx.reg_alloc.Define(inst)};
if (std::ranges::any_of(std::array{elements...},
[](const IR::Value& value) { return value.IsImmediate(); })) {
using Type = std::invoke_result_t<decltype(read_imm), IR::Value>;
const std::array<Type, 4> values{(elements.IsImmediate() ? (elements.*read_imm)() : 0)...};
ctx.Add("MOV.{} {},{{{},{},{},{}}};", type, ret, fmt::to_string(values[0]),
fmt::to_string(values[1]), fmt::to_string(values[2]), fmt::to_string(values[3]));
}
size_t index{};
for (const IR::Value& element : {elements...}) {
if (!element.IsImmediate()) {
const ScalarU32 value{ctx.reg_alloc.Consume(element)};
ctx.Add("MOV.{} {}.{},{};", type, ret, "xyzw"[index], value);
}
++index;
}
}
void CompositeExtract(EmitContext& ctx, IR::Inst& inst, Register composite, u32 index, char type) {
const Register ret{ctx.reg_alloc.Define(inst)};
if (ret == composite && index == 0) {
// No need to do anything here; the source and destination are the same register
return;
}
ctx.Add("MOV.{} {}.x,{}.{};", type, ret, composite, "xyzw"[index]);
}
template <typename ObjectType>
void CompositeInsert(EmitContext& ctx, IR::Inst& inst, Register composite, ObjectType object,
u32 index, char type) {
const Register ret{ctx.reg_alloc.Define(inst)};
const char swizzle{"xyzw"[index]};
if (ret != composite && ret == object) {
// The object is aliased with the return value, so insert through a temporary
ctx.Add("MOV.{} RC,{};"
"MOV.{} RC.{},{};"
"MOV.{} {},RC;",
type, composite, type, swizzle, object, type, ret);
} else if (ret != composite) {
// The input composite is not aliased with the return value, so copy it beforehand.
// The inserted object is not aliased with the return value either, so nothing else
// needs special care
ctx.Add("MOV.{} {},{};"
"MOV.{} {}.{},{};",
type, ret, composite, type, ret, swizzle, object);
} else {
// The return value aliases the input composite, so we can insert the object
// directly; it does not matter whether the object is aliased as well
ctx.Add("MOV.{} {}.{},{};", type, ret, swizzle, object);
}
}
} // Anonymous namespace
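// Worked example (illustrative): CompositeConstructU32x2 with e1 = immediate 5
// and e2 held in a register first writes every immediate lane in one vector move,
//   MOV.U R0,{5,0,0,0};
// and then patches the register-backed lane,
//   MOV.U R0.y,R1.x;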
void EmitCompositeConstructU32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& e1,
const IR::Value& e2) {
CompositeConstruct<&IR::Value::U32, 'U'>(ctx, inst, e1, e2);
}
void EmitCompositeConstructU32x3(EmitContext& ctx, IR::Inst& inst, const IR::Value& e1,
const IR::Value& e2, const IR::Value& e3) {
CompositeConstruct<&IR::Value::U32, 'U'>(ctx, inst, e1, e2, e3);
}
void EmitCompositeConstructU32x4(EmitContext& ctx, IR::Inst& inst, const IR::Value& e1,
const IR::Value& e2, const IR::Value& e3, const IR::Value& e4) {
CompositeConstruct<&IR::Value::U32, 'U'>(ctx, inst, e1, e2, e3, e4);
}
void EmitCompositeExtractU32x2(EmitContext& ctx, IR::Inst& inst, Register composite, u32 index) {
CompositeExtract(ctx, inst, composite, index, 'U');
}
void EmitCompositeExtractU32x3(EmitContext& ctx, IR::Inst& inst, Register composite, u32 index) {
CompositeExtract(ctx, inst, composite, index, 'U');
}
void EmitCompositeExtractU32x4(EmitContext& ctx, IR::Inst& inst, Register composite, u32 index) {
CompositeExtract(ctx, inst, composite, index, 'U');
}
void EmitCompositeInsertU32x2([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] Register composite,
[[maybe_unused]] ScalarU32 object, [[maybe_unused]] u32 index) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeInsertU32x3([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] Register composite,
[[maybe_unused]] ScalarU32 object, [[maybe_unused]] u32 index) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeInsertU32x4([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] Register composite,
[[maybe_unused]] ScalarU32 object, [[maybe_unused]] u32 index) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeConstructF16x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register e1,
[[maybe_unused]] Register e2) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeConstructF16x3([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register e1,
[[maybe_unused]] Register e2, [[maybe_unused]] Register e3) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeConstructF16x4([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register e1,
[[maybe_unused]] Register e2, [[maybe_unused]] Register e3,
[[maybe_unused]] Register e4) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeExtractF16x2([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] Register composite, [[maybe_unused]] u32 index) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeExtractF16x3([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] Register composite, [[maybe_unused]] u32 index) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeExtractF16x4([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] Register composite, [[maybe_unused]] u32 index) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeInsertF16x2([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] Register composite, [[maybe_unused]] Register object,
[[maybe_unused]] u32 index) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeInsertF16x3([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] Register composite, [[maybe_unused]] Register object,
[[maybe_unused]] u32 index) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeInsertF16x4([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] Register composite, [[maybe_unused]] Register object,
[[maybe_unused]] u32 index) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeConstructF32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& e1,
const IR::Value& e2) {
CompositeConstruct<&IR::Value::F32, 'F'>(ctx, inst, e1, e2);
}
void EmitCompositeConstructF32x3(EmitContext& ctx, IR::Inst& inst, const IR::Value& e1,
const IR::Value& e2, const IR::Value& e3) {
CompositeConstruct<&IR::Value::F32, 'F'>(ctx, inst, e1, e2, e3);
}
void EmitCompositeConstructF32x4(EmitContext& ctx, IR::Inst& inst, const IR::Value& e1,
const IR::Value& e2, const IR::Value& e3, const IR::Value& e4) {
CompositeConstruct<&IR::Value::F32, 'F'>(ctx, inst, e1, e2, e3, e4);
}
void EmitCompositeExtractF32x2(EmitContext& ctx, IR::Inst& inst, Register composite, u32 index) {
CompositeExtract(ctx, inst, composite, index, 'F');
}
void EmitCompositeExtractF32x3(EmitContext& ctx, IR::Inst& inst, Register composite, u32 index) {
CompositeExtract(ctx, inst, composite, index, 'F');
}
void EmitCompositeExtractF32x4(EmitContext& ctx, IR::Inst& inst, Register composite, u32 index) {
CompositeExtract(ctx, inst, composite, index, 'F');
}
void EmitCompositeInsertF32x2(EmitContext& ctx, IR::Inst& inst, Register composite,
ScalarF32 object, u32 index) {
CompositeInsert(ctx, inst, composite, object, index, 'F');
}
void EmitCompositeInsertF32x3(EmitContext& ctx, IR::Inst& inst, Register composite,
ScalarF32 object, u32 index) {
CompositeInsert(ctx, inst, composite, object, index, 'F');
}
void EmitCompositeInsertF32x4(EmitContext& ctx, IR::Inst& inst, Register composite,
ScalarF32 object, u32 index) {
CompositeInsert(ctx, inst, composite, object, index, 'F');
}
void EmitCompositeConstructF64x2([[maybe_unused]] EmitContext& ctx) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeConstructF64x3([[maybe_unused]] EmitContext& ctx) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeConstructF64x4([[maybe_unused]] EmitContext& ctx) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeExtractF64x2([[maybe_unused]] EmitContext& ctx) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeExtractF64x3([[maybe_unused]] EmitContext& ctx) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeExtractF64x4([[maybe_unused]] EmitContext& ctx) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeInsertF64x2([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] Register composite, [[maybe_unused]] Register object,
[[maybe_unused]] u32 index) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeInsertF64x3([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] Register composite, [[maybe_unused]] Register object,
[[maybe_unused]] u32 index) {
throw NotImplementedException("GLASM instruction");
}
void EmitCompositeInsertF64x4([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] Register composite, [[maybe_unused]] Register object,
[[maybe_unused]] u32 index) {
throw NotImplementedException("GLASM instruction");
}
} // namespace Shader::Backend::GLASM


@@ -0,0 +1,346 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/profile.h"
#include "shader_recompiler/shader_info.h"
namespace Shader::Backend::GLASM {
namespace {
void GetCbuf(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset,
std::string_view size) {
if (!binding.IsImmediate()) {
throw NotImplementedException("Indirect constant buffer loading");
}
const Register ret{ctx.reg_alloc.Define(inst)};
if (offset.type == Type::U32) {
// Avoid reading arrays out of bounds, matching hardware's behavior
if (offset.imm_u32 >= 0x10'000) {
ctx.Add("MOV.S {},0;", ret);
return;
}
}
ctx.Add("LDC.{} {},c{}[{}];", size, ret, binding.U32(), offset);
}
bool IsInputArray(Stage stage) {
return stage == Stage::Geometry || stage == Stage::TessellationControl ||
stage == Stage::TessellationEval;
}
std::string VertexIndex(EmitContext& ctx, ScalarU32 vertex) {
return IsInputArray(ctx.stage) ? fmt::format("[{}]", vertex) : "";
}
u32 TexCoordIndex(IR::Attribute attr) {
return (static_cast<u32>(attr) - static_cast<u32>(IR::Attribute::FixedFncTexture0S)) / 4;
}
} // Anonymous namespace
void EmitGetCbufU8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset) {
GetCbuf(ctx, inst, binding, offset, "U8");
}
void EmitGetCbufS8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset) {
GetCbuf(ctx, inst, binding, offset, "S8");
}
void EmitGetCbufU16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset) {
GetCbuf(ctx, inst, binding, offset, "U16");
}
void EmitGetCbufS16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset) {
GetCbuf(ctx, inst, binding, offset, "S16");
}
void EmitGetCbufU32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset) {
GetCbuf(ctx, inst, binding, offset, "U32");
}
void EmitGetCbufF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset) {
GetCbuf(ctx, inst, binding, offset, "F32");
}
void EmitGetCbufU32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset) {
GetCbuf(ctx, inst, binding, offset, "U32X2");
}
void EmitGetAttribute(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr, ScalarU32 vertex) {
const u32 element{static_cast<u32>(attr) % 4};
const char swizzle{"xyzw"[element]};
if (IR::IsGeneric(attr)) {
const u32 index{IR::GenericAttributeIndex(attr)};
ctx.Add("MOV.F {}.x,in_attr{}{}[0].{};", inst, index, VertexIndex(ctx, vertex), swizzle);
return;
}
if (attr >= IR::Attribute::FixedFncTexture0S && attr <= IR::Attribute::FixedFncTexture9Q) {
const u32 index{TexCoordIndex(attr)};
ctx.Add("MOV.F {}.x,{}.texcoord[{}].{};", inst, ctx.attrib_name, index, swizzle);
return;
}
switch (attr) {
case IR::Attribute::PrimitiveId:
ctx.Add("MOV.S {}.x,primitive.id;", inst);
break;
case IR::Attribute::PositionX:
case IR::Attribute::PositionY:
case IR::Attribute::PositionZ:
case IR::Attribute::PositionW:
if (IsInputArray(ctx.stage)) {
ctx.Add("MOV.F {}.x,vertex_position{}.{};", inst, VertexIndex(ctx, vertex), swizzle);
} else {
ctx.Add("MOV.F {}.x,{}.position.{};", inst, ctx.attrib_name, swizzle);
}
break;
case IR::Attribute::ColorFrontDiffuseR:
case IR::Attribute::ColorFrontDiffuseG:
case IR::Attribute::ColorFrontDiffuseB:
case IR::Attribute::ColorFrontDiffuseA:
ctx.Add("MOV.F {}.x,{}.color.{};", inst, ctx.attrib_name, swizzle);
break;
case IR::Attribute::PointSpriteS:
case IR::Attribute::PointSpriteT:
ctx.Add("MOV.F {}.x,{}.pointcoord.{};", inst, ctx.attrib_name, swizzle);
break;
case IR::Attribute::TessellationEvaluationPointU:
case IR::Attribute::TessellationEvaluationPointV:
ctx.Add("MOV.F {}.x,vertex.tesscoord.{};", inst, swizzle);
break;
case IR::Attribute::InstanceId:
ctx.Add("MOV.S {}.x,{}.instance;", inst, ctx.attrib_name);
break;
case IR::Attribute::VertexId:
ctx.Add("MOV.S {}.x,{}.id;", inst, ctx.attrib_name);
break;
case IR::Attribute::FrontFace:
ctx.Add("CMP.S {}.x,{}.facing.x,0,-1;", inst, ctx.attrib_name);
break;
default:
throw NotImplementedException("Get attribute {}", attr);
}
}
void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, ScalarF32 value,
[[maybe_unused]] ScalarU32 vertex) {
const u32 element{static_cast<u32>(attr) % 4};
const char swizzle{"xyzw"[element]};
if (IR::IsGeneric(attr)) {
const u32 index{IR::GenericAttributeIndex(attr)};
ctx.Add("MOV.F out_attr{}[0].{},{};", index, swizzle, value);
return;
}
if (attr >= IR::Attribute::FixedFncTexture0S && attr <= IR::Attribute::FixedFncTexture9Q) {
const u32 index{TexCoordIndex(attr)};
ctx.Add("MOV.F result.texcoord[{}].{},{};", index, swizzle, value);
return;
}
switch (attr) {
case IR::Attribute::Layer:
if (ctx.stage == Stage::Geometry || ctx.profile.support_viewport_index_layer_non_geometry) {
ctx.Add("MOV.F result.layer.x,{};", value);
} else {
LOG_WARNING(Shader_GLASM,
"Layer stored outside of geometry shader not supported by device");
}
break;
case IR::Attribute::ViewportIndex:
if (ctx.stage == Stage::Geometry || ctx.profile.support_viewport_index_layer_non_geometry) {
ctx.Add("MOV.F result.viewport.x,{};", value);
} else {
LOG_WARNING(Shader_GLASM,
"Viewport stored outside of geometry shader not supported by device");
}
break;
case IR::Attribute::ViewportMask:
// NV_viewport_array2 is required to access result.viewportmask, regardless of shader stage.
if (ctx.profile.support_viewport_index_layer_non_geometry) {
ctx.Add("MOV.F result.viewportmask[0].x,{};", value);
} else {
LOG_WARNING(Shader_GLASM, "Device does not support storing to ViewportMask");
}
break;
case IR::Attribute::PointSize:
ctx.Add("MOV.F result.pointsize.x,{};", value);
break;
case IR::Attribute::PositionX:
case IR::Attribute::PositionY:
case IR::Attribute::PositionZ:
case IR::Attribute::PositionW:
ctx.Add("MOV.F result.position.{},{};", swizzle, value);
break;
case IR::Attribute::ColorFrontDiffuseR:
case IR::Attribute::ColorFrontDiffuseG:
case IR::Attribute::ColorFrontDiffuseB:
case IR::Attribute::ColorFrontDiffuseA:
ctx.Add("MOV.F result.color.{},{};", swizzle, value);
break;
case IR::Attribute::ColorFrontSpecularR:
case IR::Attribute::ColorFrontSpecularG:
case IR::Attribute::ColorFrontSpecularB:
case IR::Attribute::ColorFrontSpecularA:
ctx.Add("MOV.F result.color.secondary.{},{};", swizzle, value);
break;
case IR::Attribute::ColorBackDiffuseR:
case IR::Attribute::ColorBackDiffuseG:
case IR::Attribute::ColorBackDiffuseB:
case IR::Attribute::ColorBackDiffuseA:
ctx.Add("MOV.F result.color.back.{},{};", swizzle, value);
break;
case IR::Attribute::ColorBackSpecularR:
case IR::Attribute::ColorBackSpecularG:
case IR::Attribute::ColorBackSpecularB:
case IR::Attribute::ColorBackSpecularA:
ctx.Add("MOV.F result.color.back.secondary.{},{};", swizzle, value);
break;
case IR::Attribute::FogCoordinate:
ctx.Add("MOV.F result.fogcoord.x,{};", value);
break;
case IR::Attribute::ClipDistance0:
case IR::Attribute::ClipDistance1:
case IR::Attribute::ClipDistance2:
case IR::Attribute::ClipDistance3:
case IR::Attribute::ClipDistance4:
case IR::Attribute::ClipDistance5:
case IR::Attribute::ClipDistance6:
case IR::Attribute::ClipDistance7: {
const u32 index{static_cast<u32>(attr) - static_cast<u32>(IR::Attribute::ClipDistance0)};
ctx.Add("MOV.F result.clip[{}].x,{};", index, value);
break;
}
default:
throw NotImplementedException("Set attribute {}", attr);
}
}
void EmitGetAttributeIndexed(EmitContext& ctx, IR::Inst& inst, ScalarS32 offset, ScalarU32 vertex) {
// RC.x = base_index
// RC.y = masked_index
// RC.z = compare_index
ctx.Add("SHR.S RC.x,{},2;"
"AND.S RC.y,RC.x,3;"
"SHR.S RC.z,{},4;",
offset, offset);
const Register ret{ctx.reg_alloc.Define(inst)};
u32 num_endifs{};
const auto read{[&](u32 compare_index, const std::array<std::string, 4>& values) {
++num_endifs;
ctx.Add("SEQ.S.CC RC.w,RC.z,{};" // compare_index
"IF NE.w;"
// X
"SEQ.S.CC RC.w,RC.y,0;"
"IF NE.w;"
"MOV {}.x,{};"
"ELSE;"
// Y
"SEQ.S.CC RC.w,RC.y,1;"
"IF NE.w;"
"MOV {}.x,{};"
"ELSE;"
// Z
"SEQ.S.CC RC.w,RC.y,2;"
"IF NE.w;"
"MOV {}.x,{};"
"ELSE;"
// W
"MOV {}.x,{};"
"ENDIF;"
"ENDIF;"
"ENDIF;"
"ELSE;",
compare_index, ret, values[0], ret, values[1], ret, values[2], ret, values[3]);
}};
const auto read_swizzled{[&](u32 compare_index, std::string_view value) {
const std::array values{fmt::format("{}.x", value), fmt::format("{}.y", value),
fmt::format("{}.z", value), fmt::format("{}.w", value)};
read(compare_index, values);
}};
if (ctx.info.loads.AnyComponent(IR::Attribute::PositionX)) {
const u32 index{static_cast<u32>(IR::Attribute::PositionX)};
if (IsInputArray(ctx.stage)) {
read_swizzled(index, fmt::format("vertex_position{}", VertexIndex(ctx, vertex)));
} else {
read_swizzled(index, fmt::format("{}.position", ctx.attrib_name));
}
}
for (u32 index = 0; index < static_cast<u32>(IR::NUM_GENERICS); ++index) {
if (!ctx.info.loads.Generic(index)) {
continue;
}
read_swizzled(index, fmt::format("in_attr{}{}[0]", index, VertexIndex(ctx, vertex)));
}
for (u32 i = 0; i < num_endifs; ++i) {
ctx.Add("ENDIF;");
}
}
void EmitSetAttributeIndexed([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] ScalarU32 offset,
[[maybe_unused]] ScalarF32 value, [[maybe_unused]] ScalarU32 vertex) {
throw NotImplementedException("GLASM instruction");
}
void EmitGetPatch(EmitContext& ctx, IR::Inst& inst, IR::Patch patch) {
if (!IR::IsGeneric(patch)) {
throw NotImplementedException("Non-generic patch load");
}
const u32 index{IR::GenericPatchIndex(patch)};
const u32 element{IR::GenericPatchElement(patch)};
const char swizzle{"xyzw"[element]};
const std::string_view out{ctx.stage == Stage::TessellationControl ? ".out" : ""};
ctx.Add("MOV.F {},primitive{}.patch.attrib[{}].{};", inst, out, index, swizzle);
}
void EmitSetPatch(EmitContext& ctx, IR::Patch patch, ScalarF32 value) {
if (IR::IsGeneric(patch)) {
const u32 index{IR::GenericPatchIndex(patch)};
const u32 element{IR::GenericPatchElement(patch)};
ctx.Add("MOV.F result.patch.attrib[{}].{},{};", index, "xyzw"[element], value);
return;
}
switch (patch) {
case IR::Patch::TessellationLodLeft:
case IR::Patch::TessellationLodRight:
case IR::Patch::TessellationLodTop:
case IR::Patch::TessellationLodBottom: {
const u32 index{static_cast<u32>(patch) - static_cast<u32>(IR::Patch::TessellationLodLeft)};
ctx.Add("MOV.F result.patch.tessouter[{}].x,{};", index, value);
break;
}
case IR::Patch::TessellationLodInteriorU:
ctx.Add("MOV.F result.patch.tessinner[0].x,{};", value);
break;
case IR::Patch::TessellationLodInteriorV:
ctx.Add("MOV.F result.patch.tessinner[1].x,{};", value);
break;
default:
throw NotImplementedException("Patch {}", patch);
}
}
void EmitSetFragColor(EmitContext& ctx, u32 index, u32 component, ScalarF32 value) {
ctx.Add("MOV.F frag_color{}.{},{};", index, "xyzw"[component], value);
}
void EmitSetSampleMask(EmitContext& ctx, ScalarS32 value) {
ctx.Add("MOV.S result.samplemask.x,{};", value);
}
void EmitSetFragDepth(EmitContext& ctx, ScalarF32 value) {
ctx.Add("MOV.F result.depth.z,{};", value);
}
void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, ScalarU32 word_offset) {
ctx.Add("MOV.U {},lmem[{}].x;", inst, word_offset);
}
void EmitWriteLocal(EmitContext& ctx, ScalarU32 word_offset, ScalarU32 value) {
ctx.Add("MOV.U lmem[{}].x,{};", word_offset, value);
}
} // namespace Shader::Backend::GLASM
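For reference, the constant-buffer path above reduces to a single indexed load; illustratively, EmitGetCbufU32 with binding 0 and an immediate offset of 16 emits roughly:

LDC.U32 R0,c0[16];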


@@ -0,0 +1,231 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/frontend/ir/modifiers.h"
#include "shader_recompiler/frontend/ir/value.h"
namespace Shader::Backend::GLASM {
namespace {
std::string_view FpRounding(IR::FpRounding fp_rounding) {
switch (fp_rounding) {
case IR::FpRounding::DontCare:
return "";
case IR::FpRounding::RN:
return ".ROUND";
case IR::FpRounding::RZ:
return ".TRUNC";
case IR::FpRounding::RM:
return ".FLR";
case IR::FpRounding::RP:
return ".CEIL";
}
throw InvalidArgument("Invalid floating-point rounding {}", fp_rounding);
}
template <typename InputType>
void Convert(EmitContext& ctx, IR::Inst& inst, InputType value, std::string_view dest,
std::string_view src, bool is_long_result) {
const std::string_view fp_rounding{FpRounding(inst.Flags<IR::FpControl>().rounding)};
const auto ret{is_long_result ? ctx.reg_alloc.LongDefine(inst) : ctx.reg_alloc.Define(inst)};
ctx.Add("CVT.{}.{}{} {}.x,{};", dest, src, fp_rounding, ret, value);
}
} // Anonymous namespace
void EmitConvertS16F16(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "S16", "F16", false);
}
void EmitConvertS16F32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
Convert(ctx, inst, value, "S16", "F32", false);
}
void EmitConvertS16F64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value) {
Convert(ctx, inst, value, "S16", "F64", false);
}
void EmitConvertS32F16(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "S32", "F16", false);
}
void EmitConvertS32F32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
Convert(ctx, inst, value, "S32", "F32", false);
}
void EmitConvertS32F64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value) {
Convert(ctx, inst, value, "S32", "F64", false);
}
void EmitConvertS64F16(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "S64", "F16", true);
}
void EmitConvertS64F32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
Convert(ctx, inst, value, "S64", "F32", true);
}
void EmitConvertS64F64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value) {
Convert(ctx, inst, value, "S64", "F64", true);
}
void EmitConvertU16F16(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "U16", "F16", false);
}
void EmitConvertU16F32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
Convert(ctx, inst, value, "U16", "F32", false);
}
void EmitConvertU16F64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value) {
Convert(ctx, inst, value, "U16", "F64", false);
}
void EmitConvertU32F16(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "U32", "F16", false);
}
void EmitConvertU32F32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
Convert(ctx, inst, value, "U32", "F32", false);
}
void EmitConvertU32F64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value) {
Convert(ctx, inst, value, "U32", "F64", false);
}
void EmitConvertU64F16(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "U64", "F16", true);
}
void EmitConvertU64F32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
Convert(ctx, inst, value, "U64", "F32", true);
}
void EmitConvertU64F64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value) {
Convert(ctx, inst, value, "U64", "F64", true);
}
void EmitConvertU64U32(EmitContext& ctx, IR::Inst& inst, ScalarU32 value) {
Convert(ctx, inst, value, "U64", "U32", true);
}
void EmitConvertU32U64(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "U32", "U64", false);
}
void EmitConvertF16F32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
Convert(ctx, inst, value, "F16", "F32", false);
}
void EmitConvertF32F16(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F32", "F16", false);
}
void EmitConvertF32F64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value) {
Convert(ctx, inst, value, "F32", "F64", false);
}
void EmitConvertF64F32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
Convert(ctx, inst, value, "F64", "F32", true);
}
void EmitConvertF16S8(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F16", "S8", false);
}
void EmitConvertF16S16(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F16", "S16", false);
}
void EmitConvertF16S32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
Convert(ctx, inst, value, "F16", "S32", false);
}
void EmitConvertF16S64(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F16", "S64", false);
}
void EmitConvertF16U8(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F16", "U8", false);
}
void EmitConvertF16U16(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F16", "U16", false);
}
void EmitConvertF16U32(EmitContext& ctx, IR::Inst& inst, ScalarU32 value) {
Convert(ctx, inst, value, "F16", "U32", false);
}
void EmitConvertF16U64(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F16", "U64", false);
}
void EmitConvertF32S8(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F32", "S8", false);
}
void EmitConvertF32S16(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F32", "S16", false);
}
void EmitConvertF32S32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
Convert(ctx, inst, value, "F32", "S32", false);
}
void EmitConvertF32S64(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F32", "S64", false);
}
void EmitConvertF32U8(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F32", "U8", false);
}
void EmitConvertF32U16(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F32", "U16", false);
}
void EmitConvertF32U32(EmitContext& ctx, IR::Inst& inst, ScalarU32 value) {
Convert(ctx, inst, value, "F32", "U32", false);
}
void EmitConvertF32U64(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F32", "U64", false);
}
void EmitConvertF64S8(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F64", "S8", true);
}
void EmitConvertF64S16(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F64", "S16", true);
}
void EmitConvertF64S32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
Convert(ctx, inst, value, "F64", "S32", true);
}
void EmitConvertF64S64(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F64", "S64", true);
}
void EmitConvertF64U8(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F64", "U8", true);
}
void EmitConvertF64U16(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F64", "U16", true);
}
void EmitConvertF64U32(EmitContext& ctx, IR::Inst& inst, ScalarU32 value) {
Convert(ctx, inst, value, "F64", "U32", true);
}
void EmitConvertF64U64(EmitContext& ctx, IR::Inst& inst, Register value) {
Convert(ctx, inst, value, "F64", "U64", true);
}
} // namespace Shader::Backend::GLASM
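The Convert helper concatenates the destination type, source type, and rounding suffix into one CVT opcode; illustratively, EmitConvertS32F32 under round-toward-zero emits roughly:

CVT.S32.F32.TRUNC R0.x,R1.x;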


@@ -0,0 +1,414 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/frontend/ir/modifiers.h"
#include "shader_recompiler/frontend/ir/value.h"
namespace Shader::Backend::GLASM {
namespace {
template <typename InputType>
void Compare(EmitContext& ctx, IR::Inst& inst, InputType lhs, InputType rhs, std::string_view op,
std::string_view type, bool ordered, bool inequality = false) {
const Register ret{ctx.reg_alloc.Define(inst)};
ctx.Add("{}.{} RC.x,{},{};", op, type, lhs, rhs);
if (ordered && inequality) {
ctx.Add("SEQ.{} RC.y,{},{};"
"SEQ.{} RC.z,{},{};"
"AND.U RC.x,RC.x,RC.y;"
"AND.U RC.x,RC.x,RC.z;"
"SNE.S {}.x,RC.x,0;",
type, lhs, lhs, type, rhs, rhs, ret);
} else if (ordered) {
ctx.Add("SNE.S {}.x,RC.x,0;", ret);
} else {
ctx.Add("SNE.{} RC.y,{},{};"
"SNE.{} RC.z,{},{};"
"OR.U RC.x,RC.x,RC.y;"
"OR.U RC.x,RC.x,RC.z;"
"SNE.S {}.x,RC.x,0;",
type, lhs, lhs, type, rhs, rhs, ret);
}
}
template <typename InputType>
void Clamp(EmitContext& ctx, Register ret, InputType value, InputType min_value,
InputType max_value, std::string_view type) {
// Call MAX first so that a NaN input is clamped to min_value rather than propagated
ctx.Add("MAX.{} RC.x,{},{};"
"MIN.{} {}.x,RC.x,{};",
type, min_value, value, type, ret, max_value);
}
std::string_view Precise(IR::Inst& inst) {
const bool precise{inst.Flags<IR::FpControl>().no_contraction};
return precise ? ".PREC" : "";
}
} // Anonymous namespace
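// Comparison lowering at a glance (illustrative): EmitFPOrdEqual32 emits
//   SEQ.F RC.x,R1.x,R2.x;
//   SNE.S R0.x,RC.x,0;
// while the unordered variants append SNE self-comparisons of both operands
// and OR the NaN results into RC.x before the final SNE.S booleanization.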
void EmitFPAbs16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] Register value) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPAbs32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
ctx.Add("MOV.F {}.x,|{}|;", inst, value);
}
void EmitFPAbs64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value) {
ctx.LongAdd("MOV.F64 {}.x,|{}|;", inst, value);
}
void EmitFPAdd16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] Register a, [[maybe_unused]] Register b) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPAdd32(EmitContext& ctx, IR::Inst& inst, ScalarF32 a, ScalarF32 b) {
ctx.Add("ADD.F{} {}.x,{},{};", Precise(inst), ctx.reg_alloc.Define(inst), a, b);
}
void EmitFPAdd64(EmitContext& ctx, IR::Inst& inst, ScalarF64 a, ScalarF64 b) {
ctx.Add("ADD.F64{} {}.x,{},{};", Precise(inst), ctx.reg_alloc.LongDefine(inst), a, b);
}
void EmitFPFma16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] Register a, [[maybe_unused]] Register b,
[[maybe_unused]] Register c) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPFma32(EmitContext& ctx, IR::Inst& inst, ScalarF32 a, ScalarF32 b, ScalarF32 c) {
ctx.Add("MAD.F{} {}.x,{},{},{};", Precise(inst), ctx.reg_alloc.Define(inst), a, b, c);
}
void EmitFPFma64(EmitContext& ctx, IR::Inst& inst, ScalarF64 a, ScalarF64 b, ScalarF64 c) {
ctx.Add("MAD.F64{} {}.x,{},{},{};", Precise(inst), ctx.reg_alloc.LongDefine(inst), a, b, c);
}
void EmitFPMax32(EmitContext& ctx, IR::Inst& inst, ScalarF32 a, ScalarF32 b) {
ctx.Add("MAX.F {}.x,{},{};", inst, a, b);
}
void EmitFPMax64(EmitContext& ctx, IR::Inst& inst, ScalarF64 a, ScalarF64 b) {
ctx.LongAdd("MAX.F64 {}.x,{},{};", inst, a, b);
}
void EmitFPMin32(EmitContext& ctx, IR::Inst& inst, ScalarF32 a, ScalarF32 b) {
ctx.Add("MIN.F {}.x,{},{};", inst, a, b);
}
void EmitFPMin64(EmitContext& ctx, IR::Inst& inst, ScalarF64 a, ScalarF64 b) {
ctx.LongAdd("MIN.F64 {}.x,{},{};", inst, a, b);
}
void EmitFPMul16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] Register a, [[maybe_unused]] Register b) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPMul32(EmitContext& ctx, IR::Inst& inst, ScalarF32 a, ScalarF32 b) {
ctx.Add("MUL.F{} {}.x,{},{};", Precise(inst), ctx.reg_alloc.Define(inst), a, b);
}
void EmitFPMul64(EmitContext& ctx, IR::Inst& inst, ScalarF64 a, ScalarF64 b) {
ctx.Add("MUL.F64{} {}.x,{},{};", Precise(inst), ctx.reg_alloc.LongDefine(inst), a, b);
}
void EmitFPNeg16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register value) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPNeg32(EmitContext& ctx, IR::Inst& inst, ScalarRegister value) {
ctx.Add("MOV.F {}.x,-{};", inst, value);
}
void EmitFPNeg64(EmitContext& ctx, IR::Inst& inst, Register value) {
ctx.LongAdd("MOV.F64 {}.x,-{};", inst, value);
}
void EmitFPSin(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
ctx.Add("SIN {}.x,{};", inst, value);
}
void EmitFPCos(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
ctx.Add("COS {}.x,{};", inst, value);
}
void EmitFPExp2(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
ctx.Add("EX2 {}.x,{};", inst, value);
}
void EmitFPLog2(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
ctx.Add("LG2 {}.x,{};", inst, value);
}
void EmitFPRecip32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
ctx.Add("RCP {}.x,{};", inst, value);
}
void EmitFPRecip64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register value) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPRecipSqrt32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
ctx.Add("RSQ {}.x,{};", inst, value);
}
void EmitFPRecipSqrt64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register value) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPSqrt(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
const Register ret{ctx.reg_alloc.Define(inst)};
ctx.Add("RSQ RC.x,{};RCP {}.x,RC.x;", value, ret);
}
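// Note: this GLASM target has no dedicated square-root opcode here, so FPSqrt
// is synthesized as the reciprocal of the reciprocal square root through the
// RC scratch register, i.e. sqrt(x) == RCP(RSQ(x)). Illustrative emission for
// an input R0.x and destination R1 (register names hypothetical):
//   RSQ RC.x,R0.x;
//   RCP R1.x,RC.x;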
void EmitFPSaturate16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register value) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPSaturate32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
ctx.Add("MOV.F.SAT {}.x,{};", inst, value);
}
void EmitFPSaturate64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register value) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPClamp16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register value,
[[maybe_unused]] Register min_value, [[maybe_unused]] Register max_value) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPClamp32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value, ScalarF32 min_value,
ScalarF32 max_value) {
Clamp(ctx, ctx.reg_alloc.Define(inst), value, min_value, max_value, "F");
}
void EmitFPClamp64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value, ScalarF64 min_value,
ScalarF64 max_value) {
Clamp(ctx, ctx.reg_alloc.LongDefine(inst), value, min_value, max_value, "F64");
}
void EmitFPRoundEven16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register value) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPRoundEven32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
ctx.Add("ROUND.F {}.x,{};", inst, value);
}
void EmitFPRoundEven64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value) {
ctx.LongAdd("ROUND.F64 {}.x,{};", inst, value);
}
void EmitFPFloor16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register value) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPFloor32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
ctx.Add("FLR.F {}.x,{};", inst, value);
}
void EmitFPFloor64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value) {
ctx.LongAdd("FLR.F64 {}.x,{};", inst, value);
}
void EmitFPCeil16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register value) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPCeil32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
ctx.Add("CEIL.F {}.x,{};", inst, value);
}
void EmitFPCeil64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value) {
ctx.LongAdd("CEIL.F64 {}.x,{};", inst, value);
}
void EmitFPTrunc16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register value) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPTrunc32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
ctx.Add("TRUNC.F {}.x,{};", inst, value);
}
void EmitFPTrunc64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value) {
ctx.LongAdd("TRUNC.F64 {}.x,{};", inst, value);
}
void EmitFPOrdEqual16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register lhs,
[[maybe_unused]] Register rhs) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPOrdEqual32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs) {
Compare(ctx, inst, lhs, rhs, "SEQ", "F", true);
}
void EmitFPOrdEqual64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs) {
Compare(ctx, inst, lhs, rhs, "SEQ", "F64", true);
}
void EmitFPUnordEqual16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register lhs,
[[maybe_unused]] Register rhs) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPUnordEqual32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs) {
Compare(ctx, inst, lhs, rhs, "SEQ", "F", false);
}
void EmitFPUnordEqual64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs) {
Compare(ctx, inst, lhs, rhs, "SEQ", "F64", false);
}
void EmitFPOrdNotEqual16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register lhs,
[[maybe_unused]] Register rhs) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPOrdNotEqual32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs) {
Compare(ctx, inst, lhs, rhs, "SNE", "F", true, true);
}
void EmitFPOrdNotEqual64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs) {
Compare(ctx, inst, lhs, rhs, "SNE", "F64", true, true);
}
void EmitFPUnordNotEqual16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register lhs,
[[maybe_unused]] Register rhs) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPUnordNotEqual32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs) {
Compare(ctx, inst, lhs, rhs, "SNE", "F", false, true);
}
void EmitFPUnordNotEqual64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs) {
Compare(ctx, inst, lhs, rhs, "SNE", "F64", false, true);
}
void EmitFPOrdLessThan16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register lhs,
[[maybe_unused]] Register rhs) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPOrdLessThan32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs) {
Compare(ctx, inst, lhs, rhs, "SLT", "F", true);
}
void EmitFPOrdLessThan64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs) {
Compare(ctx, inst, lhs, rhs, "SLT", "F64", true);
}
void EmitFPUnordLessThan16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register lhs,
[[maybe_unused]] Register rhs) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPUnordLessThan32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs) {
Compare(ctx, inst, lhs, rhs, "SLT", "F", false);
}
void EmitFPUnordLessThan64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs) {
Compare(ctx, inst, lhs, rhs, "SLT", "F64", false);
}
void EmitFPOrdGreaterThan16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register lhs,
[[maybe_unused]] Register rhs) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPOrdGreaterThan32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs) {
Compare(ctx, inst, lhs, rhs, "SGT", "F", true);
}
void EmitFPOrdGreaterThan64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs) {
Compare(ctx, inst, lhs, rhs, "SGT", "F64", true);
}
void EmitFPUnordGreaterThan16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register lhs,
[[maybe_unused]] Register rhs) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPUnordGreaterThan32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs) {
Compare(ctx, inst, lhs, rhs, "SGT", "F", false);
}
void EmitFPUnordGreaterThan64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs) {
Compare(ctx, inst, lhs, rhs, "SGT", "F64", false);
}
void EmitFPOrdLessThanEqual16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register lhs,
[[maybe_unused]] Register rhs) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPOrdLessThanEqual32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs) {
Compare(ctx, inst, lhs, rhs, "SLE", "F", true);
}
void EmitFPOrdLessThanEqual64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs) {
Compare(ctx, inst, lhs, rhs, "SLE", "F64", true);
}
void EmitFPUnordLessThanEqual16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register lhs,
[[maybe_unused]] Register rhs) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPUnordLessThanEqual32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs) {
Compare(ctx, inst, lhs, rhs, "SLE", "F", false);
}
void EmitFPUnordLessThanEqual64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs) {
Compare(ctx, inst, lhs, rhs, "SLE", "F64", false);
}
void EmitFPOrdGreaterThanEqual16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register lhs,
[[maybe_unused]] Register rhs) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPOrdGreaterThanEqual32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs) {
Compare(ctx, inst, lhs, rhs, "SGE", "F", true);
}
void EmitFPOrdGreaterThanEqual64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs) {
Compare(ctx, inst, lhs, rhs, "SGE", "F64", true);
}
void EmitFPUnordGreaterThanEqual16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register lhs,
[[maybe_unused]] Register rhs) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPUnordGreaterThanEqual32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs) {
Compare(ctx, inst, lhs, rhs, "SGE", "F", false);
}
void EmitFPUnordGreaterThanEqual64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs) {
Compare(ctx, inst, lhs, rhs, "SGE", "F64", false);
}
void EmitFPIsNan16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] Register value) {
throw NotImplementedException("GLASM instruction");
}
void EmitFPIsNan32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value) {
Compare(ctx, inst, value, value, "SNE", "F", true, false);
}
void EmitFPIsNan64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value) {
Compare(ctx, inst, value, value, "SNE", "F64", true, false);
}
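// The IsNan emitters above rely on the IEEE-754 property that NaN is the only
// value that compares not-equal to itself: reusing Compare with SNE and both
// operands set to the same value yields true exactly when the input is NaN.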
} // namespace Shader::Backend::GLASM


@ -0,0 +1,850 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <utility>
#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/frontend/ir/modifiers.h"
#include "shader_recompiler/frontend/ir/value.h"
namespace Shader::Backend::GLASM {
namespace {
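// RAII scratch-register helper (intent inferred from the code below): a
// register is taken from the register allocator on construction and freed on
// destruction. The type is move-only so ownership of the scratch register can
// be transferred without ever freeing it twice.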
struct ScopedRegister {
ScopedRegister() = default;
ScopedRegister(RegAlloc& reg_alloc_) : reg_alloc{&reg_alloc_}, reg{reg_alloc->AllocReg()} {}
~ScopedRegister() {
if (reg_alloc) {
reg_alloc->FreeReg(reg);
}
}
ScopedRegister& operator=(ScopedRegister&& rhs) noexcept {
if (reg_alloc) {
reg_alloc->FreeReg(reg);
}
reg_alloc = std::exchange(rhs.reg_alloc, nullptr);
reg = rhs.reg;
return *this;
}
ScopedRegister(ScopedRegister&& rhs) noexcept
: reg_alloc{std::exchange(rhs.reg_alloc, nullptr)}, reg{rhs.reg} {}
ScopedRegister& operator=(const ScopedRegister&) = delete;
ScopedRegister(const ScopedRegister&) = delete;
RegAlloc* reg_alloc{};
Register reg;
};
std::string Texture(EmitContext& ctx, IR::TextureInstInfo info,
[[maybe_unused]] const IR::Value& index) {
// FIXME: indexed reads
if (info.type == TextureType::Buffer) {
return fmt::format("texture[{}]", ctx.texture_buffer_bindings.at(info.descriptor_index));
} else {
return fmt::format("texture[{}]", ctx.texture_bindings.at(info.descriptor_index));
}
}
std::string Image(EmitContext& ctx, IR::TextureInstInfo info,
[[maybe_unused]] const IR::Value& index) {
// FIXME: indexed reads
if (info.type == TextureType::Buffer) {
return fmt::format("image[{}]", ctx.image_buffer_bindings.at(info.descriptor_index));
} else {
return fmt::format("image[{}]", ctx.image_bindings.at(info.descriptor_index));
}
}
std::string_view TextureType(IR::TextureInstInfo info) {
if (info.is_depth) {
switch (info.type) {
case TextureType::Color1D:
return "SHADOW1D";
case TextureType::ColorArray1D:
return "SHADOWARRAY1D";
case TextureType::Color2D:
return "SHADOW2D";
case TextureType::ColorArray2D:
return "SHADOWARRAY2D";
case TextureType::Color3D:
return "SHADOW3D";
case TextureType::ColorCube:
return "SHADOWCUBE";
case TextureType::ColorArrayCube:
return "SHADOWARRAYCUBE";
case TextureType::Buffer:
return "SHADOWBUFFER";
}
} else {
switch (info.type) {
case TextureType::Color1D:
return "1D";
case TextureType::ColorArray1D:
return "ARRAY1D";
case TextureType::Color2D:
return "2D";
case TextureType::ColorArray2D:
return "ARRAY2D";
case TextureType::Color3D:
return "3D";
case TextureType::ColorCube:
return "CUBE";
case TextureType::ColorArrayCube:
return "ARRAYCUBE";
case TextureType::Buffer:
return "BUFFER";
}
}
throw InvalidArgument("Invalid texture type {}", info.type.Value());
}
std::string Offset(EmitContext& ctx, const IR::Value& offset) {
if (offset.IsEmpty()) {
return "";
}
return fmt::format(",offset({})", Register{ctx.reg_alloc.Consume(offset)});
}
std::pair<ScopedRegister, ScopedRegister> AllocOffsetsRegs(EmitContext& ctx,
const IR::Value& offset2) {
if (offset2.IsEmpty()) {
return {};
} else {
return {ctx.reg_alloc, ctx.reg_alloc};
}
}
void SwizzleOffsets(EmitContext& ctx, Register off_x, Register off_y, const IR::Value& offset1,
const IR::Value& offset2) {
const Register offsets_a{ctx.reg_alloc.Consume(offset1)};
const Register offsets_b{ctx.reg_alloc.Consume(offset2)};
// Input swizzle: [XYXY] [XYXY]
// Output swizzle: [XXXX] [YYYY]
ctx.Add("MOV {}.x,{}.x;"
"MOV {}.y,{}.z;"
"MOV {}.z,{}.x;"
"MOV {}.w,{}.z;"
"MOV {}.x,{}.y;"
"MOV {}.y,{}.w;"
"MOV {}.z,{}.y;"
"MOV {}.w,{}.w;",
off_x, offsets_a, off_x, offsets_a, off_x, offsets_b, off_x, offsets_b, off_y,
offsets_a, off_y, offsets_a, off_y, offsets_b, off_y, offsets_b);
}
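// Worked example (hypothetical values): with offset1 = (x0,y0,x1,y1) and
// offset2 = (x2,y2,x3,y3), the moves above produce off_x = (x0,x1,x2,x3) and
// off_y = (y0,y1,y2,y3), the per-component layout the TXGO instruction
// emitted later consumes for its two offset operands.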
std::string GradOffset(const IR::Value& offset) {
if (offset.IsImmediate()) {
LOG_WARNING(Shader_GLASM, "Gradient offset is a scalar immediate");
return "";
}
IR::Inst* const vector{offset.InstRecursive()};
if (!vector->AreAllArgsImmediates()) {
LOG_WARNING(Shader_GLASM, "Gradient offset vector is not immediate");
return "";
}
switch (vector->NumArgs()) {
case 1:
return fmt::format(",({})", static_cast<s32>(vector->Arg(0).U32()));
case 2:
return fmt::format(",({},{})", static_cast<s32>(vector->Arg(0).U32()),
static_cast<s32>(vector->Arg(1).U32()));
default:
throw LogicError("Invalid number of gradient offsets {}", vector->NumArgs());
}
}
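// GradOffset above can only encode offsets that are compile-time immediates,
// since they are printed directly into the instruction text; dynamic offsets
// are dropped with a warning rather than emitted incorrectly.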
std::pair<std::string, ScopedRegister> Coord(EmitContext& ctx, const IR::Value& coord) {
if (coord.IsImmediate()) {
ScopedRegister scoped_reg(ctx.reg_alloc);
ctx.Add("MOV.U {}.x,{};", scoped_reg.reg, ScalarU32{ctx.reg_alloc.Consume(coord)});
return {fmt::to_string(scoped_reg.reg), std::move(scoped_reg)};
}
std::string coord_vec{fmt::to_string(Register{ctx.reg_alloc.Consume(coord)})};
if (coord.InstRecursive()->HasUses()) {
// Move non-dead coords to a separate register, although this should never happen because
// vectors are only assembled for immediate texture instructions
ctx.Add("MOV.F RC,{};", coord_vec);
coord_vec = "RC";
}
return {std::move(coord_vec), ScopedRegister{}};
}
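// Coord above returns a register name that is safe to write to: an immediate
// coordinate is copied into a freshly allocated scratch register, while a
// register coordinate is used in place (or staged through RC when the
// producing instruction still has other uses).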
void StoreSparse(EmitContext& ctx, IR::Inst* sparse_inst) {
if (!sparse_inst) {
return;
}
const Register sparse_ret{ctx.reg_alloc.Define(*sparse_inst)};
ctx.Add("MOV.S {},-1;"
"MOV.S {}(NONRESIDENT),0;",
sparse_ret, sparse_ret);
}
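// Sparse-residency sketch: StoreSparse first sets the result register to all
// ones (true), then clears it under the NONRESIDENT condition presumably
// written by the preceding .SPARSE fetch, so the value reads as "resident"
// unless the texel fetch missed.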
std::string_view FormatStorage(ImageFormat format) {
switch (format) {
case ImageFormat::Typeless:
return "U";
case ImageFormat::R8_UINT:
return "U8";
case ImageFormat::R8_SINT:
return "S8";
case ImageFormat::R16_UINT:
return "U16";
case ImageFormat::R16_SINT:
return "S16";
case ImageFormat::R32_UINT:
return "U32";
case ImageFormat::R32G32_UINT:
return "U32X2";
case ImageFormat::R32G32B32A32_UINT:
return "U32X4";
}
throw InvalidArgument("Invalid image format {}", format);
}
template <typename T>
void ImageAtomic(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord, T value,
std::string_view op) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const std::string_view type{TextureType(info)};
const std::string image{Image(ctx, info, index)};
const Register ret{ctx.reg_alloc.Define(inst)};
ctx.Add("ATOMIM.{} {},{},{},{},{};", op, ret, value, coord, image, type);
}
IR::Inst* PrepareSparse(IR::Inst& inst) {
const auto sparse_inst{inst.GetAssociatedPseudoOperation(IR::Opcode::GetSparseFromOp)};
if (sparse_inst) {
sparse_inst->Invalidate();
}
return sparse_inst;
}
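// PrepareSparse detaches the GetSparseFromOp pseudo-instruction (when one
// consumes the texture op) so it is not emitted on its own; the caller then
// materializes the residency value via StoreSparse above.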
} // Anonymous namespace
void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
const IR::Value& coord, Register bias_lc, const IR::Value& offset) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto sparse_inst{PrepareSparse(inst)};
const std::string_view sparse_mod{sparse_inst ? ".SPARSE" : ""};
const std::string_view lod_clamp_mod{info.has_lod_clamp ? ".LODCLAMP" : ""};
const std::string_view type{TextureType(info)};
const std::string texture{Texture(ctx, info, index)};
const std::string offset_vec{Offset(ctx, offset)};
const auto [coord_vec, coord_alloc]{Coord(ctx, coord)};
const Register ret{ctx.reg_alloc.Define(inst)};
if (info.has_bias) {
if (info.type == TextureType::ColorArrayCube) {
ctx.Add("TXB.F{}{} {},{},{},{},ARRAYCUBE{};", lod_clamp_mod, sparse_mod, ret, coord_vec,
bias_lc, texture, offset_vec);
} else {
if (info.has_lod_clamp) {
ctx.Add("MOV.F {}.w,{}.x;"
"TXB.F.LODCLAMP{} {},{},{}.y,{},{}{};",
coord_vec, bias_lc, sparse_mod, ret, coord_vec, bias_lc, texture, type,
offset_vec);
} else {
ctx.Add("MOV.F {}.w,{}.x;"
"TXB.F{} {},{},{},{}{};",
coord_vec, bias_lc, sparse_mod, ret, coord_vec, texture, type, offset_vec);
}
}
} else {
if (info.has_lod_clamp && info.type == TextureType::ColorArrayCube) {
ctx.Add("TEX.F.LODCLAMP{} {},{},{},{},ARRAYCUBE{};", sparse_mod, ret, coord_vec,
bias_lc, texture, offset_vec);
} else {
ctx.Add("TEX.F{}{} {},{},{},{}{};", lod_clamp_mod, sparse_mod, ret, coord_vec, texture,
type, offset_vec);
}
}
StoreSparse(ctx, sparse_inst);
}
void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
const IR::Value& coord, ScalarF32 lod, const IR::Value& offset) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto sparse_inst{PrepareSparse(inst)};
const std::string_view sparse_mod{sparse_inst ? ".SPARSE" : ""};
const std::string_view type{TextureType(info)};
const std::string texture{Texture(ctx, info, index)};
const std::string offset_vec{Offset(ctx, offset)};
const auto [coord_vec, coord_alloc]{Coord(ctx, coord)};
const Register ret{ctx.reg_alloc.Define(inst)};
if (info.type == TextureType::ColorArrayCube) {
ctx.Add("TXL.F{} {},{},{},{},ARRAYCUBE{};", sparse_mod, ret, coord_vec, lod, texture,
offset_vec);
} else {
ctx.Add("MOV.F {}.w,{};"
"TXL.F{} {},{},{},{}{};",
coord_vec, lod, sparse_mod, ret, coord_vec, texture, type, offset_vec);
}
StoreSparse(ctx, sparse_inst);
}
void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
const IR::Value& coord, const IR::Value& dref,
const IR::Value& bias_lc, const IR::Value& offset) {
    // Allocate the staging register early so it cannot alias registers consumed below
const auto info{inst.Flags<IR::TextureInstInfo>()};
ScopedRegister staging;
if (info.type == TextureType::ColorArrayCube) {
staging = ScopedRegister{ctx.reg_alloc};
}
const ScalarF32 dref_val{ctx.reg_alloc.Consume(dref)};
const Register bias_lc_vec{ctx.reg_alloc.Consume(bias_lc)};
const auto sparse_inst{PrepareSparse(inst)};
const std::string_view sparse_mod{sparse_inst ? ".SPARSE" : ""};
const std::string_view type{TextureType(info)};
const std::string texture{Texture(ctx, info, index)};
const std::string offset_vec{Offset(ctx, offset)};
const auto [coord_vec, coord_alloc]{Coord(ctx, coord)};
const Register ret{ctx.reg_alloc.Define(inst)};
if (info.has_bias) {
if (info.has_lod_clamp) {
switch (info.type) {
case TextureType::Color1D:
case TextureType::ColorArray1D:
case TextureType::Color2D:
ctx.Add("MOV.F {}.z,{};"
"MOV.F {}.w,{}.x;"
"TXB.F.LODCLAMP{} {},{},{}.y,{},{}{};",
coord_vec, dref_val, coord_vec, bias_lc_vec, sparse_mod, ret, coord_vec,
bias_lc_vec, texture, type, offset_vec);
break;
case TextureType::ColorArray2D:
case TextureType::ColorCube:
ctx.Add("MOV.F {}.w,{};"
"TXB.F.LODCLAMP{} {},{},{},{},{}{};",
coord_vec, dref_val, sparse_mod, ret, coord_vec, bias_lc_vec, texture, type,
offset_vec);
break;
default:
throw NotImplementedException("Invalid type {} with bias and lod clamp",
info.type.Value());
}
} else {
switch (info.type) {
case TextureType::Color1D:
case TextureType::ColorArray1D:
case TextureType::Color2D:
ctx.Add("MOV.F {}.z,{};"
"MOV.F {}.w,{}.x;"
"TXB.F{} {},{},{},{}{};",
coord_vec, dref_val, coord_vec, bias_lc_vec, sparse_mod, ret, coord_vec,
texture, type, offset_vec);
break;
case TextureType::ColorArray2D:
case TextureType::ColorCube:
ctx.Add("MOV.F {}.w,{};"
"TXB.F{} {},{},{},{},{}{};",
coord_vec, dref_val, sparse_mod, ret, coord_vec, bias_lc_vec, texture, type,
offset_vec);
break;
case TextureType::ColorArrayCube:
ctx.Add("MOV.F {}.x,{};"
"MOV.F {}.y,{}.x;"
"TXB.F{} {},{},{},{},{}{};",
staging.reg, dref_val, staging.reg, bias_lc_vec, sparse_mod, ret, coord_vec,
staging.reg, texture, type, offset_vec);
break;
default:
throw NotImplementedException("Invalid type {}", info.type.Value());
}
}
} else {
if (info.has_lod_clamp) {
if (info.type != TextureType::ColorArrayCube) {
const bool w_swizzle{info.type == TextureType::ColorArray2D ||
info.type == TextureType::ColorCube};
const char dref_swizzle{w_swizzle ? 'w' : 'z'};
ctx.Add("MOV.F {}.{},{};"
"TEX.F.LODCLAMP{} {},{},{},{},{}{};",
coord_vec, dref_swizzle, dref_val, sparse_mod, ret, coord_vec, bias_lc_vec,
texture, type, offset_vec);
} else {
ctx.Add("MOV.F {}.x,{};"
"MOV.F {}.y,{};"
"TEX.F.LODCLAMP{} {},{},{},{},{}{};",
staging.reg, dref_val, staging.reg, bias_lc_vec, sparse_mod, ret, coord_vec,
staging.reg, texture, type, offset_vec);
}
} else {
if (info.type != TextureType::ColorArrayCube) {
const bool w_swizzle{info.type == TextureType::ColorArray2D ||
info.type == TextureType::ColorCube};
const char dref_swizzle{w_swizzle ? 'w' : 'z'};
ctx.Add("MOV.F {}.{},{};"
"TEX.F{} {},{},{},{}{};",
coord_vec, dref_swizzle, dref_val, sparse_mod, ret, coord_vec, texture,
type, offset_vec);
} else {
ctx.Add("TEX.F{} {},{},{},{},{}{};", sparse_mod, ret, coord_vec, dref_val, texture,
type, offset_vec);
}
}
}
StoreSparse(ctx, sparse_inst);
}
void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
const IR::Value& coord, const IR::Value& dref,
const IR::Value& lod, const IR::Value& offset) {
    // Allocate the staging register early so it cannot alias registers consumed below
const auto info{inst.Flags<IR::TextureInstInfo>()};
ScopedRegister staging;
if (info.type == TextureType::ColorArrayCube) {
staging = ScopedRegister{ctx.reg_alloc};
}
const ScalarF32 dref_val{ctx.reg_alloc.Consume(dref)};
const ScalarF32 lod_val{ctx.reg_alloc.Consume(lod)};
const auto sparse_inst{PrepareSparse(inst)};
const std::string_view sparse_mod{sparse_inst ? ".SPARSE" : ""};
const std::string_view type{TextureType(info)};
const std::string texture{Texture(ctx, info, index)};
const std::string offset_vec{Offset(ctx, offset)};
const auto [coord_vec, coord_alloc]{Coord(ctx, coord)};
const Register ret{ctx.reg_alloc.Define(inst)};
switch (info.type) {
case TextureType::Color1D:
case TextureType::ColorArray1D:
case TextureType::Color2D:
ctx.Add("MOV.F {}.z,{};"
"MOV.F {}.w,{};"
"TXL.F{} {},{},{},{}{};",
coord_vec, dref_val, coord_vec, lod_val, sparse_mod, ret, coord_vec, texture, type,
offset_vec);
break;
case TextureType::ColorArray2D:
case TextureType::ColorCube:
ctx.Add("MOV.F {}.w,{};"
"TXL.F{} {},{},{},{},{}{};",
coord_vec, dref_val, sparse_mod, ret, coord_vec, lod_val, texture, type,
offset_vec);
break;
case TextureType::ColorArrayCube:
ctx.Add("MOV.F {}.x,{};"
"MOV.F {}.y,{};"
"TXL.F{} {},{},{},{},{}{};",
staging.reg, dref_val, staging.reg, lod_val, sparse_mod, ret, coord_vec,
staging.reg, texture, type, offset_vec);
break;
default:
throw NotImplementedException("Invalid type {}", info.type.Value());
}
StoreSparse(ctx, sparse_inst);
}
void EmitImageGather(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
const IR::Value& coord, const IR::Value& offset, const IR::Value& offset2) {
// Allocate offsets early so they don't overwrite any consumed register
const auto [off_x, off_y]{AllocOffsetsRegs(ctx, offset2)};
const auto info{inst.Flags<IR::TextureInstInfo>()};
const char comp{"xyzw"[info.gather_component]};
const auto sparse_inst{PrepareSparse(inst)};
const std::string_view sparse_mod{sparse_inst ? ".SPARSE" : ""};
const std::string_view type{TextureType(info)};
const std::string texture{Texture(ctx, info, index)};
const Register coord_vec{ctx.reg_alloc.Consume(coord)};
const Register ret{ctx.reg_alloc.Define(inst)};
if (offset2.IsEmpty()) {
const std::string offset_vec{Offset(ctx, offset)};
ctx.Add("TXG.F{} {},{},{}.{},{}{};", sparse_mod, ret, coord_vec, texture, comp, type,
offset_vec);
} else {
SwizzleOffsets(ctx, off_x.reg, off_y.reg, offset, offset2);
ctx.Add("TXGO.F{} {},{},{},{},{}.{},{};", sparse_mod, ret, coord_vec, off_x.reg, off_y.reg,
texture, comp, type);
}
StoreSparse(ctx, sparse_inst);
}
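// Design note (as read from EmitImageGather above): a single gather offset
// fits TXG's inline ",offset(...)" operand, but a pair of offsets has no
// inline form, so the four per-texel offsets are swizzled into two registers
// and the gather is emitted through TXGO instead.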
void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
const IR::Value& coord, const IR::Value& offset, const IR::Value& offset2,
const IR::Value& dref) {
// FIXME: This instruction is not working as expected
// Allocate offsets early so they don't overwrite any consumed register
const auto [off_x, off_y]{AllocOffsetsRegs(ctx, offset2)};
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto sparse_inst{PrepareSparse(inst)};
const std::string_view sparse_mod{sparse_inst ? ".SPARSE" : ""};
const std::string_view type{TextureType(info)};
const std::string texture{Texture(ctx, info, index)};
const Register coord_vec{ctx.reg_alloc.Consume(coord)};
const ScalarF32 dref_value{ctx.reg_alloc.Consume(dref)};
const Register ret{ctx.reg_alloc.Define(inst)};
std::string args;
switch (info.type) {
case TextureType::Color2D:
ctx.Add("MOV.F {}.z,{};", coord_vec, dref_value);
args = fmt::to_string(coord_vec);
break;
case TextureType::ColorArray2D:
case TextureType::ColorCube:
ctx.Add("MOV.F {}.w,{};", coord_vec, dref_value);
args = fmt::to_string(coord_vec);
break;
case TextureType::ColorArrayCube:
args = fmt::format("{},{}", coord_vec, dref_value);
break;
default:
throw NotImplementedException("Invalid type {}", info.type.Value());
}
if (offset2.IsEmpty()) {
const std::string offset_vec{Offset(ctx, offset)};
ctx.Add("TXG.F{} {},{},{},{}{};", sparse_mod, ret, args, texture, type, offset_vec);
} else {
SwizzleOffsets(ctx, off_x.reg, off_y.reg, offset, offset2);
ctx.Add("TXGO.F{} {},{},{},{},{},{};", sparse_mod, ret, args, off_x.reg, off_y.reg, texture,
type);
}
StoreSparse(ctx, sparse_inst);
}
void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
const IR::Value& coord, const IR::Value& offset, ScalarS32 lod, ScalarS32 ms) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto sparse_inst{PrepareSparse(inst)};
const std::string_view sparse_mod{sparse_inst ? ".SPARSE" : ""};
const std::string_view type{TextureType(info)};
const std::string texture{Texture(ctx, info, index)};
const std::string offset_vec{Offset(ctx, offset)};
const auto [coord_vec, coord_alloc]{Coord(ctx, coord)};
const Register ret{ctx.reg_alloc.Define(inst)};
if (info.type == TextureType::Buffer) {
ctx.Add("TXF.F{} {},{},{},{}{};", sparse_mod, ret, coord_vec, texture, type, offset_vec);
} else if (ms.type != Type::Void) {
ctx.Add("MOV.S {}.w,{};"
"TXFMS.F{} {},{},{},{}{};",
coord_vec, ms, sparse_mod, ret, coord_vec, texture, type, offset_vec);
} else {
ctx.Add("MOV.S {}.w,{};"
"TXF.F{} {},{},{},{}{};",
coord_vec, lod, sparse_mod, ret, coord_vec, texture, type, offset_vec);
}
StoreSparse(ctx, sparse_inst);
}
void EmitImageQueryDimensions(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
ScalarS32 lod) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const std::string texture{Texture(ctx, info, index)};
const std::string_view type{TextureType(info)};
ctx.Add("TXQ {},{},{},{};", inst, lod, texture, type);
}
void EmitImageQueryLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const std::string texture{Texture(ctx, info, index)};
const std::string_view type{TextureType(info)};
ctx.Add("LOD.F {},{},{},{};", inst, coord, texture, type);
}
void EmitImageGradient(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
const IR::Value& coord, const IR::Value& derivatives,
const IR::Value& offset, const IR::Value& lod_clamp) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
ScopedRegister dpdx, dpdy;
const bool multi_component{info.num_derivates > 1 || info.has_lod_clamp};
if (multi_component) {
// Allocate this early to avoid aliasing other registers
dpdx = ScopedRegister{ctx.reg_alloc};
dpdy = ScopedRegister{ctx.reg_alloc};
}
const auto sparse_inst{PrepareSparse(inst)};
const std::string_view sparse_mod{sparse_inst ? ".SPARSE" : ""};
const std::string_view type{TextureType(info)};
const std::string texture{Texture(ctx, info, index)};
const std::string offset_vec{GradOffset(offset)};
const Register coord_vec{ctx.reg_alloc.Consume(coord)};
const Register derivatives_vec{ctx.reg_alloc.Consume(derivatives)};
const Register ret{ctx.reg_alloc.Define(inst)};
if (multi_component) {
ctx.Add("MOV.F {}.x,{}.x;"
"MOV.F {}.y,{}.z;"
"MOV.F {}.x,{}.y;"
"MOV.F {}.y,{}.w;",
dpdx.reg, derivatives_vec, dpdx.reg, derivatives_vec, dpdy.reg, derivatives_vec,
dpdy.reg, derivatives_vec);
if (info.has_lod_clamp) {
const ScalarF32 lod_clamp_value{ctx.reg_alloc.Consume(lod_clamp)};
ctx.Add("MOV.F {}.w,{};"
"TXD.F.LODCLAMP{} {},{},{},{},{},{}{};",
dpdy.reg, lod_clamp_value, sparse_mod, ret, coord_vec, dpdx.reg, dpdy.reg,
texture, type, offset_vec);
} else {
ctx.Add("TXD.F{} {},{},{},{},{},{}{};", sparse_mod, ret, coord_vec, dpdx.reg, dpdy.reg,
texture, type, offset_vec);
}
} else {
ctx.Add("TXD.F{} {},{},{}.x,{}.y,{},{}{};", sparse_mod, ret, coord_vec, derivatives_vec,
derivatives_vec, texture, type, offset_vec);
}
StoreSparse(ctx, sparse_inst);
}
void EmitImageRead(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto sparse_inst{PrepareSparse(inst)};
const std::string_view format{FormatStorage(info.image_format)};
const std::string_view sparse_mod{sparse_inst ? ".SPARSE" : ""};
const std::string_view type{TextureType(info)};
const std::string image{Image(ctx, info, index)};
const Register ret{ctx.reg_alloc.Define(inst)};
ctx.Add("LOADIM.{}{} {},{},{},{};", format, sparse_mod, ret, coord, image, type);
StoreSparse(ctx, sparse_inst);
}
void EmitImageWrite(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
Register color) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const std::string_view format{FormatStorage(info.image_format)};
const std::string_view type{TextureType(info)};
const std::string image{Image(ctx, info, index)};
ctx.Add("STOREIM.{} {},{},{},{};", format, image, color, coord, type);
}
void EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarU32 value) {
ImageAtomic(ctx, inst, index, coord, value, "ADD.U32");
}
void EmitImageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarS32 value) {
ImageAtomic(ctx, inst, index, coord, value, "MIN.S32");
}
void EmitImageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarU32 value) {
ImageAtomic(ctx, inst, index, coord, value, "MIN.U32");
}
void EmitImageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarS32 value) {
ImageAtomic(ctx, inst, index, coord, value, "MAX.S32");
}
void EmitImageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarU32 value) {
ImageAtomic(ctx, inst, index, coord, value, "MAX.U32");
}
void EmitImageAtomicInc32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarU32 value) {
ImageAtomic(ctx, inst, index, coord, value, "IWRAP.U32");
}
void EmitImageAtomicDec32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarU32 value) {
ImageAtomic(ctx, inst, index, coord, value, "DWRAP.U32");
}
void EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarU32 value) {
ImageAtomic(ctx, inst, index, coord, value, "AND.U32");
}
void EmitImageAtomicOr32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarU32 value) {
ImageAtomic(ctx, inst, index, coord, value, "OR.U32");
}
void EmitImageAtomicXor32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarU32 value) {
ImageAtomic(ctx, inst, index, coord, value, "XOR.U32");
}
void EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
Register coord, ScalarU32 value) {
ImageAtomic(ctx, inst, index, coord, value, "EXCH.U32");
}
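// The Bindless*/Bound* entry points below throw LogicError rather than
// NotImplementedException: these IR opcodes are expected to be rewritten into
// descriptor-indexed instructions by earlier IR passes, so reaching the GLASM
// backend with one of them indicates a compiler bug, not a missing feature.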
void EmitBindlessImageSampleImplicitLod(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageSampleExplicitLod(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageSampleDrefImplicitLod(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageSampleDrefExplicitLod(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageGather(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageGatherDref(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageFetch(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageQueryDimensions(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageQueryLod(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageGradient(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageRead(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageWrite(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageSampleImplicitLod(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageSampleExplicitLod(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageSampleDrefImplicitLod(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageSampleDrefExplicitLod(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageGather(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageGatherDref(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageFetch(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageQueryDimensions(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageQueryLod(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageGradient(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageRead(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageWrite(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageAtomicIAdd32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageAtomicSMin32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageAtomicUMin32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageAtomicSMax32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageAtomicUMax32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageAtomicInc32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageAtomicDec32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageAtomicAnd32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageAtomicOr32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageAtomicXor32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBindlessImageAtomicExchange32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageAtomicIAdd32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageAtomicSMin32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageAtomicUMin32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageAtomicSMax32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageAtomicUMax32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageAtomicInc32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageAtomicDec32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageAtomicAnd32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageAtomicOr32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageAtomicXor32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitBoundImageAtomicExchange32(EmitContext&) {
throw LogicError("Unreachable instruction");
}
} // namespace Shader::Backend::GLASM


@ -0,0 +1,625 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include "common/common_types.h"
#include "shader_recompiler/backend/glasm/reg_alloc.h"
namespace Shader::IR {
enum class Attribute : u64;
enum class Patch : u64;
class Inst;
class Value;
} // namespace Shader::IR
namespace Shader::Backend::GLASM {
class EmitContext;
// Microinstruction emitters
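// Sketch of the convention (inferred from the definitions): each Emit*
// declaration below corresponds one-to-one to an IR opcode. Parameters typed
// Register/ScalarF32/ScalarF64/ScalarU32/ScalarS32 are operands already
// materialized by the register allocator, while IR::Inst& identifies the
// instruction whose result register the emitter defines.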
void EmitPhi(EmitContext& ctx, IR::Inst& inst);
void EmitVoid(EmitContext& ctx);
void EmitIdentity(EmitContext& ctx, IR::Inst& inst, const IR::Value& value);
void EmitConditionRef(EmitContext& ctx, IR::Inst& inst, const IR::Value& value);
void EmitReference(EmitContext&, const IR::Value& value);
void EmitPhiMove(EmitContext& ctx, const IR::Value& phi, const IR::Value& value);
void EmitJoin(EmitContext& ctx);
void EmitDemoteToHelperInvocation(EmitContext& ctx);
void EmitBarrier(EmitContext& ctx);
void EmitWorkgroupMemoryBarrier(EmitContext& ctx);
void EmitDeviceMemoryBarrier(EmitContext& ctx);
void EmitPrologue(EmitContext& ctx);
void EmitEpilogue(EmitContext& ctx);
void EmitEmitVertex(EmitContext& ctx, ScalarS32 stream);
void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream);
void EmitGetRegister(EmitContext& ctx);
void EmitSetRegister(EmitContext& ctx);
void EmitGetPred(EmitContext& ctx);
void EmitSetPred(EmitContext& ctx);
void EmitSetGotoVariable(EmitContext& ctx);
void EmitGetGotoVariable(EmitContext& ctx);
void EmitSetIndirectBranchVariable(EmitContext& ctx);
void EmitGetIndirectBranchVariable(EmitContext& ctx);
void EmitGetCbufU8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset);
void EmitGetCbufS8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset);
void EmitGetCbufU16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset);
void EmitGetCbufS16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset);
void EmitGetCbufU32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset);
void EmitGetCbufF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset);
void EmitGetCbufU32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset);
void EmitGetAttribute(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr, ScalarU32 vertex);
void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, ScalarF32 value, ScalarU32 vertex);
void EmitGetAttributeIndexed(EmitContext& ctx, IR::Inst& inst, ScalarS32 offset, ScalarU32 vertex);
void EmitSetAttributeIndexed(EmitContext& ctx, ScalarU32 offset, ScalarF32 value, ScalarU32 vertex);
void EmitGetPatch(EmitContext& ctx, IR::Inst& inst, IR::Patch patch);
void EmitSetPatch(EmitContext& ctx, IR::Patch patch, ScalarF32 value);
void EmitSetFragColor(EmitContext& ctx, u32 index, u32 component, ScalarF32 value);
void EmitSetSampleMask(EmitContext& ctx, ScalarS32 value);
void EmitSetFragDepth(EmitContext& ctx, ScalarF32 value);
void EmitGetZFlag(EmitContext& ctx);
void EmitGetSFlag(EmitContext& ctx);
void EmitGetCFlag(EmitContext& ctx);
void EmitGetOFlag(EmitContext& ctx);
void EmitSetZFlag(EmitContext& ctx);
void EmitSetSFlag(EmitContext& ctx);
void EmitSetCFlag(EmitContext& ctx);
void EmitSetOFlag(EmitContext& ctx);
void EmitWorkgroupId(EmitContext& ctx, IR::Inst& inst);
void EmitLocalInvocationId(EmitContext& ctx, IR::Inst& inst);
void EmitInvocationId(EmitContext& ctx, IR::Inst& inst);
void EmitSampleId(EmitContext& ctx, IR::Inst& inst);
void EmitIsHelperInvocation(EmitContext& ctx, IR::Inst& inst);
void EmitYDirection(EmitContext& ctx, IR::Inst& inst);
void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, ScalarU32 word_offset);
void EmitWriteLocal(EmitContext& ctx, ScalarU32 word_offset, ScalarU32 value);
void EmitUndefU1(EmitContext& ctx, IR::Inst& inst);
void EmitUndefU8(EmitContext& ctx, IR::Inst& inst);
void EmitUndefU16(EmitContext& ctx, IR::Inst& inst);
void EmitUndefU32(EmitContext& ctx, IR::Inst& inst);
void EmitUndefU64(EmitContext& ctx, IR::Inst& inst);
void EmitLoadGlobalU8(EmitContext& ctx, IR::Inst& inst, Register address);
void EmitLoadGlobalS8(EmitContext& ctx, IR::Inst& inst, Register address);
void EmitLoadGlobalU16(EmitContext& ctx, IR::Inst& inst, Register address);
void EmitLoadGlobalS16(EmitContext& ctx, IR::Inst& inst, Register address);
void EmitLoadGlobal32(EmitContext& ctx, IR::Inst& inst, Register address);
void EmitLoadGlobal64(EmitContext& ctx, IR::Inst& inst, Register address);
void EmitLoadGlobal128(EmitContext& ctx, IR::Inst& inst, Register address);
void EmitWriteGlobalU8(EmitContext& ctx, Register address, Register value);
void EmitWriteGlobalS8(EmitContext& ctx, Register address, Register value);
void EmitWriteGlobalU16(EmitContext& ctx, Register address, Register value);
void EmitWriteGlobalS16(EmitContext& ctx, Register address, Register value);
void EmitWriteGlobal32(EmitContext& ctx, Register address, ScalarU32 value);
void EmitWriteGlobal64(EmitContext& ctx, Register address, Register value);
void EmitWriteGlobal128(EmitContext& ctx, Register address, Register value);
void EmitLoadStorageU8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset);
void EmitLoadStorageS8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset);
void EmitLoadStorageU16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset);
void EmitLoadStorageS16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset);
void EmitLoadStorage32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset);
void EmitLoadStorage64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset);
void EmitLoadStorage128(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset);
void EmitWriteStorageU8(EmitContext& ctx, const IR::Value& binding, ScalarU32 offset,
ScalarU32 value);
void EmitWriteStorageS8(EmitContext& ctx, const IR::Value& binding, ScalarU32 offset,
ScalarS32 value);
void EmitWriteStorageU16(EmitContext& ctx, const IR::Value& binding, ScalarU32 offset,
ScalarU32 value);
void EmitWriteStorageS16(EmitContext& ctx, const IR::Value& binding, ScalarU32 offset,
ScalarS32 value);
void EmitWriteStorage32(EmitContext& ctx, const IR::Value& binding, ScalarU32 offset,
ScalarU32 value);
void EmitWriteStorage64(EmitContext& ctx, const IR::Value& binding, ScalarU32 offset,
Register value);
void EmitWriteStorage128(EmitContext& ctx, const IR::Value& binding, ScalarU32 offset,
Register value);
void EmitLoadSharedU8(EmitContext& ctx, IR::Inst& inst, ScalarU32 offset);
void EmitLoadSharedS8(EmitContext& ctx, IR::Inst& inst, ScalarU32 offset);
void EmitLoadSharedU16(EmitContext& ctx, IR::Inst& inst, ScalarU32 offset);
void EmitLoadSharedS16(EmitContext& ctx, IR::Inst& inst, ScalarU32 offset);
void EmitLoadSharedU32(EmitContext& ctx, IR::Inst& inst, ScalarU32 offset);
void EmitLoadSharedU64(EmitContext& ctx, IR::Inst& inst, ScalarU32 offset);
void EmitLoadSharedU128(EmitContext& ctx, IR::Inst& inst, ScalarU32 offset);
void EmitWriteSharedU8(EmitContext& ctx, ScalarU32 offset, ScalarU32 value);
void EmitWriteSharedU16(EmitContext& ctx, ScalarU32 offset, ScalarU32 value);
void EmitWriteSharedU32(EmitContext& ctx, ScalarU32 offset, ScalarU32 value);
void EmitWriteSharedU64(EmitContext& ctx, ScalarU32 offset, Register value);
void EmitWriteSharedU128(EmitContext& ctx, ScalarU32 offset, Register value);
void EmitCompositeConstructU32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& e1,
const IR::Value& e2);
void EmitCompositeConstructU32x3(EmitContext& ctx, IR::Inst& inst, const IR::Value& e1,
const IR::Value& e2, const IR::Value& e3);
void EmitCompositeConstructU32x4(EmitContext& ctx, IR::Inst& inst, const IR::Value& e1,
const IR::Value& e2, const IR::Value& e3, const IR::Value& e4);
void EmitCompositeExtractU32x2(EmitContext& ctx, IR::Inst& inst, Register composite, u32 index);
void EmitCompositeExtractU32x3(EmitContext& ctx, IR::Inst& inst, Register composite, u32 index);
void EmitCompositeExtractU32x4(EmitContext& ctx, IR::Inst& inst, Register composite, u32 index);
void EmitCompositeInsertU32x2(EmitContext& ctx, Register composite, ScalarU32 object, u32 index);
void EmitCompositeInsertU32x3(EmitContext& ctx, Register composite, ScalarU32 object, u32 index);
void EmitCompositeInsertU32x4(EmitContext& ctx, Register composite, ScalarU32 object, u32 index);
void EmitCompositeConstructF16x2(EmitContext& ctx, Register e1, Register e2);
void EmitCompositeConstructF16x3(EmitContext& ctx, Register e1, Register e2, Register e3);
void EmitCompositeConstructF16x4(EmitContext& ctx, Register e1, Register e2, Register e3,
Register e4);
void EmitCompositeExtractF16x2(EmitContext& ctx, Register composite, u32 index);
void EmitCompositeExtractF16x3(EmitContext& ctx, Register composite, u32 index);
void EmitCompositeExtractF16x4(EmitContext& ctx, Register composite, u32 index);
void EmitCompositeInsertF16x2(EmitContext& ctx, Register composite, Register object, u32 index);
void EmitCompositeInsertF16x3(EmitContext& ctx, Register composite, Register object, u32 index);
void EmitCompositeInsertF16x4(EmitContext& ctx, Register composite, Register object, u32 index);
void EmitCompositeConstructF32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& e1,
const IR::Value& e2);
void EmitCompositeConstructF32x3(EmitContext& ctx, IR::Inst& inst, const IR::Value& e1,
const IR::Value& e2, const IR::Value& e3);
void EmitCompositeConstructF32x4(EmitContext& ctx, IR::Inst& inst, const IR::Value& e1,
const IR::Value& e2, const IR::Value& e3, const IR::Value& e4);
void EmitCompositeExtractF32x2(EmitContext& ctx, IR::Inst& inst, Register composite, u32 index);
void EmitCompositeExtractF32x3(EmitContext& ctx, IR::Inst& inst, Register composite, u32 index);
void EmitCompositeExtractF32x4(EmitContext& ctx, IR::Inst& inst, Register composite, u32 index);
void EmitCompositeInsertF32x2(EmitContext& ctx, IR::Inst& inst, Register composite,
ScalarF32 object, u32 index);
void EmitCompositeInsertF32x3(EmitContext& ctx, IR::Inst& inst, Register composite,
ScalarF32 object, u32 index);
void EmitCompositeInsertF32x4(EmitContext& ctx, IR::Inst& inst, Register composite,
ScalarF32 object, u32 index);
void EmitCompositeConstructF64x2(EmitContext& ctx);
void EmitCompositeConstructF64x3(EmitContext& ctx);
void EmitCompositeConstructF64x4(EmitContext& ctx);
void EmitCompositeExtractF64x2(EmitContext& ctx);
void EmitCompositeExtractF64x3(EmitContext& ctx);
void EmitCompositeExtractF64x4(EmitContext& ctx);
void EmitCompositeInsertF64x2(EmitContext& ctx, Register composite, Register object, u32 index);
void EmitCompositeInsertF64x3(EmitContext& ctx, Register composite, Register object, u32 index);
void EmitCompositeInsertF64x4(EmitContext& ctx, Register composite, Register object, u32 index);
void EmitSelectU1(EmitContext& ctx, IR::Inst& inst, ScalarS32 cond, ScalarS32 true_value,
ScalarS32 false_value);
void EmitSelectU8(EmitContext& ctx, ScalarS32 cond, ScalarS32 true_value, ScalarS32 false_value);
void EmitSelectU16(EmitContext& ctx, ScalarS32 cond, ScalarS32 true_value, ScalarS32 false_value);
void EmitSelectU32(EmitContext& ctx, IR::Inst& inst, ScalarS32 cond, ScalarS32 true_value,
ScalarS32 false_value);
void EmitSelectU64(EmitContext& ctx, IR::Inst& inst, ScalarS32 cond, Register true_value,
Register false_value);
void EmitSelectF16(EmitContext& ctx, ScalarS32 cond, Register true_value, Register false_value);
void EmitSelectF32(EmitContext& ctx, IR::Inst& inst, ScalarS32 cond, ScalarS32 true_value,
ScalarS32 false_value);
void EmitSelectF64(EmitContext& ctx, ScalarS32 cond, Register true_value, Register false_value);
void EmitBitCastU16F16(EmitContext& ctx, IR::Inst& inst, const IR::Value& value);
void EmitBitCastU32F32(EmitContext& ctx, IR::Inst& inst, const IR::Value& value);
void EmitBitCastU64F64(EmitContext& ctx, IR::Inst& inst, const IR::Value& value);
void EmitBitCastF16U16(EmitContext& ctx, IR::Inst& inst, const IR::Value& value);
void EmitBitCastF32U32(EmitContext& ctx, IR::Inst& inst, const IR::Value& value);
void EmitBitCastF64U64(EmitContext& ctx, IR::Inst& inst, const IR::Value& value);
void EmitPackUint2x32(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitUnpackUint2x32(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitPackFloat2x16(EmitContext& ctx, Register value);
void EmitUnpackFloat2x16(EmitContext& ctx, Register value);
void EmitPackHalf2x16(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitUnpackHalf2x16(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitPackDouble2x32(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitUnpackDouble2x32(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitGetZeroFromOp(EmitContext& ctx);
void EmitGetSignFromOp(EmitContext& ctx);
void EmitGetCarryFromOp(EmitContext& ctx);
void EmitGetOverflowFromOp(EmitContext& ctx);
void EmitGetSparseFromOp(EmitContext& ctx);
void EmitGetInBoundsFromOp(EmitContext& ctx);
void EmitFPAbs16(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitFPAbs32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitFPAbs64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value);
void EmitFPAdd16(EmitContext& ctx, IR::Inst& inst, Register a, Register b);
void EmitFPAdd32(EmitContext& ctx, IR::Inst& inst, ScalarF32 a, ScalarF32 b);
void EmitFPAdd64(EmitContext& ctx, IR::Inst& inst, ScalarF64 a, ScalarF64 b);
void EmitFPFma16(EmitContext& ctx, IR::Inst& inst, Register a, Register b, Register c);
void EmitFPFma32(EmitContext& ctx, IR::Inst& inst, ScalarF32 a, ScalarF32 b, ScalarF32 c);
void EmitFPFma64(EmitContext& ctx, IR::Inst& inst, ScalarF64 a, ScalarF64 b, ScalarF64 c);
void EmitFPMax32(EmitContext& ctx, IR::Inst& inst, ScalarF32 a, ScalarF32 b);
void EmitFPMax64(EmitContext& ctx, IR::Inst& inst, ScalarF64 a, ScalarF64 b);
void EmitFPMin32(EmitContext& ctx, IR::Inst& inst, ScalarF32 a, ScalarF32 b);
void EmitFPMin64(EmitContext& ctx, IR::Inst& inst, ScalarF64 a, ScalarF64 b);
void EmitFPMul16(EmitContext& ctx, IR::Inst& inst, Register a, Register b);
void EmitFPMul32(EmitContext& ctx, IR::Inst& inst, ScalarF32 a, ScalarF32 b);
void EmitFPMul64(EmitContext& ctx, IR::Inst& inst, ScalarF64 a, ScalarF64 b);
void EmitFPNeg16(EmitContext& ctx, Register value);
void EmitFPNeg32(EmitContext& ctx, IR::Inst& inst, ScalarRegister value);
void EmitFPNeg64(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitFPSin(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitFPCos(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitFPExp2(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitFPLog2(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitFPRecip32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitFPRecip64(EmitContext& ctx, Register value);
void EmitFPRecipSqrt32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitFPRecipSqrt64(EmitContext& ctx, Register value);
void EmitFPSqrt(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitFPSaturate16(EmitContext& ctx, Register value);
void EmitFPSaturate32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitFPSaturate64(EmitContext& ctx, Register value);
void EmitFPClamp16(EmitContext& ctx, Register value, Register min_value, Register max_value);
void EmitFPClamp32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value, ScalarF32 min_value,
ScalarF32 max_value);
void EmitFPClamp64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value, ScalarF64 min_value,
ScalarF64 max_value);
void EmitFPRoundEven16(EmitContext& ctx, Register value);
void EmitFPRoundEven32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitFPRoundEven64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value);
void EmitFPFloor16(EmitContext& ctx, Register value);
void EmitFPFloor32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitFPFloor64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value);
void EmitFPCeil16(EmitContext& ctx, Register value);
void EmitFPCeil32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitFPCeil64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value);
void EmitFPTrunc16(EmitContext& ctx, Register value);
void EmitFPTrunc32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitFPTrunc64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value);
void EmitFPOrdEqual16(EmitContext& ctx, Register lhs, Register rhs);
void EmitFPOrdEqual32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs);
void EmitFPOrdEqual64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs);
void EmitFPUnordEqual16(EmitContext& ctx, Register lhs, Register rhs);
void EmitFPUnordEqual32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs);
void EmitFPUnordEqual64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs);
void EmitFPOrdNotEqual16(EmitContext& ctx, Register lhs, Register rhs);
void EmitFPOrdNotEqual32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs);
void EmitFPOrdNotEqual64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs);
void EmitFPUnordNotEqual16(EmitContext& ctx, Register lhs, Register rhs);
void EmitFPUnordNotEqual32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs);
void EmitFPUnordNotEqual64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs);
void EmitFPOrdLessThan16(EmitContext& ctx, Register lhs, Register rhs);
void EmitFPOrdLessThan32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs);
void EmitFPOrdLessThan64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs);
void EmitFPUnordLessThan16(EmitContext& ctx, Register lhs, Register rhs);
void EmitFPUnordLessThan32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs);
void EmitFPUnordLessThan64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs);
void EmitFPOrdGreaterThan16(EmitContext& ctx, Register lhs, Register rhs);
void EmitFPOrdGreaterThan32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs);
void EmitFPOrdGreaterThan64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs);
void EmitFPUnordGreaterThan16(EmitContext& ctx, Register lhs, Register rhs);
void EmitFPUnordGreaterThan32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs);
void EmitFPUnordGreaterThan64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs);
void EmitFPOrdLessThanEqual16(EmitContext& ctx, Register lhs, Register rhs);
void EmitFPOrdLessThanEqual32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs);
void EmitFPOrdLessThanEqual64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs);
void EmitFPUnordLessThanEqual16(EmitContext& ctx, Register lhs, Register rhs);
void EmitFPUnordLessThanEqual32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs);
void EmitFPUnordLessThanEqual64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs);
void EmitFPOrdGreaterThanEqual16(EmitContext& ctx, Register lhs, Register rhs);
void EmitFPOrdGreaterThanEqual32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs);
void EmitFPOrdGreaterThanEqual64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs);
void EmitFPUnordGreaterThanEqual16(EmitContext& ctx, Register lhs, Register rhs);
void EmitFPUnordGreaterThanEqual32(EmitContext& ctx, IR::Inst& inst, ScalarF32 lhs, ScalarF32 rhs);
void EmitFPUnordGreaterThanEqual64(EmitContext& ctx, IR::Inst& inst, ScalarF64 lhs, ScalarF64 rhs);
void EmitFPIsNan16(EmitContext& ctx, Register value);
void EmitFPIsNan32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitFPIsNan64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value);
void EmitIAdd32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b);
void EmitIAdd64(EmitContext& ctx, IR::Inst& inst, Register a, Register b);
void EmitISub32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b);
void EmitISub64(EmitContext& ctx, IR::Inst& inst, Register a, Register b);
void EmitIMul32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b);
void EmitINeg32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value);
void EmitINeg64(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitIAbs32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value);
void EmitShiftLeftLogical32(EmitContext& ctx, IR::Inst& inst, ScalarU32 base, ScalarU32 shift);
void EmitShiftLeftLogical64(EmitContext& ctx, IR::Inst& inst, ScalarRegister base, ScalarU32 shift);
void EmitShiftRightLogical32(EmitContext& ctx, IR::Inst& inst, ScalarU32 base, ScalarU32 shift);
void EmitShiftRightLogical64(EmitContext& ctx, IR::Inst& inst, ScalarRegister base,
ScalarU32 shift);
void EmitShiftRightArithmetic32(EmitContext& ctx, IR::Inst& inst, ScalarS32 base, ScalarS32 shift);
void EmitShiftRightArithmetic64(EmitContext& ctx, IR::Inst& inst, ScalarRegister base,
ScalarS32 shift);
void EmitBitwiseAnd32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b);
void EmitBitwiseOr32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b);
void EmitBitwiseXor32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b);
void EmitBitFieldInsert(EmitContext& ctx, IR::Inst& inst, ScalarS32 base, ScalarS32 insert,
ScalarS32 offset, ScalarS32 count);
void EmitBitFieldSExtract(EmitContext& ctx, IR::Inst& inst, ScalarS32 base, ScalarS32 offset,
ScalarS32 count);
void EmitBitFieldUExtract(EmitContext& ctx, IR::Inst& inst, ScalarU32 base, ScalarU32 offset,
ScalarU32 count);
void EmitBitReverse32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value);
void EmitBitCount32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value);
void EmitBitwiseNot32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value);
void EmitFindSMsb32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value);
void EmitFindUMsb32(EmitContext& ctx, IR::Inst& inst, ScalarU32 value);
void EmitSMin32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b);
void EmitUMin32(EmitContext& ctx, IR::Inst& inst, ScalarU32 a, ScalarU32 b);
void EmitSMax32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b);
void EmitUMax32(EmitContext& ctx, IR::Inst& inst, ScalarU32 a, ScalarU32 b);
void EmitSClamp32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value, ScalarS32 min, ScalarS32 max);
void EmitUClamp32(EmitContext& ctx, IR::Inst& inst, ScalarU32 value, ScalarU32 min, ScalarU32 max);
void EmitSLessThan(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs);
void EmitULessThan(EmitContext& ctx, IR::Inst& inst, ScalarU32 lhs, ScalarU32 rhs);
void EmitIEqual(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs);
void EmitSLessThanEqual(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs);
void EmitULessThanEqual(EmitContext& ctx, IR::Inst& inst, ScalarU32 lhs, ScalarU32 rhs);
void EmitSGreaterThan(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs);
void EmitUGreaterThan(EmitContext& ctx, IR::Inst& inst, ScalarU32 lhs, ScalarU32 rhs);
void EmitINotEqual(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs);
void EmitSGreaterThanEqual(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs);
void EmitUGreaterThanEqual(EmitContext& ctx, IR::Inst& inst, ScalarU32 lhs, ScalarU32 rhs);
void EmitSharedAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value);
void EmitSharedAtomicSMin32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarS32 value);
void EmitSharedAtomicUMin32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value);
void EmitSharedAtomicSMax32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarS32 value);
void EmitSharedAtomicUMax32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value);
void EmitSharedAtomicInc32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value);
void EmitSharedAtomicDec32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value);
void EmitSharedAtomicAnd32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value);
void EmitSharedAtomicOr32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value);
void EmitSharedAtomicXor32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value);
void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value);
void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
Register value);
void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value);
void EmitStorageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarS32 value);
void EmitStorageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value);
void EmitStorageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarS32 value);
void EmitStorageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value);
void EmitStorageAtomicInc32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value);
void EmitStorageAtomicDec32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value);
void EmitStorageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value);
void EmitStorageAtomicOr32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value);
void EmitStorageAtomicXor32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value);
void EmitStorageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value);
void EmitStorageAtomicIAdd64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value);
void EmitStorageAtomicSMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value);
void EmitStorageAtomicUMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value);
void EmitStorageAtomicSMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value);
void EmitStorageAtomicUMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value);
void EmitStorageAtomicAnd64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value);
void EmitStorageAtomicOr64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value);
void EmitStorageAtomicXor64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value);
void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value);
void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarF32 value);
void EmitStorageAtomicAddF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value);
void EmitStorageAtomicAddF32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value);
void EmitStorageAtomicMinF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value);
void EmitStorageAtomicMinF32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value);
void EmitStorageAtomicMaxF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value);
void EmitStorageAtomicMaxF32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value);
void EmitGlobalAtomicIAdd32(EmitContext& ctx);
void EmitGlobalAtomicSMin32(EmitContext& ctx);
void EmitGlobalAtomicUMin32(EmitContext& ctx);
void EmitGlobalAtomicSMax32(EmitContext& ctx);
void EmitGlobalAtomicUMax32(EmitContext& ctx);
void EmitGlobalAtomicInc32(EmitContext& ctx);
void EmitGlobalAtomicDec32(EmitContext& ctx);
void EmitGlobalAtomicAnd32(EmitContext& ctx);
void EmitGlobalAtomicOr32(EmitContext& ctx);
void EmitGlobalAtomicXor32(EmitContext& ctx);
void EmitGlobalAtomicExchange32(EmitContext& ctx);
void EmitGlobalAtomicIAdd64(EmitContext& ctx);
void EmitGlobalAtomicSMin64(EmitContext& ctx);
void EmitGlobalAtomicUMin64(EmitContext& ctx);
void EmitGlobalAtomicSMax64(EmitContext& ctx);
void EmitGlobalAtomicUMax64(EmitContext& ctx);
void EmitGlobalAtomicInc64(EmitContext& ctx);
void EmitGlobalAtomicDec64(EmitContext& ctx);
void EmitGlobalAtomicAnd64(EmitContext& ctx);
void EmitGlobalAtomicOr64(EmitContext& ctx);
void EmitGlobalAtomicXor64(EmitContext& ctx);
void EmitGlobalAtomicExchange64(EmitContext& ctx);
void EmitGlobalAtomicAddF32(EmitContext& ctx);
void EmitGlobalAtomicAddF16x2(EmitContext& ctx);
void EmitGlobalAtomicAddF32x2(EmitContext& ctx);
void EmitGlobalAtomicMinF16x2(EmitContext& ctx);
void EmitGlobalAtomicMinF32x2(EmitContext& ctx);
void EmitGlobalAtomicMaxF16x2(EmitContext& ctx);
void EmitGlobalAtomicMaxF32x2(EmitContext& ctx);
void EmitLogicalOr(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b);
void EmitLogicalAnd(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b);
void EmitLogicalXor(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b);
void EmitLogicalNot(EmitContext& ctx, IR::Inst& inst, ScalarS32 value);
void EmitConvertS16F16(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertS16F32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitConvertS16F64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value);
void EmitConvertS32F16(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertS32F32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitConvertS32F64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value);
void EmitConvertS64F16(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertS64F32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitConvertS64F64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value);
void EmitConvertU16F16(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertU16F32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitConvertU16F64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value);
void EmitConvertU32F16(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertU32F32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitConvertU32F64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value);
void EmitConvertU64F16(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertU64F32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitConvertU64F64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value);
void EmitConvertU64U32(EmitContext& ctx, IR::Inst& inst, ScalarU32 value);
void EmitConvertU32U64(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF16F32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitConvertF32F16(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF32F64(EmitContext& ctx, IR::Inst& inst, ScalarF64 value);
void EmitConvertF64F32(EmitContext& ctx, IR::Inst& inst, ScalarF32 value);
void EmitConvertF16S8(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF16S16(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF16S32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value);
void EmitConvertF16S64(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF16U8(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF16U16(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF16U32(EmitContext& ctx, IR::Inst& inst, ScalarU32 value);
void EmitConvertF16U64(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF32S8(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF32S16(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF32S32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value);
void EmitConvertF32S64(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF32U8(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF32U16(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF32U32(EmitContext& ctx, IR::Inst& inst, ScalarU32 value);
void EmitConvertF32U64(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF64S8(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF64S16(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF64S32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value);
void EmitConvertF64S64(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF64U8(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF64U16(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitConvertF64U32(EmitContext& ctx, IR::Inst& inst, ScalarU32 value);
void EmitConvertF64U64(EmitContext& ctx, IR::Inst& inst, Register value);
void EmitBindlessImageSampleImplicitLod(EmitContext&);
void EmitBindlessImageSampleExplicitLod(EmitContext&);
void EmitBindlessImageSampleDrefImplicitLod(EmitContext&);
void EmitBindlessImageSampleDrefExplicitLod(EmitContext&);
void EmitBindlessImageGather(EmitContext&);
void EmitBindlessImageGatherDref(EmitContext&);
void EmitBindlessImageFetch(EmitContext&);
void EmitBindlessImageQueryDimensions(EmitContext&);
void EmitBindlessImageQueryLod(EmitContext&);
void EmitBindlessImageGradient(EmitContext&);
void EmitBindlessImageRead(EmitContext&);
void EmitBindlessImageWrite(EmitContext&);
void EmitBoundImageSampleImplicitLod(EmitContext&);
void EmitBoundImageSampleExplicitLod(EmitContext&);
void EmitBoundImageSampleDrefImplicitLod(EmitContext&);
void EmitBoundImageSampleDrefExplicitLod(EmitContext&);
void EmitBoundImageGather(EmitContext&);
void EmitBoundImageGatherDref(EmitContext&);
void EmitBoundImageFetch(EmitContext&);
void EmitBoundImageQueryDimensions(EmitContext&);
void EmitBoundImageQueryLod(EmitContext&);
void EmitBoundImageGradient(EmitContext&);
void EmitBoundImageRead(EmitContext&);
void EmitBoundImageWrite(EmitContext&);
void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
const IR::Value& coord, Register bias_lc, const IR::Value& offset);
void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
const IR::Value& coord, ScalarF32 lod, const IR::Value& offset);
void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
const IR::Value& coord, const IR::Value& dref,
const IR::Value& bias_lc, const IR::Value& offset);
void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
const IR::Value& coord, const IR::Value& dref,
const IR::Value& lod, const IR::Value& offset);
void EmitImageGather(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
const IR::Value& coord, const IR::Value& offset, const IR::Value& offset2);
void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
const IR::Value& coord, const IR::Value& offset, const IR::Value& offset2,
const IR::Value& dref);
void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
const IR::Value& coord, const IR::Value& offset, ScalarS32 lod, ScalarS32 ms);
void EmitImageQueryDimensions(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
ScalarS32 lod);
void EmitImageQueryLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord);
void EmitImageGradient(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
const IR::Value& coord, const IR::Value& derivatives,
const IR::Value& offset, const IR::Value& lod_clamp);
void EmitImageRead(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord);
void EmitImageWrite(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
Register color);
void EmitBindlessImageAtomicIAdd32(EmitContext&);
void EmitBindlessImageAtomicSMin32(EmitContext&);
void EmitBindlessImageAtomicUMin32(EmitContext&);
void EmitBindlessImageAtomicSMax32(EmitContext&);
void EmitBindlessImageAtomicUMax32(EmitContext&);
void EmitBindlessImageAtomicInc32(EmitContext&);
void EmitBindlessImageAtomicDec32(EmitContext&);
void EmitBindlessImageAtomicAnd32(EmitContext&);
void EmitBindlessImageAtomicOr32(EmitContext&);
void EmitBindlessImageAtomicXor32(EmitContext&);
void EmitBindlessImageAtomicExchange32(EmitContext&);
void EmitBoundImageAtomicIAdd32(EmitContext&);
void EmitBoundImageAtomicSMin32(EmitContext&);
void EmitBoundImageAtomicUMin32(EmitContext&);
void EmitBoundImageAtomicSMax32(EmitContext&);
void EmitBoundImageAtomicUMax32(EmitContext&);
void EmitBoundImageAtomicInc32(EmitContext&);
void EmitBoundImageAtomicDec32(EmitContext&);
void EmitBoundImageAtomicAnd32(EmitContext&);
void EmitBoundImageAtomicOr32(EmitContext&);
void EmitBoundImageAtomicXor32(EmitContext&);
void EmitBoundImageAtomicExchange32(EmitContext&);
void EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarU32 value);
void EmitImageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarS32 value);
void EmitImageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarU32 value);
void EmitImageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarS32 value);
void EmitImageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarU32 value);
void EmitImageAtomicInc32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarU32 value);
void EmitImageAtomicDec32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarU32 value);
void EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarU32 value);
void EmitImageAtomicOr32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarU32 value);
void EmitImageAtomicXor32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index, Register coord,
ScalarU32 value);
void EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
Register coord, ScalarU32 value);
void EmitLaneId(EmitContext& ctx, IR::Inst& inst);
void EmitVoteAll(EmitContext& ctx, IR::Inst& inst, ScalarS32 pred);
void EmitVoteAny(EmitContext& ctx, IR::Inst& inst, ScalarS32 pred);
void EmitVoteEqual(EmitContext& ctx, IR::Inst& inst, ScalarS32 pred);
void EmitSubgroupBallot(EmitContext& ctx, IR::Inst& inst, ScalarS32 pred);
void EmitSubgroupEqMask(EmitContext& ctx, IR::Inst& inst);
void EmitSubgroupLtMask(EmitContext& ctx, IR::Inst& inst);
void EmitSubgroupLeMask(EmitContext& ctx, IR::Inst& inst);
void EmitSubgroupGtMask(EmitContext& ctx, IR::Inst& inst);
void EmitSubgroupGeMask(EmitContext& ctx, IR::Inst& inst);
void EmitShuffleIndex(EmitContext& ctx, IR::Inst& inst, ScalarU32 value, ScalarU32 index,
const IR::Value& clamp, const IR::Value& segmentation_mask);
void EmitShuffleUp(EmitContext& ctx, IR::Inst& inst, ScalarU32 value, ScalarU32 index,
const IR::Value& clamp, const IR::Value& segmentation_mask);
void EmitShuffleDown(EmitContext& ctx, IR::Inst& inst, ScalarU32 value, ScalarU32 index,
const IR::Value& clamp, const IR::Value& segmentation_mask);
void EmitShuffleButterfly(EmitContext& ctx, IR::Inst& inst, ScalarU32 value, ScalarU32 index,
const IR::Value& clamp, const IR::Value& segmentation_mask);
void EmitFSwizzleAdd(EmitContext& ctx, IR::Inst& inst, ScalarF32 op_a, ScalarF32 op_b,
ScalarU32 swizzle);
void EmitDPdxFine(EmitContext& ctx, IR::Inst& inst, ScalarF32 op_a);
void EmitDPdyFine(EmitContext& ctx, IR::Inst& inst, ScalarF32 op_a);
void EmitDPdxCoarse(EmitContext& ctx, IR::Inst& inst, ScalarF32 op_a);
void EmitDPdyCoarse(EmitContext& ctx, IR::Inst& inst, ScalarF32 op_a);
} // namespace Shader::Backend::GLASM

@ -0,0 +1,294 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"

namespace Shader::Backend::GLASM {
namespace {
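// Helper shared by AND/OR/XOR: emits the logical operation and, when the IR
// also consumes the zero/sign pseudo-operations, derives those flags from the
// result with SEQ/SLT rather than relying on condition codes.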
void BitwiseLogicalOp(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b,
std::string_view lop) {
const auto zero = inst.GetAssociatedPseudoOperation(IR::Opcode::GetZeroFromOp);
const auto sign = inst.GetAssociatedPseudoOperation(IR::Opcode::GetSignFromOp);
if (zero) {
zero->Invalidate();
}
if (sign) {
sign->Invalidate();
}
if (zero || sign) {
ctx.reg_alloc.InvalidateConditionCodes();
}
const auto ret{ctx.reg_alloc.Define(inst)};
ctx.Add("{}.S {}.x,{},{};", lop, ret, a, b);
if (zero) {
ctx.Add("SEQ.S {},{},0;", *zero, ret);
}
if (sign) {
ctx.Add("SLT.S {},{},0;", *sign, ret);
}
}
} // Anonymous namespace
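// IAdd32 is the one integer op here that can feed all four flag
// pseudo-operations. When any is consumed, the add is emitted with the .CC
// modifier; the zero flag is then recomputed from the result, while
// sign/carry/overflow are read back from the SF/CF/OF condition-code masks.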
void EmitIAdd32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
const std::array flags{
inst.GetAssociatedPseudoOperation(IR::Opcode::GetZeroFromOp),
inst.GetAssociatedPseudoOperation(IR::Opcode::GetSignFromOp),
inst.GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp),
inst.GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp),
};
for (IR::Inst* const flag_inst : flags) {
if (flag_inst) {
flag_inst->Invalidate();
}
}
const bool cc{inst.HasAssociatedPseudoOperation()};
const std::string_view cc_mod{cc ? ".CC" : ""};
if (cc) {
ctx.reg_alloc.InvalidateConditionCodes();
}
const auto ret{ctx.reg_alloc.Define(inst)};
ctx.Add("ADD.S{} {}.x,{},{};", cc_mod, ret, a, b);
if (!cc) {
return;
}
static constexpr std::array<std::string_view, 4> masks{"", "SF", "CF", "OF"};
for (size_t flag_index = 0; flag_index < flags.size(); ++flag_index) {
if (!flags[flag_index]) {
continue;
}
const auto flag_ret{ctx.reg_alloc.Define(*flags[flag_index])};
if (flag_index == 0) {
ctx.Add("SEQ.S {}.x,{}.x,0;", flag_ret, ret);
} else {
// We could use conditional execution here, but it's broken on Nvidia's compiler
ctx.Add("IF {}.x;"
"MOV.S {}.x,-1;"
"ELSE;"
"MOV.S {}.x,0;"
"ENDIF;",
masks[flag_index], flag_ret, flag_ret);
}
}
}
void EmitIAdd64(EmitContext& ctx, IR::Inst& inst, Register a, Register b) {
ctx.LongAdd("ADD.S64 {}.x,{}.x,{}.x;", inst, a, b);
}
void EmitISub32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
ctx.Add("SUB.S {}.x,{},{};", inst, a, b);
}
void EmitISub64(EmitContext& ctx, IR::Inst& inst, Register a, Register b) {
ctx.LongAdd("SUB.S64 {}.x,{}.x,{}.x;", inst, a, b);
}
void EmitIMul32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
ctx.Add("MUL.S {}.x,{},{};", inst, a, b);
}
void EmitINeg32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
if (value.type != Type::Register && static_cast<s32>(value.imm_u32) < 0) {
ctx.Add("MOV.S {},{};", inst, -static_cast<s32>(value.imm_u32));
} else {
ctx.Add("MOV.S {},-{};", inst, value);
}
}
void EmitINeg64(EmitContext& ctx, IR::Inst& inst, Register value) {
ctx.LongAdd("MOV.S64 {},-{};", inst, value);
}
void EmitIAbs32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
ctx.Add("ABS.S {},{};", inst, value);
}
void EmitShiftLeftLogical32(EmitContext& ctx, IR::Inst& inst, ScalarU32 base, ScalarU32 shift) {
ctx.Add("SHL.U {}.x,{},{};", inst, base, shift);
}
void EmitShiftLeftLogical64(EmitContext& ctx, IR::Inst& inst, ScalarRegister base,
ScalarU32 shift) {
ctx.LongAdd("SHL.U64 {}.x,{},{};", inst, base, shift);
}
void EmitShiftRightLogical32(EmitContext& ctx, IR::Inst& inst, ScalarU32 base, ScalarU32 shift) {
ctx.Add("SHR.U {}.x,{},{};", inst, base, shift);
}
void EmitShiftRightLogical64(EmitContext& ctx, IR::Inst& inst, ScalarRegister base,
ScalarU32 shift) {
ctx.LongAdd("SHR.U64 {}.x,{},{};", inst, base, shift);
}
void EmitShiftRightArithmetic32(EmitContext& ctx, IR::Inst& inst, ScalarS32 base, ScalarS32 shift) {
ctx.Add("SHR.S {}.x,{},{};", inst, base, shift);
}
void EmitShiftRightArithmetic64(EmitContext& ctx, IR::Inst& inst, ScalarRegister base,
ScalarS32 shift) {
ctx.LongAdd("SHR.S64 {}.x,{},{};", inst, base, shift);
}
void EmitBitwiseAnd32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
BitwiseLogicalOp(ctx, inst, a, b, "AND");
}
void EmitBitwiseOr32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
BitwiseLogicalOp(ctx, inst, a, b, "OR");
}
void EmitBitwiseXor32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
BitwiseLogicalOp(ctx, inst, a, b, "XOR");
}
void EmitBitFieldInsert(EmitContext& ctx, IR::Inst& inst, ScalarS32 base, ScalarS32 insert,
ScalarS32 offset, ScalarS32 count) {
const Register ret{ctx.reg_alloc.Define(inst)};
if (count.type != Type::Register && offset.type != Type::Register) {
ctx.Add("BFI.S {},{{{},{},0,0}},{},{};", ret, count, offset, insert, base);
} else {
ctx.Add("MOV.S RC.x,{};"
"MOV.S RC.y,{};"
"BFI.S {},RC,{},{};",
count, offset, ret, insert, base);
}
}
void EmitBitFieldSExtract(EmitContext& ctx, IR::Inst& inst, ScalarS32 base, ScalarS32 offset,
ScalarS32 count) {
const Register ret{ctx.reg_alloc.Define(inst)};
if (count.type != Type::Register && offset.type != Type::Register) {
ctx.Add("BFE.S {},{{{},{},0,0}},{};", ret, count, offset, base);
} else {
ctx.Add("MOV.S RC.x,{};"
"MOV.S RC.y,{};"
"BFE.S {},RC,{};",
count, offset, ret, base);
}
}
void EmitBitFieldUExtract(EmitContext& ctx, IR::Inst& inst, ScalarU32 base, ScalarU32 offset,
ScalarU32 count) {
const auto zero = inst.GetAssociatedPseudoOperation(IR::Opcode::GetZeroFromOp);
const auto sign = inst.GetAssociatedPseudoOperation(IR::Opcode::GetSignFromOp);
if (zero) {
zero->Invalidate();
}
if (sign) {
sign->Invalidate();
}
if (zero || sign) {
ctx.reg_alloc.InvalidateConditionCodes();
}
const Register ret{ctx.reg_alloc.Define(inst)};
if (count.type != Type::Register && offset.type != Type::Register) {
ctx.Add("BFE.U {},{{{},{},0,0}},{};", ret, count, offset, base);
} else {
ctx.Add("MOV.U RC.x,{};"
"MOV.U RC.y,{};"
"BFE.U {},RC,{};",
count, offset, ret, base);
}
if (zero) {
ctx.Add("SEQ.S {},{},0;", *zero, ret);
}
if (sign) {
ctx.Add("SLT.S {},{},0;", *sign, ret);
}
}
void EmitBitReverse32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
ctx.Add("BFR {},{};", inst, value);
}
void EmitBitCount32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
ctx.Add("BTC {},{};", inst, value);
}
void EmitBitwiseNot32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
ctx.Add("NOT.S {},{};", inst, value);
}
void EmitFindSMsb32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
ctx.Add("BTFM.S {},{};", inst, value);
}
void EmitFindUMsb32(EmitContext& ctx, IR::Inst& inst, ScalarU32 value) {
ctx.Add("BTFM.U {},{};", inst, value);
}
void EmitSMin32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
ctx.Add("MIN.S {},{},{};", inst, a, b);
}
void EmitUMin32(EmitContext& ctx, IR::Inst& inst, ScalarU32 a, ScalarU32 b) {
ctx.Add("MIN.U {},{},{};", inst, a, b);
}
void EmitSMax32(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
ctx.Add("MAX.S {},{},{};", inst, a, b);
}
void EmitUMax32(EmitContext& ctx, IR::Inst& inst, ScalarU32 a, ScalarU32 b) {
ctx.Add("MAX.U {},{},{};", inst, a, b);
}
void EmitSClamp32(EmitContext& ctx, IR::Inst& inst, ScalarS32 value, ScalarS32 min, ScalarS32 max) {
const Register ret{ctx.reg_alloc.Define(inst)};
ctx.Add("MIN.S RC.x,{},{};"
"MAX.S {}.x,RC.x,{};",
max, value, ret, min);
}
void EmitUClamp32(EmitContext& ctx, IR::Inst& inst, ScalarU32 value, ScalarU32 min, ScalarU32 max) {
const Register ret{ctx.reg_alloc.Define(inst)};
ctx.Add("MIN.U RC.x,{},{};"
"MAX.U {}.x,RC.x,{};",
max, value, ret, min);
}
void EmitSLessThan(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs) {
ctx.Add("SLT.S {}.x,{},{};", inst, lhs, rhs);
}
void EmitULessThan(EmitContext& ctx, IR::Inst& inst, ScalarU32 lhs, ScalarU32 rhs) {
ctx.Add("SLT.U {}.x,{},{};", inst, lhs, rhs);
}
void EmitIEqual(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs) {
ctx.Add("SEQ.S {}.x,{},{};", inst, lhs, rhs);
}
void EmitSLessThanEqual(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs) {
ctx.Add("SLE.S {}.x,{},{};", inst, lhs, rhs);
}
void EmitULessThanEqual(EmitContext& ctx, IR::Inst& inst, ScalarU32 lhs, ScalarU32 rhs) {
ctx.Add("SLE.U {}.x,{},{};", inst, lhs, rhs);
}
void EmitSGreaterThan(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs) {
ctx.Add("SGT.S {}.x,{},{};", inst, lhs, rhs);
}
void EmitUGreaterThan(EmitContext& ctx, IR::Inst& inst, ScalarU32 lhs, ScalarU32 rhs) {
ctx.Add("SGT.U {}.x,{},{};", inst, lhs, rhs);
}
void EmitINotEqual(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs) {
ctx.Add("SNE.U {}.x,{},{};", inst, lhs, rhs);
}
void EmitSGreaterThanEqual(EmitContext& ctx, IR::Inst& inst, ScalarS32 lhs, ScalarS32 rhs) {
ctx.Add("SGE.S {}.x,{},{};", inst, lhs, rhs);
}
void EmitUGreaterThanEqual(EmitContext& ctx, IR::Inst& inst, ScalarU32 lhs, ScalarU32 rhs) {
ctx.Add("SGE.U {}.x,{},{};", inst, lhs, rhs);
}
} // namespace Shader::Backend::GLASM

@ -0,0 +1,568 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <string_view>

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/frontend/ir/program.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/runtime_info.h"

namespace Shader::Backend::GLASM {
namespace {
void StorageOp(EmitContext& ctx, const IR::Value& binding, ScalarU32 offset,
std::string_view then_expr, std::string_view else_expr = {}) {
// Operate on bindless SSBO, call the expression with bounds checking
// address = c[binding].xy
// length = c[binding].z
const u32 sb_binding{binding.U32()};
ctx.Add("PK64.U DC,c[{}];" // pointer = address
"CVT.U64.U32 DC.z,{};" // offset = uint64_t(offset)
"ADD.U64 DC.x,DC.x,DC.z;" // pointer += offset
"SLT.U.CC RC.x,{},c[{}].z;", // cc = offset < length
sb_binding, offset, offset, sb_binding);
if (else_expr.empty()) {
ctx.Add("IF NE.x;{}ENDIF;", then_expr);
} else {
ctx.Add("IF NE.x;{}ELSE;{}ENDIF;", then_expr, else_expr);
}
}
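// Resolves a raw 64-bit address against every used SSBO descriptor: each
// buffer's [ssbo_addr, ssbo_addr + ssbo_size) range is tested against the
// input address and, on a hit, the expression runs with DC.x holding the
// buffer-relative offset (or the host address on the pointer-based path).
// The nested IF blocks are closed by the matching ENDIFs emitted at the end.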
void GlobalStorageOp(EmitContext& ctx, Register address, bool pointer_based, std::string_view expr,
std::string_view else_expr = {}) {
const size_t num_buffers{ctx.info.storage_buffers_descriptors.size()};
for (size_t index = 0; index < num_buffers; ++index) {
if (!ctx.info.nvn_buffer_used[index]) {
continue;
}
const auto& ssbo{ctx.info.storage_buffers_descriptors[index]};
ctx.Add("LDC.U64 DC.x,c{}[{}];" // ssbo_addr
"LDC.U32 RC.x,c{}[{}];" // ssbo_size_u32
"CVT.U64.U32 DC.y,RC.x;" // ssbo_size = ssbo_size_u32
"ADD.U64 DC.y,DC.y,DC.x;" // ssbo_end = ssbo_addr + ssbo_size
"SGE.U64 RC.x,{}.x,DC.x;" // a = input_addr >= ssbo_addr ? -1 : 0
"SLT.U64 RC.y,{}.x,DC.y;" // b = input_addr < ssbo_end ? -1 : 0
"AND.U.CC RC.x,RC.x,RC.y;" // cond = a && b
"IF NE.x;" // if cond
"SUB.U64 DC.x,{}.x,DC.x;", // offset = input_addr - ssbo_addr
ssbo.cbuf_index, ssbo.cbuf_offset, ssbo.cbuf_index, ssbo.cbuf_offset + 8, address,
address, address);
if (pointer_based) {
ctx.Add("PK64.U DC.y,c[{}];" // host_ssbo = cbuf
"ADD.U64 DC.x,DC.x,DC.y;" // host_addr = host_ssbo + offset
"{}"
"ELSE;",
index, expr);
} else {
ctx.Add("CVT.U32.U64 RC.x,DC.x;"
"{},ssbo{}[RC.x];"
"ELSE;",
expr, index);
}
}
if (!else_expr.empty()) {
ctx.Add("{}", else_expr);
}
const size_t num_used_buffers{ctx.info.nvn_buffer_used.count()};
for (size_t index = 0; index < num_used_buffers; ++index) {
ctx.Add("ENDIF;");
}
}
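// Load/Write dispatch on glasm_use_storage_buffers: native storage buffer
// accesses (LDB/STB) when available, otherwise the bounds-checked
// global-memory fallback (LOAD/STORE) through StorageOp.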
template <typename ValueType>
void Write(EmitContext& ctx, const IR::Value& binding, ScalarU32 offset, ValueType value,
std::string_view size) {
if (ctx.runtime_info.glasm_use_storage_buffers) {
ctx.Add("STB.{} {},ssbo{}[{}];", size, value, binding.U32(), offset);
} else {
StorageOp(ctx, binding, offset, fmt::format("STORE.{} {},DC.x;", size, value));
}
}
void Load(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset,
std::string_view size) {
const Register ret{ctx.reg_alloc.Define(inst)};
if (ctx.runtime_info.glasm_use_storage_buffers) {
ctx.Add("LDB.{} {},ssbo{}[{}];", size, ret, binding.U32(), offset);
} else {
StorageOp(ctx, binding, offset, fmt::format("LOAD.{} {},DC.x;", size, ret),
fmt::format("MOV.U {},{{0,0,0,0}};", ret));
}
}
template <typename ValueType>
void GlobalWrite(EmitContext& ctx, Register address, ValueType value, std::string_view size) {
if (ctx.runtime_info.glasm_use_storage_buffers) {
GlobalStorageOp(ctx, address, false, fmt::format("STB.{} {}", size, value));
} else {
GlobalStorageOp(ctx, address, true, fmt::format("STORE.{} {},DC.x;", size, value));
}
}
void GlobalLoad(EmitContext& ctx, IR::Inst& inst, Register address, std::string_view size) {
const Register ret{ctx.reg_alloc.Define(inst)};
if (ctx.runtime_info.glasm_use_storage_buffers) {
GlobalStorageOp(ctx, address, false, fmt::format("LDB.{} {}", size, ret));
} else {
GlobalStorageOp(ctx, address, true, fmt::format("LOAD.{} {},DC.x;", size, ret),
fmt::format("MOV.S {},0;", ret));
}
}
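// Atomics follow the same dispatch: ATOMB on native storage buffers,
// otherwise a bounds-checked ATOM on the resolved global address.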
template <typename ValueType>
void Atom(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, ScalarU32 offset,
ValueType value, std::string_view operation, std::string_view size) {
const Register ret{ctx.reg_alloc.Define(inst)};
if (ctx.runtime_info.glasm_use_storage_buffers) {
ctx.Add("ATOMB.{}.{} {},{},ssbo{}[{}];", operation, size, ret, value, binding.U32(),
offset);
} else {
StorageOp(ctx, binding, offset,
fmt::format("ATOM.{}.{} {},{},DC.x;", operation, size, ret, value));
}
}
} // Anonymous namespace
void EmitLoadGlobalU8(EmitContext& ctx, IR::Inst& inst, Register address) {
GlobalLoad(ctx, inst, address, "U8");
}
void EmitLoadGlobalS8(EmitContext& ctx, IR::Inst& inst, Register address) {
GlobalLoad(ctx, inst, address, "S8");
}
void EmitLoadGlobalU16(EmitContext& ctx, IR::Inst& inst, Register address) {
GlobalLoad(ctx, inst, address, "U16");
}
void EmitLoadGlobalS16(EmitContext& ctx, IR::Inst& inst, Register address) {
GlobalLoad(ctx, inst, address, "S16");
}
void EmitLoadGlobal32(EmitContext& ctx, IR::Inst& inst, Register address) {
GlobalLoad(ctx, inst, address, "U32");
}
void EmitLoadGlobal64(EmitContext& ctx, IR::Inst& inst, Register address) {
GlobalLoad(ctx, inst, address, "U32X2");
}
void EmitLoadGlobal128(EmitContext& ctx, IR::Inst& inst, Register address) {
GlobalLoad(ctx, inst, address, "U32X4");
}
void EmitWriteGlobalU8(EmitContext& ctx, Register address, Register value) {
GlobalWrite(ctx, address, value, "U8");
}
void EmitWriteGlobalS8(EmitContext& ctx, Register address, Register value) {
GlobalWrite(ctx, address, value, "S8");
}
void EmitWriteGlobalU16(EmitContext& ctx, Register address, Register value) {
GlobalWrite(ctx, address, value, "U16");
}
void EmitWriteGlobalS16(EmitContext& ctx, Register address, Register value) {
GlobalWrite(ctx, address, value, "S16");
}
void EmitWriteGlobal32(EmitContext& ctx, Register address, ScalarU32 value) {
GlobalWrite(ctx, address, value, "U32");
}
void EmitWriteGlobal64(EmitContext& ctx, Register address, Register value) {
GlobalWrite(ctx, address, value, "U32X2");
}
void EmitWriteGlobal128(EmitContext& ctx, Register address, Register value) {
GlobalWrite(ctx, address, value, "U32X4");
}
void EmitLoadStorageU8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset) {
Load(ctx, inst, binding, offset, "U8");
}
void EmitLoadStorageS8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset) {
Load(ctx, inst, binding, offset, "S8");
}
void EmitLoadStorageU16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset) {
Load(ctx, inst, binding, offset, "U16");
}
void EmitLoadStorageS16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset) {
Load(ctx, inst, binding, offset, "S16");
}
void EmitLoadStorage32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset) {
Load(ctx, inst, binding, offset, "U32");
}
void EmitLoadStorage64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset) {
Load(ctx, inst, binding, offset, "U32X2");
}
void EmitLoadStorage128(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset) {
Load(ctx, inst, binding, offset, "U32X4");
}
void EmitWriteStorageU8(EmitContext& ctx, const IR::Value& binding, ScalarU32 offset,
ScalarU32 value) {
Write(ctx, binding, offset, value, "U8");
}
void EmitWriteStorageS8(EmitContext& ctx, const IR::Value& binding, ScalarU32 offset,
ScalarS32 value) {
Write(ctx, binding, offset, value, "S8");
}
void EmitWriteStorageU16(EmitContext& ctx, const IR::Value& binding, ScalarU32 offset,
ScalarU32 value) {
Write(ctx, binding, offset, value, "U16");
}
void EmitWriteStorageS16(EmitContext& ctx, const IR::Value& binding, ScalarU32 offset,
ScalarS32 value) {
Write(ctx, binding, offset, value, "S16");
}
void EmitWriteStorage32(EmitContext& ctx, const IR::Value& binding, ScalarU32 offset,
ScalarU32 value) {
Write(ctx, binding, offset, value, "U32");
}
void EmitWriteStorage64(EmitContext& ctx, const IR::Value& binding, ScalarU32 offset,
Register value) {
Write(ctx, binding, offset, value, "U32X2");
}
void EmitWriteStorage128(EmitContext& ctx, const IR::Value& binding, ScalarU32 offset,
Register value) {
Write(ctx, binding, offset, value, "U32X4");
}
void EmitSharedAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value) {
ctx.Add("ATOMS.ADD.U32 {},{},shared_mem[{}];", inst, value, pointer_offset);
}
void EmitSharedAtomicSMin32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarS32 value) {
ctx.Add("ATOMS.MIN.S32 {},{},shared_mem[{}];", inst, value, pointer_offset);
}
void EmitSharedAtomicUMin32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value) {
ctx.Add("ATOMS.MIN.U32 {},{},shared_mem[{}];", inst, value, pointer_offset);
}
void EmitSharedAtomicSMax32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarS32 value) {
ctx.Add("ATOMS.MAX.S32 {},{},shared_mem[{}];", inst, value, pointer_offset);
}
void EmitSharedAtomicUMax32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value) {
ctx.Add("ATOMS.MAX.U32 {},{},shared_mem[{}];", inst, value, pointer_offset);
}
void EmitSharedAtomicInc32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value) {
ctx.Add("ATOMS.IWRAP.U32 {},{},shared_mem[{}];", inst, value, pointer_offset);
}
void EmitSharedAtomicDec32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value) {
ctx.Add("ATOMS.DWRAP.U32 {},{},shared_mem[{}];", inst, value, pointer_offset);
}
void EmitSharedAtomicAnd32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value) {
ctx.Add("ATOMS.AND.U32 {},{},shared_mem[{}];", inst, value, pointer_offset);
}
void EmitSharedAtomicOr32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value) {
ctx.Add("ATOMS.OR.U32 {},{},shared_mem[{}];", inst, value, pointer_offset);
}
void EmitSharedAtomicXor32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value) {
ctx.Add("ATOMS.XOR.U32 {},{},shared_mem[{}];", inst, value, pointer_offset);
}
void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
ScalarU32 value) {
ctx.Add("ATOMS.EXCH.U32 {},{},shared_mem[{}];", inst, value, pointer_offset);
}
void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, ScalarU32 pointer_offset,
Register value) {
ctx.LongAdd("ATOMS.EXCH.U64 {}.x,{},shared_mem[{}];", inst, value, pointer_offset);
}
void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value) {
Atom(ctx, inst, binding, offset, value, "ADD", "U32");
}
void EmitStorageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarS32 value) {
Atom(ctx, inst, binding, offset, value, "MIN", "S32");
}
void EmitStorageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value) {
Atom(ctx, inst, binding, offset, value, "MIN", "U32");
}
void EmitStorageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarS32 value) {
Atom(ctx, inst, binding, offset, value, "MAX", "S32");
}
void EmitStorageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value) {
Atom(ctx, inst, binding, offset, value, "MAX", "U32");
}
void EmitStorageAtomicInc32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value) {
Atom(ctx, inst, binding, offset, value, "IWRAP", "U32");
}
void EmitStorageAtomicDec32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value) {
Atom(ctx, inst, binding, offset, value, "DWRAP", "U32");
}
void EmitStorageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value) {
Atom(ctx, inst, binding, offset, value, "AND", "U32");
}
void EmitStorageAtomicOr32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value) {
Atom(ctx, inst, binding, offset, value, "OR", "U32");
}
void EmitStorageAtomicXor32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value) {
Atom(ctx, inst, binding, offset, value, "XOR", "U32");
}
void EmitStorageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarU32 value) {
Atom(ctx, inst, binding, offset, value, "EXCH", "U32");
}
void EmitStorageAtomicIAdd64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value) {
Atom(ctx, inst, binding, offset, value, "ADD", "U64");
}
void EmitStorageAtomicSMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value) {
Atom(ctx, inst, binding, offset, value, "MIN", "S64");
}
void EmitStorageAtomicUMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value) {
Atom(ctx, inst, binding, offset, value, "MIN", "U64");
}
void EmitStorageAtomicSMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value) {
Atom(ctx, inst, binding, offset, value, "MAX", "S64");
}
void EmitStorageAtomicUMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value) {
Atom(ctx, inst, binding, offset, value, "MAX", "U64");
}
void EmitStorageAtomicAnd64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value) {
Atom(ctx, inst, binding, offset, value, "AND", "U64");
}
void EmitStorageAtomicOr64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value) {
Atom(ctx, inst, binding, offset, value, "OR", "U64");
}
void EmitStorageAtomicXor64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value) {
Atom(ctx, inst, binding, offset, value, "XOR", "U64");
}
void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value) {
Atom(ctx, inst, binding, offset, value, "EXCH", "U64");
}
void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, ScalarF32 value) {
Atom(ctx, inst, binding, offset, value, "ADD", "F32");
}
void EmitStorageAtomicAddF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value) {
Atom(ctx, inst, binding, offset, value, "ADD", "F16x2");
}
void EmitStorageAtomicAddF32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] const IR::Value& binding,
[[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
throw NotImplementedException("GLASM instruction");
}
void EmitStorageAtomicMinF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value) {
Atom(ctx, inst, binding, offset, value, "MIN", "F16x2");
}
void EmitStorageAtomicMinF32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] const IR::Value& binding,
[[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
throw NotImplementedException("GLASM instruction");
}
void EmitStorageAtomicMaxF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
ScalarU32 offset, Register value) {
Atom(ctx, inst, binding, offset, value, "MAX", "F16x2");
}
void EmitStorageAtomicMaxF32x2([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] const IR::Value& binding,
[[maybe_unused]] ScalarU32 offset, [[maybe_unused]] Register value) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicIAdd32(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicSMin32(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicUMin32(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicSMax32(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicUMax32(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicInc32(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicDec32(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicAnd32(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicOr32(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicXor32(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicExchange32(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicIAdd64(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicSMin64(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicUMin64(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicSMax64(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicUMax64(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicInc64(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicDec64(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicAnd64(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicOr64(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicXor64(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicExchange64(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicAddF32(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicAddF16x2(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicAddF32x2(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicMinF16x2(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicMinF32x2(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicMaxF16x2(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
void EmitGlobalAtomicMaxF32x2(EmitContext&) {
throw NotImplementedException("GLASM instruction");
}
} // namespace Shader::Backend::GLASM

@ -0,0 +1,273 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include <string_view>

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/frontend/ir/program.h"
#include "shader_recompiler/frontend/ir/value.h"

#ifdef _MSC_VER
#pragma warning(disable : 4100)
#endif

namespace Shader::Backend::GLASM {
#define NotImplemented() throw NotImplementedException("GLASM instruction {}", __LINE__)
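// Allocates the register backing a phi node that was not forward-defined by
// an earlier phi move; 64-bit types are given a long register.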
static void DefinePhi(EmitContext& ctx, IR::Inst& phi) {
switch (phi.Arg(0).Type()) {
case IR::Type::U1:
case IR::Type::U32:
case IR::Type::F32:
ctx.reg_alloc.Define(phi);
break;
case IR::Type::U64:
case IR::Type::F64:
ctx.reg_alloc.LongDefine(phi);
break;
default:
throw NotImplementedException("Phi node type {}", phi.Type());
}
}
void EmitPhi(EmitContext& ctx, IR::Inst& phi) {
const size_t num_args{phi.NumArgs()};
for (size_t i = 0; i < num_args; ++i) {
ctx.reg_alloc.Consume(phi.Arg(i));
}
if (!phi.Definition<Id>().is_valid) {
// The phi node wasn't forward defined
DefinePhi(ctx, phi);
}
}
void EmitVoid(EmitContext&) {}
void EmitReference(EmitContext& ctx, const IR::Value& value) {
ctx.reg_alloc.Consume(value);
}
void EmitPhiMove(EmitContext& ctx, const IR::Value& phi_value, const IR::Value& value) {
IR::Inst& phi{RegAlloc::AliasInst(*phi_value.Inst())};
if (!phi.Definition<Id>().is_valid) {
// The phi node wasn't forward defined
DefinePhi(ctx, phi);
}
const Register phi_reg{ctx.reg_alloc.Consume(IR::Value{&phi})};
const Value eval_value{ctx.reg_alloc.Consume(value)};
if (phi_reg == eval_value) {
return;
}
switch (phi.Flags<IR::Type>()) {
case IR::Type::U1:
case IR::Type::U32:
case IR::Type::F32:
ctx.Add("MOV.S {}.x,{};", phi_reg, ScalarS32{eval_value});
break;
case IR::Type::U64:
case IR::Type::F64:
ctx.Add("MOV.U64 {}.x,{};", phi_reg, ScalarRegister{eval_value});
break;
default:
throw NotImplementedException("Phi node type {}", phi.Type());
}
}
void EmitJoin(EmitContext& ctx) {
NotImplemented();
}
void EmitDemoteToHelperInvocation(EmitContext& ctx) {
ctx.Add("KIL TR.x;");
}
void EmitBarrier(EmitContext& ctx) {
ctx.Add("BAR;");
}
void EmitWorkgroupMemoryBarrier(EmitContext& ctx) {
ctx.Add("MEMBAR.CTA;");
}
void EmitDeviceMemoryBarrier(EmitContext& ctx) {
ctx.Add("MEMBAR;");
}
void EmitPrologue(EmitContext& ctx) {
// TODO
}
void EmitEpilogue(EmitContext& ctx) {
// TODO
}
void EmitEmitVertex(EmitContext& ctx, ScalarS32 stream) {
if (stream.type == Type::U32 && stream.imm_u32 == 0) {
ctx.Add("EMIT;");
} else {
ctx.Add("EMITS {};", stream);
}
}
void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream) {
if (!stream.IsImmediate()) {
LOG_WARNING(Shader_GLASM, "Stream is not immediate");
}
ctx.reg_alloc.Consume(stream);
ctx.Add("ENDPRIM;");
}
void EmitGetRegister(EmitContext& ctx) {
NotImplemented();
}
void EmitSetRegister(EmitContext& ctx) {
NotImplemented();
}
void EmitGetPred(EmitContext& ctx) {
NotImplemented();
}
void EmitSetPred(EmitContext& ctx) {
NotImplemented();
}
void EmitSetGotoVariable(EmitContext& ctx) {
NotImplemented();
}
void EmitGetGotoVariable(EmitContext& ctx) {
NotImplemented();
}
void EmitSetIndirectBranchVariable(EmitContext& ctx) {
NotImplemented();
}
void EmitGetIndirectBranchVariable(EmitContext& ctx) {
NotImplemented();
}
void EmitGetZFlag(EmitContext& ctx) {
NotImplemented();
}
void EmitGetSFlag(EmitContext& ctx) {
NotImplemented();
}
void EmitGetCFlag(EmitContext& ctx) {
NotImplemented();
}
void EmitGetOFlag(EmitContext& ctx) {
NotImplemented();
}
void EmitSetZFlag(EmitContext& ctx) {
NotImplemented();
}
void EmitSetSFlag(EmitContext& ctx) {
NotImplemented();
}
void EmitSetCFlag(EmitContext& ctx) {
NotImplemented();
}
void EmitSetOFlag(EmitContext& ctx) {
NotImplemented();
}
void EmitWorkgroupId(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.S {},invocation.groupid;", inst);
}
void EmitLocalInvocationId(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.S {},invocation.localid;", inst);
}
void EmitInvocationId(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.S {}.x,primitive_invocation.x;", inst);
}
void EmitSampleId(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.S {}.x,fragment.sampleid.x;", inst);
}
void EmitIsHelperInvocation(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.S {}.x,fragment.helperthread.x;", inst);
}
void EmitYDirection(EmitContext& ctx, IR::Inst& inst) {
ctx.uses_y_direction = true;
ctx.Add("MOV.F {}.x,y_direction[0].w;", inst);
}
void EmitUndefU1(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.S {}.x,0;", inst);
}
void EmitUndefU8(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.S {}.x,0;", inst);
}
void EmitUndefU16(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.S {}.x,0;", inst);
}
void EmitUndefU32(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.S {}.x,0;", inst);
}
void EmitUndefU64(EmitContext& ctx, IR::Inst& inst) {
ctx.LongAdd("MOV.S64 {}.x,0;", inst);
}
void EmitGetZeroFromOp(EmitContext& ctx) {
NotImplemented();
}
void EmitGetSignFromOp(EmitContext& ctx) {
NotImplemented();
}
void EmitGetCarryFromOp(EmitContext& ctx) {
NotImplemented();
}
void EmitGetOverflowFromOp(EmitContext& ctx) {
NotImplemented();
}
void EmitGetSparseFromOp(EmitContext& ctx) {
NotImplemented();
}
void EmitGetInBoundsFromOp(EmitContext& ctx) {
NotImplemented();
}
void EmitLogicalOr(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
ctx.Add("OR.S {},{},{};", inst, a, b);
}
void EmitLogicalAnd(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
ctx.Add("AND.S {},{},{};", inst, a, b);
}
void EmitLogicalXor(EmitContext& ctx, IR::Inst& inst, ScalarS32 a, ScalarS32 b) {
ctx.Add("XOR.S {},{},{};", inst, a, b);
}
void EmitLogicalNot(EmitContext& ctx, IR::Inst& inst, ScalarS32 value) {
ctx.Add("SEQ.S {},{},0;", inst, value);
}
} // namespace Shader::Backend::GLASM

@ -0,0 +1,67 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"

namespace Shader::Backend::GLASM {
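// GLASM booleans are 0 or -1 and CMP selects its second operand when the
// condition is negative, so a single CMP.S implements select for 32-bit
// scalars.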
void EmitSelectU1(EmitContext& ctx, IR::Inst& inst, ScalarS32 cond, ScalarS32 true_value,
ScalarS32 false_value) {
ctx.Add("CMP.S {},{},{},{};", inst, cond, true_value, false_value);
}
void EmitSelectU8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] ScalarS32 cond,
[[maybe_unused]] ScalarS32 true_value, [[maybe_unused]] ScalarS32 false_value) {
throw NotImplementedException("GLASM instruction");
}
void EmitSelectU16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] ScalarS32 cond,
[[maybe_unused]] ScalarS32 true_value, [[maybe_unused]] ScalarS32 false_value) {
throw NotImplementedException("GLASM instruction");
}
void EmitSelectU32(EmitContext& ctx, IR::Inst& inst, ScalarS32 cond, ScalarS32 true_value,
ScalarS32 false_value) {
ctx.Add("CMP.S {},{},{},{};", inst, cond, true_value, false_value);
}
void EmitSelectU64(EmitContext& ctx, IR::Inst& inst, ScalarS32 cond, Register true_value,
Register false_value) {
ctx.reg_alloc.InvalidateConditionCodes();
const Register ret{ctx.reg_alloc.LongDefine(inst)};
if (ret == true_value) {
ctx.Add("MOV.S.CC RC.x,{};"
"MOV.U64 {}.x(EQ.x),{};",
cond, ret, false_value);
} else if (ret == false_value) {
ctx.Add("MOV.S.CC RC.x,{};"
"MOV.U64 {}.x(NE.x),{};",
cond, ret, true_value);
} else {
ctx.Add("MOV.S.CC RC.x,{};"
"MOV.U64 {}.x,{};"
"MOV.U64 {}.x(NE.x),{};",
cond, ret, false_value, ret, true_value);
}
}
void EmitSelectF16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] ScalarS32 cond,
[[maybe_unused]] Register true_value, [[maybe_unused]] Register false_value) {
throw NotImplementedException("GLASM instruction");
}
void EmitSelectF32(EmitContext& ctx, IR::Inst& inst, ScalarS32 cond, ScalarS32 true_value,
ScalarS32 false_value) {
ctx.Add("CMP.S {},{},{},{};", inst, cond, true_value, false_value);
}
void EmitSelectF64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] ScalarS32 cond,
[[maybe_unused]] Register true_value, [[maybe_unused]] Register false_value) {
throw NotImplementedException("GLASM instruction");
}
} // namespace Shader::Backend::GLASM

@ -0,0 +1,58 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"

namespace Shader::Backend::GLASM {
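// Shared memory is accessed through the shared_mem array declared by the
// context; the LDS/STS type suffix selects the access width, from U8 up to
// U32X4.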
void EmitLoadSharedU8(EmitContext& ctx, IR::Inst& inst, ScalarU32 offset) {
ctx.Add("LDS.U8 {},shared_mem[{}];", inst, offset);
}
void EmitLoadSharedS8(EmitContext& ctx, IR::Inst& inst, ScalarU32 offset) {
ctx.Add("LDS.S8 {},shared_mem[{}];", inst, offset);
}
void EmitLoadSharedU16(EmitContext& ctx, IR::Inst& inst, ScalarU32 offset) {
ctx.Add("LDS.U16 {},shared_mem[{}];", inst, offset);
}
void EmitLoadSharedS16(EmitContext& ctx, IR::Inst& inst, ScalarU32 offset) {
ctx.Add("LDS.S16 {},shared_mem[{}];", inst, offset);
}
void EmitLoadSharedU32(EmitContext& ctx, IR::Inst& inst, ScalarU32 offset) {
ctx.Add("LDS.U32 {},shared_mem[{}];", inst, offset);
}
void EmitLoadSharedU64(EmitContext& ctx, IR::Inst& inst, ScalarU32 offset) {
ctx.Add("LDS.U32X2 {},shared_mem[{}];", inst, offset);
}
void EmitLoadSharedU128(EmitContext& ctx, IR::Inst& inst, ScalarU32 offset) {
ctx.Add("LDS.U32X4 {},shared_mem[{}];", inst, offset);
}
void EmitWriteSharedU8(EmitContext& ctx, ScalarU32 offset, ScalarU32 value) {
ctx.Add("STS.U8 {},shared_mem[{}];", value, offset);
}
void EmitWriteSharedU16(EmitContext& ctx, ScalarU32 offset, ScalarU32 value) {
ctx.Add("STS.U16 {},shared_mem[{}];", value, offset);
}
void EmitWriteSharedU32(EmitContext& ctx, ScalarU32 offset, ScalarU32 value) {
ctx.Add("STS.U32 {},shared_mem[{}];", value, offset);
}
void EmitWriteSharedU64(EmitContext& ctx, ScalarU32 offset, Register value) {
ctx.Add("STS.U32X2 {},shared_mem[{}];", value, offset);
}
void EmitWriteSharedU128(EmitContext& ctx, ScalarU32 offset, Register value) {
ctx.Add("STS.U32X4 {},shared_mem[{}];", value, offset);
}
} // namespace Shader::Backend::GLASM

View File

@ -0,0 +1,150 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/emit_glasm_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/profile.h"
namespace Shader::Backend::GLASM {
void EmitLaneId(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.S {}.x,{}.threadid;", inst, ctx.stage_name);
}
void EmitVoteAll(EmitContext& ctx, IR::Inst& inst, ScalarS32 pred) {
ctx.Add("TGALL.S {}.x,{};", inst, pred);
}
void EmitVoteAny(EmitContext& ctx, IR::Inst& inst, ScalarS32 pred) {
ctx.Add("TGANY.S {}.x,{};", inst, pred);
}
void EmitVoteEqual(EmitContext& ctx, IR::Inst& inst, ScalarS32 pred) {
ctx.Add("TGEQ.S {}.x,{};", inst, pred);
}
void EmitSubgroupBallot(EmitContext& ctx, IR::Inst& inst, ScalarS32 pred) {
ctx.Add("TGBALLOT {}.x,{};", inst, pred);
}
void EmitSubgroupEqMask(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.U {},{}.threadeqmask;", inst, ctx.stage_name);
}
void EmitSubgroupLtMask(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.U {},{}.threadltmask;", inst, ctx.stage_name);
}
void EmitSubgroupLeMask(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.U {},{}.threadlemask;", inst, ctx.stage_name);
}
void EmitSubgroupGtMask(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.U {},{}.threadgtmask;", inst, ctx.stage_name);
}
void EmitSubgroupGeMask(EmitContext& ctx, IR::Inst& inst) {
ctx.Add("MOV.U {},{}.threadgemask;", inst, ctx.stage_name);
}
static void Shuffle(EmitContext& ctx, IR::Inst& inst, ScalarU32 value, ScalarU32 index,
const IR::Value& clamp, const IR::Value& segmentation_mask,
std::string_view op) {
IR::Inst* const in_bounds{inst.GetAssociatedPseudoOperation(IR::Opcode::GetInBoundsFromOp)};
if (in_bounds) {
in_bounds->Invalidate();
}
std::string mask;
if (clamp.IsImmediate() && segmentation_mask.IsImmediate()) {
mask = fmt::to_string(clamp.U32() | (segmentation_mask.U32() << 8));
} else {
mask = "RC";
ctx.Add("BFI.U RC.x,{{5,8,0,0}},{},{};",
ScalarU32{ctx.reg_alloc.Consume(segmentation_mask)},
ScalarU32{ctx.reg_alloc.Consume(clamp)});
}
const Register value_ret{ctx.reg_alloc.Define(inst)};
if (in_bounds) {
const Register bounds_ret{ctx.reg_alloc.Define(*in_bounds)};
ctx.Add("SHF{}.U {},{},{},{};"
"MOV.U {}.x,{}.y;",
op, bounds_ret, value, index, mask, value_ret, bounds_ret);
} else {
ctx.Add("SHF{}.U {},{},{},{};"
"MOV.U {}.x,{}.y;",
op, value_ret, value, index, mask, value_ret, value_ret);
}
}
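// Note on the mask operand above: bits [7:0] hold the thread clamp and BFI.U
// places a 5-bit segmentation mask at offset 8. As a worked example with
// hypothetical immediates, clamp=31 with segmentation_mask=0 packs to "31",
// a free shuffle across a full 32-thread group; dynamic operands build the
// same layout in RC.x at runtime.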
void EmitShuffleIndex(EmitContext& ctx, IR::Inst& inst, ScalarU32 value, ScalarU32 index,
const IR::Value& clamp, const IR::Value& segmentation_mask) {
Shuffle(ctx, inst, value, index, clamp, segmentation_mask, "IDX");
}
void EmitShuffleUp(EmitContext& ctx, IR::Inst& inst, ScalarU32 value, ScalarU32 index,
const IR::Value& clamp, const IR::Value& segmentation_mask) {
Shuffle(ctx, inst, value, index, clamp, segmentation_mask, "UP");
}
void EmitShuffleDown(EmitContext& ctx, IR::Inst& inst, ScalarU32 value, ScalarU32 index,
const IR::Value& clamp, const IR::Value& segmentation_mask) {
Shuffle(ctx, inst, value, index, clamp, segmentation_mask, "DOWN");
}
void EmitShuffleButterfly(EmitContext& ctx, IR::Inst& inst, ScalarU32 value, ScalarU32 index,
const IR::Value& clamp, const IR::Value& segmentation_mask) {
Shuffle(ctx, inst, value, index, clamp, segmentation_mask, "XOR");
}
void EmitFSwizzleAdd(EmitContext& ctx, IR::Inst& inst, ScalarF32 op_a, ScalarF32 op_b,
ScalarU32 swizzle) {
const auto ret{ctx.reg_alloc.Define(inst)};
ctx.Add("AND.U RC.z,{}.threadid,3;"
"SHL.U RC.z,RC.z,1;"
"SHR.U RC.z,{},RC.z;"
"AND.U RC.z,RC.z,3;"
"MUL.F RC.x,{},FSWZA[RC.z];"
"MUL.F RC.y,{},FSWZB[RC.z];"
"ADD.F {}.x,RC.x,RC.y;",
ctx.stage_name, swizzle, op_a, op_b, ret);
}
void EmitDPdxFine(EmitContext& ctx, IR::Inst& inst, ScalarF32 p) {
if (ctx.profile.support_derivative_control) {
ctx.Add("DDX.FINE {}.x,{};", inst, p);
} else {
LOG_WARNING(Shader_GLASM, "Fine derivatives not supported by device");
ctx.Add("DDX {}.x,{};", inst, p);
}
}
void EmitDPdyFine(EmitContext& ctx, IR::Inst& inst, ScalarF32 p) {
if (ctx.profile.support_derivative_control) {
ctx.Add("DDY.FINE {}.x,{};", inst, p);
} else {
LOG_WARNING(Shader_GLASM, "Fine derivatives not supported by device");
ctx.Add("DDY {}.x,{};", inst, p);
}
}
void EmitDPdxCoarse(EmitContext& ctx, IR::Inst& inst, ScalarF32 p) {
if (ctx.profile.support_derivative_control) {
ctx.Add("DDX.COARSE {}.x,{};", inst, p);
} else {
LOG_WARNING(Shader_GLASM, "Coarse derivatives not supported by device");
ctx.Add("DDX {}.x,{};", inst, p);
}
}
void EmitDPdyCoarse(EmitContext& ctx, IR::Inst& inst, ScalarF32 p) {
if (ctx.profile.support_derivative_control) {
ctx.Add("DDY.COARSE {}.x,{};", inst, p);
} else {
LOG_WARNING(Shader_GLASM, "Coarse derivatives not supported by device");
ctx.Add("DDY {}.x,{};", inst, p);
}
}
} // namespace Shader::Backend::GLASM

View File

@ -0,0 +1,186 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <string>
#include <fmt/format.h>
#include "shader_recompiler/backend/glasm/emit_context.h"
#include "shader_recompiler/backend/glasm/reg_alloc.h"
#include "shader_recompiler/exception.h"
#include "shader_recompiler/frontend/ir/value.h"
namespace Shader::Backend::GLASM {
Register RegAlloc::Define(IR::Inst& inst) {
return Define(inst, false);
}
Register RegAlloc::LongDefine(IR::Inst& inst) {
return Define(inst, true);
}
Value RegAlloc::Peek(const IR::Value& value) {
if (value.IsImmediate()) {
return MakeImm(value);
} else {
return PeekInst(*value.Inst());
}
}
Value RegAlloc::Consume(const IR::Value& value) {
if (value.IsImmediate()) {
return MakeImm(value);
} else {
return ConsumeInst(*value.Inst());
}
}
void RegAlloc::Unref(IR::Inst& inst) {
IR::Inst& value_inst{AliasInst(inst)};
value_inst.DestructiveRemoveUsage();
if (!value_inst.HasUses()) {
Free(value_inst.Definition<Id>());
}
}
Register RegAlloc::AllocReg() {
Register ret;
ret.type = Type::Register;
ret.id = Alloc(false);
return ret;
}
Register RegAlloc::AllocLongReg() {
Register ret;
ret.type = Type::Register;
ret.id = Alloc(true);
return ret;
}
void RegAlloc::FreeReg(Register reg) {
Free(reg.id);
}
Value RegAlloc::MakeImm(const IR::Value& value) {
Value ret;
switch (value.Type()) {
case IR::Type::Void:
ret.type = Type::Void;
break;
case IR::Type::U1:
ret.type = Type::U32;
ret.imm_u32 = value.U1() ? 0xffffffff : 0;
break;
case IR::Type::U32:
ret.type = Type::U32;
ret.imm_u32 = value.U32();
break;
case IR::Type::F32:
ret.type = Type::U32;
ret.imm_u32 = Common::BitCast<u32>(value.F32());
break;
case IR::Type::U64:
ret.type = Type::U64;
ret.imm_u64 = value.U64();
break;
case IR::Type::F64:
ret.type = Type::U64;
ret.imm_u64 = Common::BitCast<u64>(value.F64());
break;
default:
throw NotImplementedException("Immediate type {}", value.Type());
}
return ret;
}
Register RegAlloc::Define(IR::Inst& inst, bool is_long) {
if (inst.HasUses()) {
inst.SetDefinition<Id>(Alloc(is_long));
} else {
Id id{};
id.is_long.Assign(is_long ? 1 : 0);
id.is_null.Assign(1);
inst.SetDefinition<Id>(id);
}
return Register{PeekInst(inst)};
}
Value RegAlloc::PeekInst(IR::Inst& inst) {
Value ret;
ret.type = Type::Register;
ret.id = inst.Definition<Id>();
return ret;
}
Value RegAlloc::ConsumeInst(IR::Inst& inst) {
Unref(inst);
return PeekInst(inst);
}
Id RegAlloc::Alloc(bool is_long) {
size_t& num_regs{is_long ? num_used_long_registers : num_used_registers};
std::bitset<NUM_REGS>& use{is_long ? long_register_use : register_use};
if (num_used_registers + num_used_long_registers < NUM_REGS) {
for (size_t reg = 0; reg < NUM_REGS; ++reg) {
if (use[reg]) {
continue;
}
num_regs = std::max(num_regs, reg + 1);
use[reg] = true;
Id ret{};
ret.is_valid.Assign(1);
ret.is_long.Assign(is_long ? 1 : 0);
ret.is_spill.Assign(0);
ret.is_condition_code.Assign(0);
ret.is_null.Assign(0);
ret.index.Assign(static_cast<u32>(reg));
return ret;
}
}
throw NotImplementedException("Register spilling");
}
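// Note: Alloc is a first-fit scan over the per-kind use bitset; num_regs
// tracks a high-water mark of the highest index handed out, so the backend
// can declare storage for only as many registers as were actually needed.
// Spilling past NUM_REGS is left unimplemented.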
void RegAlloc::Free(Id id) {
if (id.is_valid == 0) {
throw LogicError("Freeing invalid register");
}
if (id.is_spill != 0) {
throw NotImplementedException("Free spill");
}
if (id.is_long != 0) {
long_register_use[id.index] = false;
} else {
register_use[id.index] = false;
}
}
/*static*/ bool RegAlloc::IsAliased(const IR::Inst& inst) {
switch (inst.GetOpcode()) {
case IR::Opcode::Identity:
case IR::Opcode::BitCastU16F16:
case IR::Opcode::BitCastU32F32:
case IR::Opcode::BitCastU64F64:
case IR::Opcode::BitCastF16U16:
case IR::Opcode::BitCastF32U32:
case IR::Opcode::BitCastF64U64:
return true;
default:
return false;
}
}
/*static*/ IR::Inst& RegAlloc::AliasInst(IR::Inst& inst) {
IR::Inst* it{&inst};
while (IsAliased(*it)) {
const IR::Value arg{it->Arg(0)};
if (arg.IsImmediate()) {
break;
}
it = arg.InstRecursive();
}
return *it;
}
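// Example: for the chain Identity(BitCastF32U32(v)), AliasInst walks back to
// v's defining instruction, so Unref and Free treat the whole chain as one
// value and the bitcasts never allocate a register of their own.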
} // namespace Shader::Backend::GLASM

View File

@ -0,0 +1,303 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <bitset>
#include <fmt/format.h>
#include "common/bit_cast.h"
#include "common/bit_field.h"
#include "common/common_types.h"
#include "shader_recompiler/exception.h"
namespace Shader::IR {
class Inst;
class Value;
} // namespace Shader::IR
namespace Shader::Backend::GLASM {
class EmitContext;
enum class Type : u32 {
Void,
Register,
U32,
U64,
};
struct Id {
union {
u32 raw;
BitField<0, 1, u32> is_valid;
BitField<1, 1, u32> is_long;
BitField<2, 1, u32> is_spill;
BitField<3, 1, u32> is_condition_code;
BitField<4, 1, u32> is_null;
BitField<5, 27, u32> index;
};
bool operator==(Id rhs) const noexcept {
return raw == rhs.raw;
}
bool operator!=(Id rhs) const noexcept {
return !operator==(rhs);
}
};
static_assert(sizeof(Id) == sizeof(u32));
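// Encoding example (hypothetical): a valid long register with index 5 is
// raw = 1 (is_valid) | 2 (is_long) | 5 << 5 = 0xA3; it prints as "D5" through
// the Register formatter and as "D5.x" through the scalar formatters below.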
struct Value {
Type type;
union {
Id id;
u32 imm_u32;
u64 imm_u64;
};
bool operator==(const Value& rhs) const noexcept {
if (type != rhs.type) {
return false;
}
switch (type) {
case Type::Void:
return true;
case Type::Register:
return id == rhs.id;
case Type::U32:
return imm_u32 == rhs.imm_u32;
case Type::U64:
return imm_u64 == rhs.imm_u64;
}
return false;
}
bool operator!=(const Value& rhs) const noexcept {
return !operator==(rhs);
}
};
struct Register : Value {};
struct ScalarRegister : Value {};
struct ScalarU32 : Value {};
struct ScalarS32 : Value {};
struct ScalarF32 : Value {};
struct ScalarF64 : Value {};
class RegAlloc {
public:
RegAlloc() = default;
Register Define(IR::Inst& inst);
Register LongDefine(IR::Inst& inst);
[[nodiscard]] Value Peek(const IR::Value& value);
Value Consume(const IR::Value& value);
void Unref(IR::Inst& inst);
[[nodiscard]] Register AllocReg();
[[nodiscard]] Register AllocLongReg();
void FreeReg(Register reg);
void InvalidateConditionCodes() {
// This does nothing for now
}
[[nodiscard]] size_t NumUsedRegisters() const noexcept {
return num_used_registers;
}
[[nodiscard]] size_t NumUsedLongRegisters() const noexcept {
return num_used_long_registers;
}
[[nodiscard]] bool IsEmpty() const noexcept {
return register_use.none() && long_register_use.none();
}
/// Returns true if the instruction is expected to be aliased to another
static bool IsAliased(const IR::Inst& inst);
/// Returns the underlying value out of an alias sequence
static IR::Inst& AliasInst(IR::Inst& inst);
private:
static constexpr size_t NUM_REGS = 4096;
static constexpr size_t NUM_ELEMENTS = 4;
Value MakeImm(const IR::Value& value);
Register Define(IR::Inst& inst, bool is_long);
Value PeekInst(IR::Inst& inst);
Value ConsumeInst(IR::Inst& inst);
Id Alloc(bool is_long);
void Free(Id id);
size_t num_used_registers{};
size_t num_used_long_registers{};
std::bitset<NUM_REGS> register_use{};
std::bitset<NUM_REGS> long_register_use{};
};
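// Typical emitter usage: Define(inst) binds the destination register for an
// IR instruction, Consume(value) materializes an operand and releases its
// register once the last use is consumed, and AllocReg/FreeReg bracket
// short-lived scratch values.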
template <bool scalar, typename FormatContext>
auto FormatTo(FormatContext& ctx, Id id) {
if (id.is_condition_code != 0) {
throw NotImplementedException("Condition code emission");
}
if (id.is_spill != 0) {
throw NotImplementedException("Spill emission");
}
if constexpr (scalar) {
if (id.is_null != 0) {
return fmt::format_to(ctx.out(), "{}", id.is_long != 0 ? "DC.x" : "RC.x");
}
if (id.is_long != 0) {
return fmt::format_to(ctx.out(), "D{}.x", id.index.Value());
} else {
return fmt::format_to(ctx.out(), "R{}.x", id.index.Value());
}
} else {
if (id.is_null != 0) {
return fmt::format_to(ctx.out(), "{}", id.is_long != 0 ? "DC" : "RC");
}
if (id.is_long != 0) {
return fmt::format_to(ctx.out(), "D{}", id.index.Value());
} else {
return fmt::format_to(ctx.out(), "R{}", id.index.Value());
}
}
}
} // namespace Shader::Backend::GLASM
template <>
struct fmt::formatter<Shader::Backend::GLASM::Id> {
constexpr auto parse(format_parse_context& ctx) {
return ctx.begin();
}
template <typename FormatContext>
auto format(Shader::Backend::GLASM::Id id, FormatContext& ctx) {
return Shader::Backend::GLASM::FormatTo<true>(ctx, id);
}
};
template <>
struct fmt::formatter<Shader::Backend::GLASM::Register> {
constexpr auto parse(format_parse_context& ctx) {
return ctx.begin();
}
template <typename FormatContext>
auto format(const Shader::Backend::GLASM::Register& value, FormatContext& ctx) {
if (value.type != Shader::Backend::GLASM::Type::Register) {
throw Shader::InvalidArgument("Register value type is not register");
}
return Shader::Backend::GLASM::FormatTo<false>(ctx, value.id);
}
};
template <>
struct fmt::formatter<Shader::Backend::GLASM::ScalarRegister> {
constexpr auto parse(format_parse_context& ctx) {
return ctx.begin();
}
template <typename FormatContext>
auto format(const Shader::Backend::GLASM::ScalarRegister& value, FormatContext& ctx) {
if (value.type != Shader::Backend::GLASM::Type::Register) {
throw Shader::InvalidArgument("Register value type is not register");
}
return Shader::Backend::GLASM::FormatTo<true>(ctx, value.id);
}
};
template <>
struct fmt::formatter<Shader::Backend::GLASM::ScalarU32> {
constexpr auto parse(format_parse_context& ctx) {
return ctx.begin();
}
template <typename FormatContext>
auto format(const Shader::Backend::GLASM::ScalarU32& value, FormatContext& ctx) {
switch (value.type) {
case Shader::Backend::GLASM::Type::Void:
break;
case Shader::Backend::GLASM::Type::Register:
return Shader::Backend::GLASM::FormatTo<true>(ctx, value.id);
case Shader::Backend::GLASM::Type::U32:
return fmt::format_to(ctx.out(), "{}", value.imm_u32);
case Shader::Backend::GLASM::Type::U64:
break;
}
throw Shader::InvalidArgument("Invalid value type {}", value.type);
}
};
template <>
struct fmt::formatter<Shader::Backend::GLASM::ScalarS32> {
constexpr auto parse(format_parse_context& ctx) {
return ctx.begin();
}
template <typename FormatContext>
auto format(const Shader::Backend::GLASM::ScalarS32& value, FormatContext& ctx) {
switch (value.type) {
case Shader::Backend::GLASM::Type::Void:
break;
case Shader::Backend::GLASM::Type::Register:
return Shader::Backend::GLASM::FormatTo<true>(ctx, value.id);
case Shader::Backend::GLASM::Type::U32:
return fmt::format_to(ctx.out(), "{}", static_cast<s32>(value.imm_u32));
case Shader::Backend::GLASM::Type::U64:
break;
}
throw Shader::InvalidArgument("Invalid value type {}", value.type);
}
};
template <>
struct fmt::formatter<Shader::Backend::GLASM::ScalarF32> {
constexpr auto parse(format_parse_context& ctx) {
return ctx.begin();
}
template <typename FormatContext>
auto format(const Shader::Backend::GLASM::ScalarF32& value, FormatContext& ctx) {
switch (value.type) {
case Shader::Backend::GLASM::Type::Void:
break;
case Shader::Backend::GLASM::Type::Register:
return Shader::Backend::GLASM::FormatTo<true>(ctx, value.id);
case Shader::Backend::GLASM::Type::U32:
return fmt::format_to(ctx.out(), "{}", Common::BitCast<f32>(value.imm_u32));
case Shader::Backend::GLASM::Type::U64:
break;
}
throw Shader::InvalidArgument("Invalid value type {}", value.type);
}
};
template <>
struct fmt::formatter<Shader::Backend::GLASM::ScalarF64> {
constexpr auto parse(format_parse_context& ctx) {
return ctx.begin();
}
template <typename FormatContext>
auto format(const Shader::Backend::GLASM::ScalarF64& value, FormatContext& ctx) {
switch (value.type) {
case Shader::Backend::GLASM::Type::Void:
break;
case Shader::Backend::GLASM::Type::Register:
return Shader::Backend::GLASM::FormatTo<true>(ctx, value.id);
case Shader::Backend::GLASM::Type::U32:
break;
case Shader::Backend::GLASM::Type::U64:
return fmt::format_to(ctx.out(), "{}", Common::BitCast<f64>(value.imm_u64));
}
throw Shader::InvalidArgument("Invalid value type {}", value.type);
}
};

View File

@ -0,0 +1,715 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "shader_recompiler/backend/bindings.h"
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/frontend/ir/program.h"
#include "shader_recompiler/profile.h"
#include "shader_recompiler/runtime_info.h"
namespace Shader::Backend::GLSL {
namespace {
u32 CbufIndex(size_t offset) {
return (offset / 4) % 4;
}
char Swizzle(size_t offset) {
return "xyzw"[CbufIndex(offset)];
}
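// Worked example: byte offset 20 within a cbuf lands in vec4 word 20 / 16 = 1
// at element (20 / 4) % 4 = 1, i.e. "cbuf[1].y".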
std::string_view InterpDecorator(Interpolation interp) {
switch (interp) {
case Interpolation::Smooth:
return "";
case Interpolation::Flat:
return "flat ";
case Interpolation::NoPerspective:
return "noperspective ";
}
throw InvalidArgument("Invalid interpolation {}", interp);
}
std::string_view InputArrayDecorator(Stage stage) {
switch (stage) {
case Stage::Geometry:
case Stage::TessellationControl:
case Stage::TessellationEval:
return "[]";
default:
return "";
}
}
bool StoresPerVertexAttributes(Stage stage) {
switch (stage) {
case Stage::VertexA:
case Stage::VertexB:
case Stage::Geometry:
case Stage::TessellationEval:
return true;
default:
return false;
}
}
std::string OutputDecorator(Stage stage, u32 size) {
switch (stage) {
case Stage::TessellationControl:
return fmt::format("[{}]", size);
default:
return "";
}
}
std::string_view SamplerType(TextureType type, bool is_depth) {
if (is_depth) {
switch (type) {
case TextureType::Color1D:
return "sampler1DShadow";
case TextureType::ColorArray1D:
return "sampler1DArrayShadow";
case TextureType::Color2D:
return "sampler2DShadow";
case TextureType::ColorArray2D:
return "sampler2DArrayShadow";
case TextureType::ColorCube:
return "samplerCubeShadow";
case TextureType::ColorArrayCube:
return "samplerCubeArrayShadow";
default:
throw NotImplementedException("Texture type: {}", type);
}
}
switch (type) {
case TextureType::Color1D:
return "sampler1D";
case TextureType::ColorArray1D:
return "sampler1DArray";
case TextureType::Color2D:
return "sampler2D";
case TextureType::ColorArray2D:
return "sampler2DArray";
case TextureType::Color3D:
return "sampler3D";
case TextureType::ColorCube:
return "samplerCube";
case TextureType::ColorArrayCube:
return "samplerCubeArray";
case TextureType::Buffer:
return "samplerBuffer";
default:
throw NotImplementedException("Texture type: {}", type);
}
}
std::string_view ImageType(TextureType type) {
switch (type) {
case TextureType::Color1D:
return "uimage1D";
case TextureType::ColorArray1D:
return "uimage1DArray";
case TextureType::Color2D:
return "uimage2D";
case TextureType::ColorArray2D:
return "uimage2DArray";
case TextureType::Color3D:
return "uimage3D";
case TextureType::ColorCube:
return "uimageCube";
case TextureType::ColorArrayCube:
return "uimageCubeArray";
case TextureType::Buffer:
return "uimageBuffer";
default:
throw NotImplementedException("Image type: {}", type);
}
}
std::string_view ImageFormatString(ImageFormat format) {
switch (format) {
case ImageFormat::Typeless:
return "";
case ImageFormat::R8_UINT:
return ",r8ui";
case ImageFormat::R8_SINT:
return ",r8i";
case ImageFormat::R16_UINT:
return ",r16ui";
case ImageFormat::R16_SINT:
return ",r16i";
case ImageFormat::R32_UINT:
return ",r32ui";
case ImageFormat::R32G32_UINT:
return ",rg32ui";
case ImageFormat::R32G32B32A32_UINT:
return ",rgba32ui";
default:
throw NotImplementedException("Image format: {}", format);
}
}
std::string_view ImageAccessQualifier(bool is_written, bool is_read) {
if (is_written && !is_read) {
return "writeonly ";
}
if (is_read && !is_written) {
return "readonly ";
}
return "";
}
std::string_view GetTessMode(TessPrimitive primitive) {
switch (primitive) {
case TessPrimitive::Triangles:
return "triangles";
case TessPrimitive::Quads:
return "quads";
case TessPrimitive::Isolines:
return "isolines";
}
throw InvalidArgument("Invalid tessellation primitive {}", primitive);
}
std::string_view GetTessSpacing(TessSpacing spacing) {
switch (spacing) {
case TessSpacing::Equal:
return "equal_spacing";
case TessSpacing::FractionalOdd:
return "fractional_odd_spacing";
case TessSpacing::FractionalEven:
return "fractional_even_spacing";
}
throw InvalidArgument("Invalid tessellation spacing {}", spacing);
}
std::string_view InputPrimitive(InputTopology topology) {
switch (topology) {
case InputTopology::Points:
return "points";
case InputTopology::Lines:
return "lines";
case InputTopology::LinesAdjacency:
return "lines_adjacency";
case InputTopology::Triangles:
return "triangles";
case InputTopology::TrianglesAdjacency:
return "triangles_adjacency";
}
throw InvalidArgument("Invalid input topology {}", topology);
}
std::string_view OutputPrimitive(OutputTopology topology) {
switch (topology) {
case OutputTopology::PointList:
return "points";
case OutputTopology::LineStrip:
return "line_strip";
case OutputTopology::TriangleStrip:
return "triangle_strip";
}
throw InvalidArgument("Invalid output topology {}", topology);
}
void SetupLegacyOutPerVertex(EmitContext& ctx, std::string& header) {
if (!ctx.info.stores.Legacy()) {
return;
}
if (ctx.info.stores.FixedFunctionTexture()) {
header += "vec4 gl_TexCoord[8];";
}
if (ctx.info.stores.AnyComponent(IR::Attribute::ColorFrontDiffuseR)) {
header += "vec4 gl_FrontColor;";
}
if (ctx.info.stores.AnyComponent(IR::Attribute::ColorFrontSpecularR)) {
header += "vec4 gl_FrontSecondaryColor;";
}
if (ctx.info.stores.AnyComponent(IR::Attribute::ColorBackDiffuseR)) {
header += "vec4 gl_BackColor;";
}
if (ctx.info.stores.AnyComponent(IR::Attribute::ColorBackSpecularR)) {
header += "vec4 gl_BackSecondaryColor;";
}
}
void SetupOutPerVertex(EmitContext& ctx, std::string& header) {
if (!StoresPerVertexAttributes(ctx.stage)) {
return;
}
if (ctx.uses_geometry_passthrough) {
return;
}
header += "out gl_PerVertex{vec4 gl_Position;";
if (ctx.info.stores[IR::Attribute::PointSize]) {
header += "float gl_PointSize;";
}
if (ctx.info.stores.ClipDistances()) {
header += "float gl_ClipDistance[];";
}
if (ctx.info.stores[IR::Attribute::ViewportIndex] &&
ctx.profile.support_viewport_index_layer_non_geometry && ctx.stage != Stage::Geometry) {
header += "int gl_ViewportIndex;";
}
SetupLegacyOutPerVertex(ctx, header);
header += "};";
if (ctx.info.stores[IR::Attribute::ViewportIndex] && ctx.stage == Stage::Geometry) {
header += "out int gl_ViewportIndex;";
}
}
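// For instance, a vertex stage that stores point size gets the block
// "out gl_PerVertex{vec4 gl_Position;float gl_PointSize;};" from the
// function above.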
void SetupInPerVertex(EmitContext& ctx, std::string& header) {
// Currently only required for TessellationControl to adhere to
// ARB_separate_shader_objects requirements
if (ctx.stage != Stage::TessellationControl) {
return;
}
const bool loads_position{ctx.info.loads.AnyComponent(IR::Attribute::PositionX)};
const bool loads_point_size{ctx.info.loads[IR::Attribute::PointSize]};
const bool loads_clip_distance{ctx.info.loads.ClipDistances()};
const bool loads_per_vertex{loads_position || loads_point_size || loads_clip_distance};
if (!loads_per_vertex) {
return;
}
header += "in gl_PerVertex{";
if (loads_position) {
header += "vec4 gl_Position;";
}
if (loads_point_size) {
header += "float gl_PointSize;";
}
if (loads_clip_distance) {
header += "float gl_ClipDistance[];";
}
header += "}gl_in[gl_MaxPatchVertices];";
}
void SetupLegacyInPerFragment(EmitContext& ctx, std::string& header) {
if (!ctx.info.loads.Legacy()) {
return;
}
header += "in gl_PerFragment{";
if (ctx.info.loads.FixedFunctionTexture()) {
header += "vec4 gl_TexCoord[8];";
}
if (ctx.info.loads.AnyComponent(IR::Attribute::ColorFrontDiffuseR)) {
header += "vec4 gl_Color;";
}
header += "};";
}
} // Anonymous namespace
EmitContext::EmitContext(IR::Program& program, Bindings& bindings, const Profile& profile_,
const RuntimeInfo& runtime_info_)
: info{program.info}, profile{profile_}, runtime_info{runtime_info_}, stage{program.stage},
uses_geometry_passthrough{program.is_geometry_passthrough &&
profile.support_geometry_shader_passthrough} {
if (profile.need_fastmath_off) {
header += "#pragma optionNV(fastmath off)\n";
}
SetupExtensions();
switch (program.stage) {
case Stage::VertexA:
case Stage::VertexB:
stage_name = "vs";
break;
case Stage::TessellationControl:
stage_name = "tcs";
header += fmt::format("layout(vertices={})out;", program.invocations);
break;
case Stage::TessellationEval:
stage_name = "tes";
header += fmt::format("layout({},{},{})in;", GetTessMode(runtime_info.tess_primitive),
GetTessSpacing(runtime_info.tess_spacing),
runtime_info.tess_clockwise ? "cw" : "ccw");
break;
case Stage::Geometry:
stage_name = "gs";
header += fmt::format("layout({})in;", InputPrimitive(runtime_info.input_topology));
if (uses_geometry_passthrough) {
header += "layout(passthrough)in gl_PerVertex{vec4 gl_Position;};";
break;
} else if (program.is_geometry_passthrough &&
!profile.support_geometry_shader_passthrough) {
LOG_WARNING(Shader_GLSL, "Passthrough geometry program used but not supported");
}
header += fmt::format(
"layout({},max_vertices={})out;in gl_PerVertex{{vec4 gl_Position;}}gl_in[];",
OutputPrimitive(program.output_topology), program.output_vertices);
break;
case Stage::Fragment:
stage_name = "fs";
position_name = "gl_FragCoord";
if (runtime_info.force_early_z) {
header += "layout(early_fragment_tests)in;";
}
if (info.uses_sample_id) {
header += "in int gl_SampleID;";
}
if (info.stores_sample_mask) {
header += "out int gl_SampleMask[];";
}
break;
case Stage::Compute:
stage_name = "cs";
const u32 local_x{std::max(program.workgroup_size[0], 1u)};
const u32 local_y{std::max(program.workgroup_size[1], 1u)};
const u32 local_z{std::max(program.workgroup_size[2], 1u)};
header += fmt::format("layout(local_size_x={},local_size_y={},local_size_z={}) in;",
local_x, local_y, local_z);
break;
}
SetupOutPerVertex(*this, header);
SetupInPerVertex(*this, header);
SetupLegacyInPerFragment(*this, header);
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
if (!info.loads.Generic(index) || !runtime_info.previous_stage_stores.Generic(index)) {
continue;
}
const auto qualifier{uses_geometry_passthrough ? "passthrough"
: fmt::format("location={}", index)};
header += fmt::format("layout({}){}in vec4 in_attr{}{};", qualifier,
InterpDecorator(info.interpolation[index]), index,
InputArrayDecorator(stage));
}
for (size_t index = 0; index < info.uses_patches.size(); ++index) {
if (!info.uses_patches[index]) {
continue;
}
const auto qualifier{stage == Stage::TessellationControl ? "out" : "in"};
header += fmt::format("layout(location={})patch {} vec4 patch{};", index, qualifier, index);
}
if (stage == Stage::Fragment) {
for (size_t index = 0; index < info.stores_frag_color.size(); ++index) {
if (!info.stores_frag_color[index] && !profile.need_declared_frag_colors) {
continue;
}
header += fmt::format("layout(location={})out vec4 frag_color{};", index, index);
}
}
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
if (info.stores.Generic(index)) {
DefineGenericOutput(index, program.invocations);
}
}
DefineConstantBuffers(bindings);
DefineStorageBuffers(bindings);
SetupImages(bindings);
SetupTextures(bindings);
DefineHelperFunctions();
DefineConstants();
}
void EmitContext::SetupExtensions() {
header += "#extension GL_ARB_separate_shader_objects : enable\n";
if (info.uses_shadow_lod && profile.support_gl_texture_shadow_lod) {
header += "#extension GL_EXT_texture_shadow_lod : enable\n";
}
if (info.uses_int64 && profile.support_int64) {
header += "#extension GL_ARB_gpu_shader_int64 : enable\n";
}
if (info.uses_int64_bit_atomics) {
header += "#extension GL_NV_shader_atomic_int64 : enable\n";
}
if (info.uses_atomic_f32_add) {
header += "#extension GL_NV_shader_atomic_float : enable\n";
}
if (info.uses_atomic_f16x2_add || info.uses_atomic_f16x2_min || info.uses_atomic_f16x2_max) {
header += "#extension GL_NV_shader_atomic_fp16_vector : enable\n";
}
if (info.uses_fp16) {
if (profile.support_gl_nv_gpu_shader_5) {
header += "#extension GL_NV_gpu_shader5 : enable\n";
}
if (profile.support_gl_amd_gpu_shader_half_float) {
header += "#extension GL_AMD_gpu_shader_half_float : enable\n";
}
}
if (info.uses_subgroup_invocation_id || info.uses_subgroup_mask || info.uses_subgroup_vote ||
info.uses_subgroup_shuffles || info.uses_fswzadd) {
header += "#extension GL_ARB_shader_ballot : enable\n"
"#extension GL_ARB_shader_group_vote : enable\n";
if (!info.uses_int64 && profile.support_int64) {
header += "#extension GL_ARB_gpu_shader_int64 : enable\n";
}
if (profile.support_gl_warp_intrinsics) {
header += "#extension GL_NV_shader_thread_shuffle : enable\n";
}
}
if ((info.stores[IR::Attribute::ViewportIndex] || info.stores[IR::Attribute::Layer]) &&
profile.support_viewport_index_layer_non_geometry && stage != Stage::Geometry) {
header += "#extension GL_ARB_shader_viewport_layer_array : enable\n";
}
if (info.uses_sparse_residency && profile.support_gl_sparse_textures) {
header += "#extension GL_ARB_sparse_texture2 : enable\n";
}
if (info.stores[IR::Attribute::ViewportMask] && profile.support_viewport_mask) {
header += "#extension GL_NV_viewport_array2 : enable\n";
}
if (info.uses_typeless_image_reads) {
header += "#extension GL_EXT_shader_image_load_formatted : enable\n";
}
if (info.uses_derivatives && profile.support_gl_derivative_control) {
header += "#extension GL_ARB_derivative_control : enable\n";
}
if (uses_geometry_passthrough) {
header += "#extension GL_NV_geometry_shader_passthrough : enable\n";
}
}
void EmitContext::DefineConstantBuffers(Bindings& bindings) {
if (info.constant_buffer_descriptors.empty()) {
return;
}
for (const auto& desc : info.constant_buffer_descriptors) {
header += fmt::format(
"layout(std140,binding={}) uniform {}_cbuf_{}{{vec4 {}_cbuf{}[{}];}};",
bindings.uniform_buffer, stage_name, desc.index, stage_name, desc.index, 4 * 1024);
bindings.uniform_buffer += desc.count;
}
}
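// A generated declaration, for a hypothetical vertex-stage cbuf with index 2
// at binding 0, reads:
//   layout(std140,binding=0) uniform vs_cbuf_2{vec4 vs_cbuf2[4096];};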
void EmitContext::DefineStorageBuffers(Bindings& bindings) {
if (info.storage_buffers_descriptors.empty()) {
return;
}
u32 index{};
for (const auto& desc : info.storage_buffers_descriptors) {
header += fmt::format("layout(std430,binding={}) buffer {}_ssbo_{}{{uint {}_ssbo{}[];}};",
bindings.storage_buffer, stage_name, bindings.storage_buffer,
stage_name, index);
bindings.storage_buffer += desc.count;
index += desc.count;
}
}
void EmitContext::DefineGenericOutput(size_t index, u32 invocations) {
static constexpr std::string_view swizzle{"xyzw"};
const size_t base_index{static_cast<size_t>(IR::Attribute::Generic0X) + index * 4};
u32 element{0};
while (element < 4) {
std::string definition{fmt::format("layout(location={}", index)};
const u32 remainder{4 - element};
const TransformFeedbackVarying* xfb_varying{};
if (!runtime_info.xfb_varyings.empty()) {
xfb_varying = &runtime_info.xfb_varyings[base_index + element];
xfb_varying = xfb_varying && xfb_varying->components > 0 ? xfb_varying : nullptr;
}
const u32 num_components{xfb_varying ? xfb_varying->components : remainder};
if (element > 0) {
definition += fmt::format(",component={}", element);
}
if (xfb_varying) {
definition +=
fmt::format(",xfb_buffer={},xfb_stride={},xfb_offset={}", xfb_varying->buffer,
xfb_varying->stride, xfb_varying->offset);
}
std::string name{fmt::format("out_attr{}", index)};
if (num_components < 4 || element > 0) {
name += fmt::format("_{}", swizzle.substr(element, num_components));
}
const auto type{num_components == 1 ? "float" : fmt::format("vec{}", num_components)};
definition += fmt::format(")out {} {}{};", type, name, OutputDecorator(stage, invocations));
header += definition;
const GenericElementInfo element_info{
.name = name,
.first_element = element,
.num_components = num_components,
};
std::fill_n(output_generics[index].begin() + element, num_components, element_info);
element += num_components;
}
}
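// Example (hypothetical): an xfb varying capturing three components of
// generic 0 emits "layout(location=0,xfb_buffer=...,xfb_stride=...,
// xfb_offset=...)out vec3 out_attr0_xyz;", then
// "layout(location=0,component=3)out float out_attr0_w;" for the remainder.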
void EmitContext::DefineHelperFunctions() {
header += "\n#define ftoi floatBitsToInt\n#define ftou floatBitsToUint\n"
"#define itof intBitsToFloat\n#define utof uintBitsToFloat\n";
if (info.uses_global_increment || info.uses_shared_increment) {
header += "uint CasIncrement(uint op_a,uint op_b){return op_a>=op_b?0u:(op_a+1u);}";
}
if (info.uses_global_decrement || info.uses_shared_decrement) {
header += "uint CasDecrement(uint op_a,uint op_b){"
"return op_a==0||op_a>op_b?op_b:(op_a-1u);}";
}
if (info.uses_atomic_f32_add) {
header += "uint CasFloatAdd(uint op_a,float op_b){"
"return ftou(utof(op_a)+op_b);}";
}
if (info.uses_atomic_f32x2_add) {
header += "uint CasFloatAdd32x2(uint op_a,vec2 op_b){"
"return packHalf2x16(unpackHalf2x16(op_a)+op_b);}";
}
if (info.uses_atomic_f32x2_min) {
header += "uint CasFloatMin32x2(uint op_a,vec2 op_b){return "
"packHalf2x16(min(unpackHalf2x16(op_a),op_b));}";
}
if (info.uses_atomic_f32x2_max) {
header += "uint CasFloatMax32x2(uint op_a,vec2 op_b){return "
"packHalf2x16(max(unpackHalf2x16(op_a),op_b));}";
}
if (info.uses_atomic_f16x2_add) {
header += "uint CasFloatAdd16x2(uint op_a,f16vec2 op_b){return "
"packFloat2x16(unpackFloat2x16(op_a)+op_b);}";
}
if (info.uses_atomic_f16x2_min) {
header += "uint CasFloatMin16x2(uint op_a,f16vec2 op_b){return "
"packFloat2x16(min(unpackFloat2x16(op_a),op_b));}";
}
if (info.uses_atomic_f16x2_max) {
header += "uint CasFloatMax16x2(uint op_a,f16vec2 op_b){return "
"packFloat2x16(max(unpackFloat2x16(op_a),op_b));}";
}
if (info.uses_atomic_s32_min) {
header += "uint CasMinS32(uint op_a,uint op_b){return uint(min(int(op_a),int(op_b)));}";
}
if (info.uses_atomic_s32_max) {
header += "uint CasMaxS32(uint op_a,uint op_b){return uint(max(int(op_a),int(op_b)));}";
}
if (info.uses_global_memory && profile.support_int64) {
header += DefineGlobalMemoryFunctions();
}
if (info.loads_indexed_attributes) {
const bool is_array{stage == Stage::Geometry};
const auto vertex_arg{is_array ? ",uint vertex" : ""};
std::string func{
fmt::format("float IndexedAttrLoad(int offset{}){{int base_index=offset>>2;uint "
"masked_index=uint(base_index)&3u;switch(base_index>>2){{",
vertex_arg)};
if (info.loads.AnyComponent(IR::Attribute::PositionX)) {
const auto position_idx{is_array ? "gl_in[vertex]." : ""};
func += fmt::format("case {}:return {}{}[masked_index];",
static_cast<u32>(IR::Attribute::PositionX) >> 2, position_idx,
position_name);
}
const u32 base_attribute_value = static_cast<u32>(IR::Attribute::Generic0X) >> 2;
for (u32 index = 0; index < IR::NUM_GENERICS; ++index) {
if (!info.loads.Generic(index)) {
continue;
}
const auto vertex_idx{is_array ? "[vertex]" : ""};
func += fmt::format("case {}:return in_attr{}{}[masked_index];",
base_attribute_value + index, index, vertex_idx);
}
func += "default: return 0.0;}}";
header += func;
}
if (info.stores_indexed_attributes) {
// TODO
}
}
std::string EmitContext::DefineGlobalMemoryFunctions() {
const auto define_body{[&](std::string& func, size_t index, std::string_view return_statement) {
const auto& ssbo{info.storage_buffers_descriptors[index]};
const u32 size_cbuf_offset{ssbo.cbuf_offset + 8};
const auto ssbo_addr{fmt::format("ssbo_addr{}", index)};
const auto cbuf{fmt::format("{}_cbuf{}", stage_name, ssbo.cbuf_index)};
std::array<std::string, 2> addr_xy;
std::array<std::string, 2> size_xy;
for (size_t i = 0; i < addr_xy.size(); ++i) {
const auto addr_loc{ssbo.cbuf_offset + 4 * i};
const auto size_loc{size_cbuf_offset + 4 * i};
addr_xy[i] = fmt::format("ftou({}[{}].{})", cbuf, addr_loc / 16, Swizzle(addr_loc));
size_xy[i] = fmt::format("ftou({}[{}].{})", cbuf, size_loc / 16, Swizzle(size_loc));
}
const auto addr_pack{fmt::format("packUint2x32(uvec2({},{}))", addr_xy[0], addr_xy[1])};
const auto addr_statement{fmt::format("uint64_t {}={};", ssbo_addr, addr_pack)};
func += addr_statement;
const auto size_vec{fmt::format("uvec2({},{})", size_xy[0], size_xy[1])};
const auto comp_lhs{fmt::format("(addr>={})", ssbo_addr)};
const auto comp_rhs{fmt::format("(addr<({}+uint64_t({})))", ssbo_addr, size_vec)};
const auto comparison{fmt::format("if({}&&{}){{", comp_lhs, comp_rhs)};
func += comparison;
const auto ssbo_name{fmt::format("{}_ssbo{}", stage_name, index)};
func += fmt::format(fmt::runtime(return_statement), ssbo_name, ssbo_addr);
}};
std::string write_func{"void WriteGlobal32(uint64_t addr,uint data){"};
std::string write_func_64{"void WriteGlobal64(uint64_t addr,uvec2 data){"};
std::string write_func_128{"void WriteGlobal128(uint64_t addr,uvec4 data){"};
std::string load_func{"uint LoadGlobal32(uint64_t addr){"};
std::string load_func_64{"uvec2 LoadGlobal64(uint64_t addr){"};
std::string load_func_128{"uvec4 LoadGlobal128(uint64_t addr){"};
const size_t num_buffers{info.storage_buffers_descriptors.size()};
for (size_t index = 0; index < num_buffers; ++index) {
if (!info.nvn_buffer_used[index]) {
continue;
}
define_body(write_func, index, "{0}[uint(addr-{1})>>2]=data;return;}}");
define_body(write_func_64, index,
"{0}[uint(addr-{1})>>2]=data.x;{0}[uint(addr-{1}+4)>>2]=data.y;return;}}");
define_body(write_func_128, index,
"{0}[uint(addr-{1})>>2]=data.x;{0}[uint(addr-{1}+4)>>2]=data.y;{0}[uint("
"addr-{1}+8)>>2]=data.z;{0}[uint(addr-{1}+12)>>2]=data.w;return;}}");
define_body(load_func, index, "return {0}[uint(addr-{1})>>2];}}");
define_body(load_func_64, index,
"return uvec2({0}[uint(addr-{1})>>2],{0}[uint(addr-{1}+4)>>2]);}}");
define_body(load_func_128, index,
"return uvec4({0}[uint(addr-{1})>>2],{0}[uint(addr-{1}+4)>>2],{0}["
"uint(addr-{1}+8)>>2],{0}[uint(addr-{1}+12)>>2]);}}");
}
write_func += '}';
write_func_64 += '}';
write_func_128 += '}';
load_func += "return 0u;}";
load_func_64 += "return uvec2(0);}";
load_func_128 += "return uvec4(0);}";
return write_func + write_func_64 + write_func_128 + load_func + load_func_64 + load_func_128;
}
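// Sketch of one generated accessor, assuming NVN buffer 0 is used by a
// vertex shader: every global access compares the address against the SSBO
// base/size pair read back from the driver constant buffer, then indexes the
// matching SSBO:
//   uint LoadGlobal32(uint64_t addr){uint64_t ssbo_addr0=packUint2x32(...);
//   if((addr>=ssbo_addr0)&&(addr<(ssbo_addr0+uint64_t(...)))){
//   return vs_ssbo0[uint(addr-ssbo_addr0)>>2];}return 0u;}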
void EmitContext::SetupImages(Bindings& bindings) {
image_buffers.reserve(info.image_buffer_descriptors.size());
for (const auto& desc : info.image_buffer_descriptors) {
image_buffers.push_back({bindings.image, desc.count});
const auto format{ImageFormatString(desc.format)};
const auto qualifier{ImageAccessQualifier(desc.is_written, desc.is_read)};
const auto array_decorator{desc.count > 1 ? fmt::format("[{}]", desc.count) : ""};
header += fmt::format("layout(binding={}{}) uniform {}uimageBuffer img{}{};",
bindings.image, format, qualifier, bindings.image, array_decorator);
bindings.image += desc.count;
}
images.reserve(info.image_descriptors.size());
for (const auto& desc : info.image_descriptors) {
images.push_back({bindings.image, desc.count});
const auto format{ImageFormatString(desc.format)};
const auto image_type{ImageType(desc.type)};
const auto qualifier{ImageAccessQualifier(desc.is_written, desc.is_read)};
const auto array_decorator{desc.count > 1 ? fmt::format("[{}]", desc.count) : ""};
header += fmt::format("layout(binding={}{})uniform {}{} img{}{};", bindings.image, format,
qualifier, image_type, bindings.image, array_decorator);
bindings.image += desc.count;
}
}
void EmitContext::SetupTextures(Bindings& bindings) {
texture_buffers.reserve(info.texture_buffer_descriptors.size());
for (const auto& desc : info.texture_buffer_descriptors) {
texture_buffers.push_back({bindings.texture, desc.count});
const auto sampler_type{SamplerType(TextureType::Buffer, false)};
const auto array_decorator{desc.count > 1 ? fmt::format("[{}]", desc.count) : ""};
header += fmt::format("layout(binding={}) uniform {} tex{}{};", bindings.texture,
sampler_type, bindings.texture, array_decorator);
bindings.texture += desc.count;
}
textures.reserve(info.texture_descriptors.size());
for (const auto& desc : info.texture_descriptors) {
textures.push_back({bindings.texture, desc.count});
const auto sampler_type{SamplerType(desc.type, desc.is_depth)};
const auto array_decorator{desc.count > 1 ? fmt::format("[{}]", desc.count) : ""};
header += fmt::format("layout(binding={}) uniform {} tex{}{};", bindings.texture,
sampler_type, bindings.texture, array_decorator);
bindings.texture += desc.count;
}
}
void EmitContext::DefineConstants() {
if (info.uses_fswzadd) {
header += "const float FSWZ_A[]=float[4](-1.f,1.f,-1.f,0.f);"
"const float FSWZ_B[]=float[4](-1.f,-1.f,1.f,-1.f);";
}
}
} // namespace Shader::Backend::GLSL

View File

@ -0,0 +1,174 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <string>
#include <utility>
#include <vector>
#include <fmt/format.h>
#include "shader_recompiler/backend/glsl/var_alloc.h"
#include "shader_recompiler/stage.h"
namespace Shader {
struct Info;
struct Profile;
struct RuntimeInfo;
} // namespace Shader
namespace Shader::Backend {
struct Bindings;
}
namespace Shader::IR {
class Inst;
struct Program;
} // namespace Shader::IR
namespace Shader::Backend::GLSL {
struct GenericElementInfo {
std::string name;
u32 first_element{};
u32 num_components{};
};
struct TextureImageDefinition {
u32 binding;
u32 count;
};
class EmitContext {
public:
explicit EmitContext(IR::Program& program, Bindings& bindings, const Profile& profile_,
const RuntimeInfo& runtime_info_);
template <GlslVarType type, typename... Args>
void Add(const char* format_str, IR::Inst& inst, Args&&... args) {
const auto var_def{var_alloc.AddDefine(inst, type)};
if (var_def.empty()) {
// The instruction has no uses, so skip the assignment: format strings
// are expected to begin with "{}=", and +3 drops that prefix.
code += fmt::format(fmt::runtime(format_str + 3), std::forward<Args>(args)...);
} else {
code += fmt::format(fmt::runtime(format_str), var_def, std::forward<Args>(args)...);
}
// TODO: Remove this
code += '\n';
}
template <typename... Args>
void AddU1(const char* format_str, IR::Inst& inst, Args&&... args) {
Add<GlslVarType::U1>(format_str, inst, args...);
}
template <typename... Args>
void AddF16x2(const char* format_str, IR::Inst& inst, Args&&... args) {
Add<GlslVarType::F16x2>(format_str, inst, args...);
}
template <typename... Args>
void AddU32(const char* format_str, IR::Inst& inst, Args&&... args) {
Add<GlslVarType::U32>(format_str, inst, args...);
}
template <typename... Args>
void AddF32(const char* format_str, IR::Inst& inst, Args&&... args) {
Add<GlslVarType::F32>(format_str, inst, args...);
}
template <typename... Args>
void AddU64(const char* format_str, IR::Inst& inst, Args&&... args) {
Add<GlslVarType::U64>(format_str, inst, args...);
}
template <typename... Args>
void AddF64(const char* format_str, IR::Inst& inst, Args&&... args) {
Add<GlslVarType::F64>(format_str, inst, args...);
}
template <typename... Args>
void AddU32x2(const char* format_str, IR::Inst& inst, Args&&... args) {
Add<GlslVarType::U32x2>(format_str, inst, args...);
}
template <typename... Args>
void AddF32x2(const char* format_str, IR::Inst& inst, Args&&... args) {
Add<GlslVarType::F32x2>(format_str, inst, args...);
}
template <typename... Args>
void AddU32x3(const char* format_str, IR::Inst& inst, Args&&... args) {
Add<GlslVarType::U32x3>(format_str, inst, args...);
}
template <typename... Args>
void AddF32x3(const char* format_str, IR::Inst& inst, Args&&... args) {
Add<GlslVarType::F32x3>(format_str, inst, args...);
}
template <typename... Args>
void AddU32x4(const char* format_str, IR::Inst& inst, Args&&... args) {
Add<GlslVarType::U32x4>(format_str, inst, args...);
}
template <typename... Args>
void AddF32x4(const char* format_str, IR::Inst& inst, Args&&... args) {
Add<GlslVarType::F32x4>(format_str, inst, args...);
}
template <typename... Args>
void AddPrecF32(const char* format_str, IR::Inst& inst, Args&&... args) {
Add<GlslVarType::PrecF32>(format_str, inst, args...);
}
template <typename... Args>
void AddPrecF64(const char* format_str, IR::Inst& inst, Args&&... args) {
Add<GlslVarType::PrecF64>(format_str, inst, args...);
}
template <typename... Args>
void Add(const char* format_str, Args&&... args) {
code += fmt::format(fmt::runtime(format_str), std::forward<Args>(args)...);
// TODO: Remove this
code += '\n';
}
std::string header;
std::string code;
VarAlloc var_alloc;
const Info& info;
const Profile& profile;
const RuntimeInfo& runtime_info;
Stage stage{};
std::string_view stage_name = "invalid";
std::string_view position_name = "gl_Position";
std::vector<TextureImageDefinition> texture_buffers;
std::vector<TextureImageDefinition> image_buffers;
std::vector<TextureImageDefinition> textures;
std::vector<TextureImageDefinition> images;
std::array<std::array<GenericElementInfo, 4>, 32> output_generics{};
u32 num_safety_loop_vars{};
bool uses_y_direction{};
bool uses_cc_carry{};
bool uses_geometry_passthrough{};
private:
void SetupExtensions();
void DefineConstantBuffers(Bindings& bindings);
void DefineStorageBuffers(Bindings& bindings);
void DefineGenericOutput(size_t index, u32 invocations);
void DefineHelperFunctions();
void DefineConstants();
std::string DefineGlobalMemoryFunctions();
void SetupImages(Bindings& bindings);
void SetupTextures(Bindings& bindings);
};
} // namespace Shader::Backend::GLSL

View File

@ -0,0 +1,252 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <algorithm>
#include <string>
#include <tuple>
#include <type_traits>
#include "common/div_ceil.h"
#include "common/settings.h"
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/ir_emitter.h"
namespace Shader::Backend::GLSL {
namespace {
template <class Func>
struct FuncTraits {};
template <class ReturnType_, class... Args>
struct FuncTraits<ReturnType_ (*)(Args...)> {
using ReturnType = ReturnType_;
static constexpr size_t NUM_ARGS = sizeof...(Args);
template <size_t I>
using ArgType = std::tuple_element_t<I, std::tuple<Args...>>;
};
template <auto func, typename... Args>
void SetDefinition(EmitContext& ctx, IR::Inst* inst, Args... args) {
inst->SetDefinition<Id>(func(ctx, std::forward<Args>(args)...));
}
template <typename ArgType>
auto Arg(EmitContext& ctx, const IR::Value& arg) {
if constexpr (std::is_same_v<ArgType, std::string_view>) {
return ctx.var_alloc.Consume(arg);
} else if constexpr (std::is_same_v<ArgType, const IR::Value&>) {
return arg;
} else if constexpr (std::is_same_v<ArgType, u32>) {
return arg.U32();
} else if constexpr (std::is_same_v<ArgType, IR::Attribute>) {
return arg.Attribute();
} else if constexpr (std::is_same_v<ArgType, IR::Patch>) {
return arg.Patch();
} else if constexpr (std::is_same_v<ArgType, IR::Reg>) {
return arg.Reg();
}
}
template <auto func, bool is_first_arg_inst, size_t... I>
void Invoke(EmitContext& ctx, IR::Inst* inst, std::index_sequence<I...>) {
using Traits = FuncTraits<decltype(func)>;
if constexpr (std::is_same_v<typename Traits::ReturnType, Id>) {
if constexpr (is_first_arg_inst) {
SetDefinition<func>(
ctx, inst, *inst,
Arg<typename Traits::template ArgType<I + 2>>(ctx, inst->Arg(I))...);
} else {
SetDefinition<func>(
ctx, inst, Arg<typename Traits::template ArgType<I + 1>>(ctx, inst->Arg(I))...);
}
} else {
if constexpr (is_first_arg_inst) {
func(ctx, *inst, Arg<typename Traits::template ArgType<I + 2>>(ctx, inst->Arg(I))...);
} else {
func(ctx, Arg<typename Traits::template ArgType<I + 1>>(ctx, inst->Arg(I))...);
}
}
}
template <auto func>
void Invoke(EmitContext& ctx, IR::Inst* inst) {
using Traits = FuncTraits<decltype(func)>;
static_assert(Traits::NUM_ARGS >= 1, "Insufficient arguments");
if constexpr (Traits::NUM_ARGS == 1) {
Invoke<func, false>(ctx, inst, std::make_index_sequence<0>{});
} else {
using FirstArgType = typename Traits::template ArgType<1>;
static constexpr bool is_first_arg_inst = std::is_same_v<FirstArgType, IR::Inst&>;
using Indices = std::make_index_sequence<Traits::NUM_ARGS - (is_first_arg_inst ? 2 : 1)>;
Invoke<func, is_first_arg_inst>(ctx, inst, Indices{});
}
}
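// Example from this backend: EmitSharedAtomicIAdd32(EmitContext&, IR::Inst&,
// std::string_view, std::string_view) takes the instruction itself as its
// second parameter, so is_first_arg_inst is true and both operands are
// consumed through var_alloc as strings.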
void EmitInst(EmitContext& ctx, IR::Inst* inst) {
switch (inst->GetOpcode()) {
#define OPCODE(name, result_type, ...) \
case IR::Opcode::name: \
return Invoke<&Emit##name>(ctx, inst);
#include "shader_recompiler/frontend/ir/opcodes.inc"
#undef OPCODE
}
throw LogicError("Invalid opcode {}", inst->GetOpcode());
}
bool IsReference(IR::Inst& inst) {
return inst.GetOpcode() == IR::Opcode::Reference;
}
void PrecolorInst(IR::Inst& phi) {
// Insert phi moves before references to avoid overwriting other phis
const size_t num_args{phi.NumArgs()};
for (size_t i = 0; i < num_args; ++i) {
IR::Block& phi_block{*phi.PhiBlock(i)};
auto it{std::find_if_not(phi_block.rbegin(), phi_block.rend(), IsReference).base()};
IR::IREmitter ir{phi_block, it};
const IR::Value arg{phi.Arg(i)};
if (arg.IsImmediate()) {
ir.PhiMove(phi, arg);
} else {
ir.PhiMove(phi, IR::Value{arg.InstRecursive()});
}
}
for (size_t i = 0; i < num_args; ++i) {
IR::IREmitter{*phi.PhiBlock(i)}.Reference(IR::Value{&phi});
}
}
void Precolor(const IR::Program& program) {
for (IR::Block* const block : program.blocks) {
for (IR::Inst& phi : block->Instructions()) {
if (!IR::IsPhi(phi)) {
break;
}
PrecolorInst(phi);
}
}
}
void EmitCode(EmitContext& ctx, const IR::Program& program) {
for (const IR::AbstractSyntaxNode& node : program.syntax_list) {
switch (node.type) {
case IR::AbstractSyntaxNode::Type::Block:
for (IR::Inst& inst : node.data.block->Instructions()) {
EmitInst(ctx, &inst);
}
break;
case IR::AbstractSyntaxNode::Type::If:
ctx.Add("if({}){{", ctx.var_alloc.Consume(node.data.if_node.cond));
break;
case IR::AbstractSyntaxNode::Type::EndIf:
ctx.Add("}}");
break;
case IR::AbstractSyntaxNode::Type::Break:
if (node.data.break_node.cond.IsImmediate()) {
if (node.data.break_node.cond.U1()) {
ctx.Add("break;");
}
} else {
ctx.Add("if({}){{break;}}", ctx.var_alloc.Consume(node.data.break_node.cond));
}
break;
case IR::AbstractSyntaxNode::Type::Return:
case IR::AbstractSyntaxNode::Type::Unreachable:
ctx.Add("return;");
break;
case IR::AbstractSyntaxNode::Type::Loop:
ctx.Add("for(;;){{");
break;
case IR::AbstractSyntaxNode::Type::Repeat:
if (Settings::values.disable_shader_loop_safety_checks) {
ctx.Add("if(!{}){{break;}}}}", ctx.var_alloc.Consume(node.data.repeat.cond));
} else {
ctx.Add("if(--loop{}<0 || !{}){{break;}}}}", ctx.num_safety_loop_vars++,
ctx.var_alloc.Consume(node.data.repeat.cond));
}
break;
default:
throw NotImplementedException("AbstractSyntaxNode Type {}", node.type);
}
}
}
std::string GlslVersionSpecifier(const EmitContext& ctx) {
if (ctx.uses_y_direction || ctx.info.stores.Legacy() || ctx.info.loads.Legacy()) {
return " compatibility";
}
return "";
}
bool IsPreciseType(GlslVarType type) {
switch (type) {
case GlslVarType::PrecF32:
case GlslVarType::PrecF64:
return true;
default:
return false;
}
}
void DefineVariables(const EmitContext& ctx, std::string& header) {
for (u32 i = 0; i < static_cast<u32>(GlslVarType::Void); ++i) {
const auto type{static_cast<GlslVarType>(i)};
const auto& tracker{ctx.var_alloc.GetUseTracker(type)};
const auto type_name{ctx.var_alloc.GetGlslType(type)};
const bool has_precise_bug{ctx.stage == Stage::Fragment && ctx.profile.has_gl_precise_bug};
const auto precise{!has_precise_bug && IsPreciseType(type) ? "precise " : ""};
// Temps/return types that are never used are stored at index 0
if (tracker.uses_temp) {
header += fmt::format("{}{} t{}={}(0);", precise, type_name,
ctx.var_alloc.Representation(0, type), type_name);
}
for (u32 index = 0; index < tracker.num_used; ++index) {
header += fmt::format("{}{} {}={}(0);", precise, type_name,
ctx.var_alloc.Representation(index, type), type_name);
}
}
for (u32 i = 0; i < ctx.num_safety_loop_vars; ++i) {
header += fmt::format("int loop{}=0x2000;", i);
}
}
} // Anonymous namespace
std::string EmitGLSL(const Profile& profile, const RuntimeInfo& runtime_info, IR::Program& program,
Bindings& bindings) {
EmitContext ctx{program, bindings, profile, runtime_info};
Precolor(program);
EmitCode(ctx, program);
const std::string version{fmt::format("#version 450{}\n", GlslVersionSpecifier(ctx))};
ctx.header.insert(0, version);
if (program.shared_memory_size > 0) {
const auto requested_size{program.shared_memory_size};
const auto max_size{profile.gl_max_compute_smem_size};
const bool needs_clamp{requested_size > max_size};
if (needs_clamp) {
LOG_WARNING(Shader_GLSL, "Requested shared memory size ({}) exceeds device limit ({})",
requested_size, max_size);
}
const auto smem_size{needs_clamp ? max_size : requested_size};
ctx.header += fmt::format("shared uint smem[{}];", Common::DivCeil(smem_size, 4U));
}
ctx.header += "void main(){\n";
if (program.local_memory_size > 0) {
ctx.header += fmt::format("uint lmem[{}];", Common::DivCeil(program.local_memory_size, 4U));
}
DefineVariables(ctx, ctx.header);
if (ctx.uses_cc_carry) {
ctx.header += "uint carry;";
}
if (program.info.uses_subgroup_shuffles) {
ctx.header += "bool shfl_in_bounds;";
}
ctx.code.insert(0, ctx.header);
ctx.code += '}';
return ctx.code;
}
} // namespace Shader::Backend::GLSL

View File

@ -0,0 +1,24 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string>
#include "shader_recompiler/backend/bindings.h"
#include "shader_recompiler/frontend/ir/program.h"
#include "shader_recompiler/profile.h"
#include "shader_recompiler/runtime_info.h"
namespace Shader::Backend::GLSL {
[[nodiscard]] std::string EmitGLSL(const Profile& profile, const RuntimeInfo& runtime_info,
IR::Program& program, Bindings& bindings);
[[nodiscard]] inline std::string EmitGLSL(const Profile& profile, IR::Program& program) {
Bindings binding;
return EmitGLSL(profile, {}, program, binding);
}
} // namespace Shader::Backend::GLSL

View File

@ -0,0 +1,418 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
namespace Shader::Backend::GLSL {
namespace {
constexpr char cas_loop[]{
"for (;;){{uint old={};{}=atomicCompSwap({},old,{}({},{}));if({}==old){{break;}}}}"};
void SharedCasFunction(EmitContext& ctx, IR::Inst& inst, std::string_view offset,
std::string_view value, std::string_view function) {
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32)};
const std::string smem{fmt::format("smem[{}>>2]", offset)};
ctx.Add(cas_loop, smem, ret, smem, function, smem, value, ret);
}
void SsboCasFunction(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value, std::string_view function) {
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32)};
const std::string ssbo{fmt::format("{}_ssbo{}[{}>>2]", ctx.stage_name, binding.U32(),
ctx.var_alloc.Consume(offset))};
ctx.Add(cas_loop, ssbo, ret, ssbo, function, ssbo, value, ret);
}
void SsboCasFunctionF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value,
std::string_view function) {
const std::string ssbo{fmt::format("{}_ssbo{}[{}>>2]", ctx.stage_name, binding.U32(),
ctx.var_alloc.Consume(offset))};
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32)};
ctx.Add(cas_loop, ssbo, ret, ssbo, function, ssbo, value, ret);
ctx.AddF32("{}=utof({});", inst, ret);
}
} // Anonymous namespace
void EmitSharedAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value) {
ctx.AddU32("{}=atomicAdd(smem[{}>>2],{});", inst, pointer_offset, value);
}
void EmitSharedAtomicSMin32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value) {
const std::string u32_value{fmt::format("uint({})", value)};
SharedCasFunction(ctx, inst, pointer_offset, u32_value, "CasMinS32");
}
void EmitSharedAtomicUMin32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value) {
ctx.AddU32("{}=atomicMin(smem[{}>>2],{});", inst, pointer_offset, value);
}
void EmitSharedAtomicSMax32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value) {
const std::string u32_value{fmt::format("uint({})", value)};
SharedCasFunction(ctx, inst, pointer_offset, u32_value, "CasMaxS32");
}
void EmitSharedAtomicUMax32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value) {
ctx.AddU32("{}=atomicMax(smem[{}>>2],{});", inst, pointer_offset, value);
}
void EmitSharedAtomicInc32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value) {
SharedCasFunction(ctx, inst, pointer_offset, value, "CasIncrement");
}
void EmitSharedAtomicDec32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value) {
SharedCasFunction(ctx, inst, pointer_offset, value, "CasDecrement");
}
void EmitSharedAtomicAnd32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value) {
ctx.AddU32("{}=atomicAnd(smem[{}>>2],{});", inst, pointer_offset, value);
}
void EmitSharedAtomicOr32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value) {
ctx.AddU32("{}=atomicOr(smem[{}>>2],{});", inst, pointer_offset, value);
}
void EmitSharedAtomicXor32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value) {
ctx.AddU32("{}=atomicXor(smem[{}>>2],{});", inst, pointer_offset, value);
}
void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value) {
ctx.AddU32("{}=atomicExchange(smem[{}>>2],{});", inst, pointer_offset, value);
}
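// The 64-bit exchange below is emitted as two dependent 32-bit shared memory
// accesses, so it is not actually atomic; hence the warning at runtime.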
void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value) {
LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
ctx.AddU64("{}=packUint2x32(uvec2(smem[{}>>2],smem[({}+4)>>2]));", inst, pointer_offset,
pointer_offset);
ctx.Add("smem[{}>>2]=unpackUint2x32({}).x;smem[({}+4)>>2]=unpackUint2x32({}).y;",
pointer_offset, value, pointer_offset, value);
}
void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
ctx.AddU32("{}=atomicAdd({}_ssbo{}[{}>>2],{});", inst, ctx.stage_name, binding.U32(),
ctx.var_alloc.Consume(offset), value);
}
void EmitStorageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
const std::string u32_value{fmt::format("uint({})", value)};
SsboCasFunction(ctx, inst, binding, offset, u32_value, "CasMinS32");
}
void EmitStorageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
ctx.AddU32("{}=atomicMin({}_ssbo{}[{}>>2],{});", inst, ctx.stage_name, binding.U32(),
ctx.var_alloc.Consume(offset), value);
}
void EmitStorageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
const std::string u32_value{fmt::format("uint({})", value)};
SsboCasFunction(ctx, inst, binding, offset, u32_value, "CasMaxS32");
}
void EmitStorageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
ctx.AddU32("{}=atomicMax({}_ssbo{}[{}>>2],{});", inst, ctx.stage_name, binding.U32(),
ctx.var_alloc.Consume(offset), value);
}
void EmitStorageAtomicInc32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
SsboCasFunction(ctx, inst, binding, offset, value, "CasIncrement");
}
void EmitStorageAtomicDec32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
SsboCasFunction(ctx, inst, binding, offset, value, "CasDecrement");
}
void EmitStorageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
ctx.AddU32("{}=atomicAnd({}_ssbo{}[{}>>2],{});", inst, ctx.stage_name, binding.U32(),
ctx.var_alloc.Consume(offset), value);
}
void EmitStorageAtomicOr32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
ctx.AddU32("{}=atomicOr({}_ssbo{}[{}>>2],{});", inst, ctx.stage_name, binding.U32(),
ctx.var_alloc.Consume(offset), value);
}
void EmitStorageAtomicXor32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
ctx.AddU32("{}=atomicXor({}_ssbo{}[{}>>2],{});", inst, ctx.stage_name, binding.U32(),
ctx.var_alloc.Consume(offset), value);
}
void EmitStorageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
ctx.AddU32("{}=atomicExchange({}_ssbo{}[{}>>2],{});", inst, ctx.stage_name, binding.U32(),
ctx.var_alloc.Consume(offset), value);
}
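// The 64-bit SSBO fallbacks below operate on two 32-bit words. A sketch of
// what IAdd64 emits, with a hypothetical buffer "vs_ssbo0", offset "off" and
// value "v":
//   r0=packUint2x32(uvec2(vs_ssbo0[off>>2],vs_ssbo0[(off>>2)+1]));
//   vs_ssbo0[off>>2]+=unpackUint2x32(v).x;
//   vs_ssbo0[(off>>2)+1]+=unpackUint2x32(v).y;
// Besides not being atomic, the per-word addition drops any carry from the
// low word into the high word.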
void EmitStorageAtomicIAdd64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
binding.U32(), ctx.var_alloc.Consume(offset));
ctx.Add("{}_ssbo{}[{}>>2]+=unpackUint2x32({}).x;{}_ssbo{}[({}>>2)+1]+=unpackUint2x32({}).y;",
ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value, ctx.stage_name,
binding.U32(), ctx.var_alloc.Consume(offset), value);
}
void EmitStorageAtomicSMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
ctx.AddU64("{}=packInt2x32(ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
binding.U32(), ctx.var_alloc.Consume(offset));
ctx.Add("for(int i=0;i<2;++i){{ "
"{}_ssbo{}[({}>>2)+i]=uint(min(int({}_ssbo{}[({}>>2)+i]),unpackInt2x32(int64_t({}))[i])"
");}}",
ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
binding.U32(), ctx.var_alloc.Consume(offset), value);
}
void EmitStorageAtomicUMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
binding.U32(), ctx.var_alloc.Consume(offset));
ctx.Add("for(int i=0;i<2;++i){{ "
"{}_ssbo{}[({}>>2)+i]=min({}_ssbo{}[({}>>2)+i],unpackUint2x32(uint64_t({}))[i]);}}",
ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
binding.U32(), ctx.var_alloc.Consume(offset), value);
}
void EmitStorageAtomicSMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
ctx.AddU64("{}=packInt2x32(ivec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
binding.U32(), ctx.var_alloc.Consume(offset));
ctx.Add("for(int i=0;i<2;++i){{ "
"{}_ssbo{}[({}>>2)+i]=uint(max(int({}_ssbo{}[({}>>2)+i]),unpackInt2x32(int64_t({}))[i])"
");}}",
ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
binding.U32(), ctx.var_alloc.Consume(offset), value);
}
void EmitStorageAtomicUMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
LOG_WARNING(Shader_GLSL, "Int64 atomics not supported, fallback to non-atomic");
ctx.AddU64("{}=packUint2x32(uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}>>2)+1]));", inst,
ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
binding.U32(), ctx.var_alloc.Consume(offset));
ctx.Add("for(int "
"i=0;i<2;++i){{{}_ssbo{}[({}>>2)+i]=max({}_ssbo{}[({}>>2)+i],unpackUint2x32(uint64_t({}"
"))[i]);}}",
ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), ctx.stage_name,
binding.U32(), ctx.var_alloc.Consume(offset), value);
}
void EmitStorageAtomicAnd64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
ctx.AddU64(
"{}=packUint2x32(uvec2(atomicAnd({}_ssbo{}[{}>>2],unpackUint2x32({}).x),atomicAnd({}_"
"ssbo{}[({}>>2)+1],unpackUint2x32({}).y)));",
inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value, ctx.stage_name,
binding.U32(), ctx.var_alloc.Consume(offset), value);
}
void EmitStorageAtomicOr64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
ctx.AddU64("{}=packUint2x32(uvec2(atomicOr({}_ssbo{}[{}>>2],unpackUint2x32({}).x),atomicOr({}_"
"ssbo{}[({}>>2)+1],unpackUint2x32({}).y)));",
inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
}
void EmitStorageAtomicXor64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
ctx.AddU64(
"{}=packUint2x32(uvec2(atomicXor({}_ssbo{}[{}>>2],unpackUint2x32({}).x),atomicXor({}_"
"ssbo{}[({}>>2)+1],unpackUint2x32({}).y)));",
inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value, ctx.stage_name,
binding.U32(), ctx.var_alloc.Consume(offset), value);
}
void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
ctx.AddU64("{}=packUint2x32(uvec2(atomicExchange({}_ssbo{}[{}>>2],unpackUint2x32({}).x),"
"atomicExchange({}_ssbo{}[({}>>2)+1],unpackUint2x32({}).y)));",
inst, ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value,
ctx.stage_name, binding.U32(), ctx.var_alloc.Consume(offset), value);
}
void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
SsboCasFunctionF32(ctx, inst, binding, offset, value, "CasFloatAdd");
}
void EmitStorageAtomicAddF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
SsboCasFunction(ctx, inst, binding, offset, value, "CasFloatAdd16x2");
}
void EmitStorageAtomicAddF32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
SsboCasFunction(ctx, inst, binding, offset, value, "CasFloatAdd32x2");
}
void EmitStorageAtomicMinF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
SsboCasFunction(ctx, inst, binding, offset, value, "CasFloatMin16x2");
}
void EmitStorageAtomicMinF32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
SsboCasFunction(ctx, inst, binding, offset, value, "CasFloatMin32x2");
}
void EmitStorageAtomicMaxF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
SsboCasFunction(ctx, inst, binding, offset, value, "CasFloatMax16x2");
}
void EmitStorageAtomicMaxF32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value) {
SsboCasFunction(ctx, inst, binding, offset, value, "CasFloatMax32x2");
}
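// Global (raw address) atomics are never expected to reach this backend;
// presumably they are lowered to storage buffer accesses by earlier IR
// passes, so the emitters below simply throw.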
void EmitGlobalAtomicIAdd32(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicSMin32(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicUMin32(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicSMax32(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicUMax32(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicInc32(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicDec32(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicAnd32(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicOr32(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicXor32(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicExchange32(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicIAdd64(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicSMin64(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicUMin64(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicSMax64(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicUMax64(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicInc64(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicDec64(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicAnd64(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicOr64(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicXor64(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicExchange64(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicAddF32(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicAddF16x2(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicAddF32x2(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicMinF16x2(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicMinF32x2(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicMaxF16x2(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
void EmitGlobalAtomicMaxF32x2(EmitContext&) {
throw NotImplementedException("GLSL Instruction");
}
} // namespace Shader::Backend::GLSL


@@ -0,0 +1,21 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
namespace Shader::Backend::GLSL {
void EmitBarrier(EmitContext& ctx) {
ctx.Add("barrier();");
}
void EmitWorkgroupMemoryBarrier(EmitContext& ctx) {
ctx.Add("groupMemoryBarrier();");
}
void EmitDeviceMemoryBarrier(EmitContext& ctx) {
ctx.Add("memoryBarrier();");
}
} // namespace Shader::Backend::GLSL


@@ -0,0 +1,94 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
namespace Shader::Backend::GLSL {
namespace {
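// Forwards the variable definition of the aliased value to this instruction
// and transfers its usage count, so identities reuse the same GLSL variable
// instead of emitting a copy.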
void Alias(IR::Inst& inst, const IR::Value& value) {
if (value.IsImmediate()) {
return;
}
IR::Inst& value_inst{*value.InstRecursive()};
value_inst.DestructiveAddUsage(inst.UseCount());
value_inst.DestructiveRemoveUsage();
inst.SetDefinition(value_inst.Definition<Id>());
}
} // Anonymous namespace
void EmitIdentity(EmitContext&, IR::Inst& inst, const IR::Value& value) {
Alias(inst, value);
}
void EmitConditionRef(EmitContext& ctx, IR::Inst& inst, const IR::Value& value) {
// Fake one usage to get a real variable out of the condition
inst.DestructiveAddUsage(1);
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U1)};
const auto input{ctx.var_alloc.Consume(value)};
if (ret != input) {
ctx.Add("{}={};", ret, input);
}
}
void EmitBitCastU16F16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst) {
NotImplemented();
}
void EmitBitCastU32F32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32("{}=ftou({});", inst, value);
}
void EmitBitCastU64F64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU64("{}=doubleBitsToUint64({});", inst, value);
}
void EmitBitCastF16U16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst) {
NotImplemented();
}
void EmitBitCastF32U32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=utof({});", inst, value);
}
void EmitBitCastF64U64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF64("{}=uint64BitsToDouble({});", inst, value);
}
void EmitPackUint2x32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU64("{}=packUint2x32({});", inst, value);
}
void EmitUnpackUint2x32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32x2("{}=unpackUint2x32({});", inst, value);
}
void EmitPackFloat2x16(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32("{}=packFloat2x16({});", inst, value);
}
void EmitUnpackFloat2x16(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF16x2("{}=unpackFloat2x16({});", inst, value);
}
void EmitPackHalf2x16(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32("{}=packHalf2x16({});", inst, value);
}
void EmitUnpackHalf2x16(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32x2("{}=unpackHalf2x16({});", inst, value);
}
void EmitPackDouble2x32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF64("{}=packDouble2x32({});", inst, value);
}
void EmitUnpackDouble2x32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32x2("{}=unpackDouble2x32({});", inst, value);
}
} // namespace Shader::Backend::GLSL


@@ -0,0 +1,219 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
namespace Shader::Backend::GLSL {
namespace {
constexpr std::string_view SWIZZLE{"xyzw"};
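// A sketch of the two emitted forms, with hypothetical names result="r0",
// composite="c0", object="o" and index 1:
//   aliased (r0 == c0): c0.y=o;
//   otherwise:          r0=c0;r0.y=o;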
void CompositeInsert(EmitContext& ctx, std::string_view result, std::string_view composite,
std::string_view object, u32 index) {
if (result == composite) {
// The result is aliased with the composite
ctx.Add("{}.{}={};", composite, SWIZZLE[index], object);
} else {
ctx.Add("{}={};{}.{}={};", result, composite, result, SWIZZLE[index], object);
}
}
} // Anonymous namespace
void EmitCompositeConstructU32x2(EmitContext& ctx, IR::Inst& inst, std::string_view e1,
std::string_view e2) {
ctx.AddU32x2("{}=uvec2({},{});", inst, e1, e2);
}
void EmitCompositeConstructU32x3(EmitContext& ctx, IR::Inst& inst, std::string_view e1,
std::string_view e2, std::string_view e3) {
ctx.AddU32x3("{}=uvec3({},{},{});", inst, e1, e2, e3);
}
void EmitCompositeConstructU32x4(EmitContext& ctx, IR::Inst& inst, std::string_view e1,
std::string_view e2, std::string_view e3, std::string_view e4) {
ctx.AddU32x4("{}=uvec4({},{},{},{});", inst, e1, e2, e3, e4);
}
void EmitCompositeExtractU32x2(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
u32 index) {
ctx.AddU32("{}={}.{};", inst, composite, SWIZZLE[index]);
}
void EmitCompositeExtractU32x3(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
u32 index) {
ctx.AddU32("{}={}.{};", inst, composite, SWIZZLE[index]);
}
void EmitCompositeExtractU32x4(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
u32 index) {
ctx.AddU32("{}={}.{};", inst, composite, SWIZZLE[index]);
}
void EmitCompositeInsertU32x2(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
std::string_view object, u32 index) {
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32x2)};
CompositeInsert(ctx, ret, composite, object, index);
}
void EmitCompositeInsertU32x3(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
std::string_view object, u32 index) {
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32x3)};
CompositeInsert(ctx, ret, composite, object, index);
}
void EmitCompositeInsertU32x4(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
std::string_view object, u32 index) {
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32x4)};
CompositeInsert(ctx, ret, composite, object, index);
}
void EmitCompositeConstructF16x2([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] std::string_view e1,
[[maybe_unused]] std::string_view e2) {
NotImplemented();
}
void EmitCompositeConstructF16x3([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] std::string_view e1,
[[maybe_unused]] std::string_view e2,
[[maybe_unused]] std::string_view e3) {
NotImplemented();
}
void EmitCompositeConstructF16x4([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] std::string_view e1,
[[maybe_unused]] std::string_view e2,
[[maybe_unused]] std::string_view e3,
[[maybe_unused]] std::string_view e4) {
NotImplemented();
}
void EmitCompositeExtractF16x2([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] std::string_view composite,
[[maybe_unused]] u32 index) {
NotImplemented();
}
void EmitCompositeExtractF16x3([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] std::string_view composite,
[[maybe_unused]] u32 index) {
NotImplemented();
}
void EmitCompositeExtractF16x4([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] std::string_view composite,
[[maybe_unused]] u32 index) {
NotImplemented();
}
void EmitCompositeInsertF16x2([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] std::string_view composite,
[[maybe_unused]] std::string_view object,
[[maybe_unused]] u32 index) {
NotImplemented();
}
void EmitCompositeInsertF16x3([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] std::string_view composite,
[[maybe_unused]] std::string_view object,
[[maybe_unused]] u32 index) {
NotImplemented();
}
void EmitCompositeInsertF16x4([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] std::string_view composite,
[[maybe_unused]] std::string_view object,
[[maybe_unused]] u32 index) {
NotImplemented();
}
void EmitCompositeConstructF32x2(EmitContext& ctx, IR::Inst& inst, std::string_view e1,
std::string_view e2) {
ctx.AddF32x2("{}=vec2({},{});", inst, e1, e2);
}
void EmitCompositeConstructF32x3(EmitContext& ctx, IR::Inst& inst, std::string_view e1,
std::string_view e2, std::string_view e3) {
ctx.AddF32x3("{}=vec3({},{},{});", inst, e1, e2, e3);
}
void EmitCompositeConstructF32x4(EmitContext& ctx, IR::Inst& inst, std::string_view e1,
std::string_view e2, std::string_view e3, std::string_view e4) {
ctx.AddF32x4("{}=vec4({},{},{},{});", inst, e1, e2, e3, e4);
}
void EmitCompositeExtractF32x2(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
u32 index) {
ctx.AddF32("{}={}.{};", inst, composite, SWIZZLE[index]);
}
void EmitCompositeExtractF32x3(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
u32 index) {
ctx.AddF32("{}={}.{};", inst, composite, SWIZZLE[index]);
}
void EmitCompositeExtractF32x4(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
u32 index) {
ctx.AddF32("{}={}.{};", inst, composite, SWIZZLE[index]);
}
void EmitCompositeInsertF32x2(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
std::string_view object, u32 index) {
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::F32x2)};
CompositeInsert(ctx, ret, composite, object, index);
}
void EmitCompositeInsertF32x3(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
std::string_view object, u32 index) {
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::F32x3)};
CompositeInsert(ctx, ret, composite, object, index);
}
void EmitCompositeInsertF32x4(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
std::string_view object, u32 index) {
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
CompositeInsert(ctx, ret, composite, object, index);
}
void EmitCompositeConstructF64x2([[maybe_unused]] EmitContext& ctx) {
NotImplemented();
}
void EmitCompositeConstructF64x3([[maybe_unused]] EmitContext& ctx) {
NotImplemented();
}
void EmitCompositeConstructF64x4([[maybe_unused]] EmitContext& ctx) {
NotImplemented();
}
void EmitCompositeExtractF64x2([[maybe_unused]] EmitContext& ctx) {
NotImplemented();
}
void EmitCompositeExtractF64x3([[maybe_unused]] EmitContext& ctx) {
NotImplemented();
}
void EmitCompositeExtractF64x4([[maybe_unused]] EmitContext& ctx) {
NotImplemented();
}
void EmitCompositeInsertF64x2(EmitContext& ctx, std::string_view composite, std::string_view object,
u32 index) {
ctx.Add("{}.{}={};", composite, SWIZZLE[index], object);
}
void EmitCompositeInsertF64x3(EmitContext& ctx, std::string_view composite, std::string_view object,
u32 index) {
ctx.Add("{}.{}={};", composite, SWIZZLE[index], object);
}
void EmitCompositeInsertF64x4(EmitContext& ctx, std::string_view composite, std::string_view object,
u32 index) {
ctx.Add("{}.{}={};", composite, SWIZZLE[index], object);
}
} // namespace Shader::Backend::GLSL


@@ -0,0 +1,456 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/profile.h"
#include "shader_recompiler/runtime_info.h"
namespace Shader::Backend::GLSL {
namespace {
constexpr char SWIZZLE[]{"xyzw"};
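// Constant buffers are declared as arrays of vec4: a byte offset selects
// vec4 element offset/16 and component (offset/4)%4 within it.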
u32 CbufIndex(u32 offset) {
return (offset / 4) % 4;
}
char OffsetSwizzle(u32 offset) {
return SWIZZLE[CbufIndex(offset)];
}
bool IsInputArray(Stage stage) {
return stage == Stage::Geometry || stage == Stage::TessellationControl ||
stage == Stage::TessellationEval;
}
std::string InputVertexIndex(EmitContext& ctx, std::string_view vertex) {
return IsInputArray(ctx.stage) ? fmt::format("[{}]", vertex) : "";
}
std::string_view OutputVertexIndex(EmitContext& ctx) {
return ctx.stage == Stage::TessellationControl ? "[gl_InvocationID]" : "";
}
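// A sketch of the common 32-bit path, with hypothetical names ret="r0",
// cbuf "vs_cbuf0" and cast "ftou":
//   immediate offset 8:   r0=ftou(vs_cbuf0[0].z);
//   dynamic offset "off": r0=ftou(vs_cbuf0[off>>4][(off>>2)%4]);
// On drivers with the component indexing bug, the dynamic form becomes four
// assignments guarded by if((off>>2&3)==i), one per .xyzw component.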
void GetCbuf(EmitContext& ctx, std::string_view ret, const IR::Value& binding,
const IR::Value& offset, u32 num_bits, std::string_view cast = {},
std::string_view bit_offset = {}) {
const bool is_immediate{offset.IsImmediate()};
const bool component_indexing_bug{!is_immediate && ctx.profile.has_gl_component_indexing_bug};
if (is_immediate) {
const s32 signed_offset{static_cast<s32>(offset.U32())};
static constexpr u32 cbuf_size{0x10000};
if (signed_offset < 0 || offset.U32() > cbuf_size) {
LOG_WARNING(Shader_GLSL, "Immediate constant buffer offset is out of bounds");
ctx.Add("{}=0u;", ret);
return;
}
}
const auto offset_var{ctx.var_alloc.Consume(offset)};
const auto index{is_immediate ? fmt::format("{}", offset.U32() / 16)
: fmt::format("{}>>4", offset_var)};
const auto swizzle{is_immediate ? fmt::format(".{}", OffsetSwizzle(offset.U32()))
: fmt::format("[({}>>2)%4]", offset_var)};
const auto cbuf{fmt::format("{}_cbuf{}", ctx.stage_name, binding.U32())};
const auto cbuf_cast{fmt::format("{}({}[{}]{{}})", cast, cbuf, index)};
const auto extraction{num_bits == 32 ? cbuf_cast
: fmt ::format("bitfieldExtract({},int({}),{})", cbuf_cast,
bit_offset, num_bits)};
if (!component_indexing_bug) {
const auto result{fmt::format(fmt::runtime(extraction), swizzle)};
ctx.Add("{}={};", ret, result);
return;
}
const auto cbuf_offset{fmt::format("{}>>2", offset_var)};
for (u32 i = 0; i < 4; ++i) {
const auto swizzle_string{fmt::format(".{}", "xyzw"[i])};
const auto result{fmt::format(fmt::runtime(extraction), swizzle_string)};
ctx.Add("if(({}&3)=={}){}={};", cbuf_offset, i, ret, result);
}
}
void GetCbuf8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, const IR::Value& offset,
std::string_view cast) {
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32)};
if (offset.IsImmediate()) {
const auto bit_offset{fmt::format("{}", (offset.U32() % 4) * 8)};
GetCbuf(ctx, ret, binding, offset, 8, cast, bit_offset);
} else {
const auto offset_var{ctx.var_alloc.Consume(offset)};
const auto bit_offset{fmt::format("({}%4)*8", offset_var)};
GetCbuf(ctx, ret, binding, offset, 8, cast, bit_offset);
}
}
void GetCbuf16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding, const IR::Value& offset,
std::string_view cast) {
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32)};
if (offset.IsImmediate()) {
const auto bit_offset{fmt::format("{}", ((offset.U32() / 2) % 2) * 16)};
GetCbuf(ctx, ret, binding, offset, 16, cast, bit_offset);
} else {
const auto offset_var{ctx.var_alloc.Consume(offset)};
const auto bit_offset{fmt::format("(({}>>1)%2)*16", offset_var)};
GetCbuf(ctx, ret, binding, offset, 16, cast, bit_offset);
}
}
u32 TexCoordIndex(IR::Attribute attr) {
return (static_cast<u32>(attr) - static_cast<u32>(IR::Attribute::FixedFncTexture0S)) / 4;
}
} // Anonymous namespace
void EmitGetCbufU8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
GetCbuf8(ctx, inst, binding, offset, "ftou");
}
void EmitGetCbufS8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
GetCbuf8(ctx, inst, binding, offset, "ftoi");
}
void EmitGetCbufU16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
GetCbuf16(ctx, inst, binding, offset, "ftou");
}
void EmitGetCbufS16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
GetCbuf16(ctx, inst, binding, offset, "ftoi");
}
void EmitGetCbufU32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32)};
GetCbuf(ctx, ret, binding, offset, 32, "ftou");
}
void EmitGetCbufF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::F32)};
GetCbuf(ctx, ret, binding, offset, 32);
}
void EmitGetCbufU32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
const auto cbuf{fmt::format("{}_cbuf{}", ctx.stage_name, binding.U32())};
if (offset.IsImmediate()) {
static constexpr u32 cbuf_size{0x10000};
const u32 u32_offset{offset.U32()};
const s32 signed_offset{static_cast<s32>(offset.U32())};
if (signed_offset < 0 || u32_offset > cbuf_size) {
LOG_WARNING(Shader_GLSL, "Immediate constant buffer offset is out of bounds");
ctx.AddU32x2("{}=uvec2(0u);", inst);
return;
}
if (u32_offset % 2 == 0) {
ctx.AddU32x2("{}=ftou({}[{}].{}{});", inst, cbuf, u32_offset / 16,
OffsetSwizzle(u32_offset), OffsetSwizzle(u32_offset + 4));
} else {
ctx.AddU32x2("{}=uvec2(ftou({}[{}].{}),ftou({}[{}].{}));", inst, cbuf, u32_offset / 16,
OffsetSwizzle(u32_offset), cbuf, (u32_offset + 4) / 16,
OffsetSwizzle(u32_offset + 4));
}
return;
}
const auto offset_var{ctx.var_alloc.Consume(offset)};
if (!ctx.profile.has_gl_component_indexing_bug) {
ctx.AddU32x2("{}=uvec2(ftou({}[{}>>4][({}>>2)%4]),ftou({}[({}+4)>>4][(({}+4)>>2)%4]));",
inst, cbuf, offset_var, offset_var, cbuf, offset_var, offset_var);
return;
}
const auto ret{ctx.var_alloc.Define(inst, GlslVarType::U32x2)};
const auto cbuf_offset{fmt::format("{}>>2", offset_var)};
for (u32 swizzle = 0; swizzle < 4; ++swizzle) {
ctx.Add("if(({}&3)=={}){}=uvec2(ftou({}[{}>>4].{}),ftou({}[({}+4)>>4].{}));", cbuf_offset,
swizzle, ret, cbuf, offset_var, "xyzw"[swizzle], cbuf, offset_var,
"xyzw"[(swizzle + 1) % 4]);
}
}
void EmitGetAttribute(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr,
std::string_view vertex) {
const u32 element{static_cast<u32>(attr) % 4};
const char swizzle{"xyzw"[element]};
if (IR::IsGeneric(attr)) {
const u32 index{IR::GenericAttributeIndex(attr)};
if (!ctx.runtime_info.previous_stage_stores.Generic(index, element)) {
if (element == 3) {
ctx.AddF32("{}=1.f;", inst, attr);
} else {
ctx.AddF32("{}=0.f;", inst, attr);
}
return;
}
ctx.AddF32("{}=in_attr{}{}.{};", inst, index, InputVertexIndex(ctx, vertex), swizzle);
return;
}
// GLSL only exposes 8 legacy texcoords
if (attr >= IR::Attribute::FixedFncTexture8S && attr <= IR::Attribute::FixedFncTexture9Q) {
LOG_WARNING(Shader_GLSL, "GLSL does not allow access to gl_TexCoord[{}]",
TexCoordIndex(attr));
ctx.AddF32("{}=0.f;", inst);
return;
}
if (attr >= IR::Attribute::FixedFncTexture0S && attr <= IR::Attribute::FixedFncTexture7Q) {
const u32 index{TexCoordIndex(attr)};
ctx.AddF32("{}=gl_TexCoord[{}].{};", inst, index, swizzle);
return;
}
switch (attr) {
case IR::Attribute::PrimitiveId:
ctx.AddF32("{}=itof(gl_PrimitiveID);", inst);
break;
case IR::Attribute::PositionX:
case IR::Attribute::PositionY:
case IR::Attribute::PositionZ:
case IR::Attribute::PositionW: {
const bool is_array{IsInputArray(ctx.stage)};
const auto input_decorator{is_array ? fmt::format("gl_in[{}].", vertex) : ""};
ctx.AddF32("{}={}{}.{};", inst, input_decorator, ctx.position_name, swizzle);
break;
}
case IR::Attribute::ColorFrontDiffuseR:
case IR::Attribute::ColorFrontDiffuseG:
case IR::Attribute::ColorFrontDiffuseB:
case IR::Attribute::ColorFrontDiffuseA:
if (ctx.stage == Stage::Fragment) {
ctx.AddF32("{}=gl_Color.{};", inst, swizzle);
} else {
ctx.AddF32("{}=gl_FrontColor.{};", inst, swizzle);
}
break;
case IR::Attribute::PointSpriteS:
case IR::Attribute::PointSpriteT:
ctx.AddF32("{}=gl_PointCoord.{};", inst, swizzle);
break;
case IR::Attribute::TessellationEvaluationPointU:
case IR::Attribute::TessellationEvaluationPointV:
ctx.AddF32("{}=gl_TessCoord.{};", inst, swizzle);
break;
case IR::Attribute::InstanceId:
ctx.AddF32("{}=itof(gl_InstanceID);", inst);
break;
case IR::Attribute::VertexId:
ctx.AddF32("{}=itof(gl_VertexID);", inst);
break;
case IR::Attribute::FrontFace:
ctx.AddF32("{}=itof(gl_FrontFacing?-1:0);", inst);
break;
default:
throw NotImplementedException("Get attribute {}", attr);
}
}
void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, std::string_view value,
[[maybe_unused]] std::string_view vertex) {
if (IR::IsGeneric(attr)) {
const u32 index{IR::GenericAttributeIndex(attr)};
const u32 attr_element{IR::GenericAttributeElement(attr)};
const GenericElementInfo& info{ctx.output_generics.at(index).at(attr_element)};
const auto output_decorator{OutputVertexIndex(ctx)};
if (info.num_components == 1) {
ctx.Add("{}{}={};", info.name, output_decorator, value);
} else {
const u32 index_element{attr_element - info.first_element};
ctx.Add("{}{}.{}={};", info.name, output_decorator, "xyzw"[index_element], value);
}
return;
}
const u32 element{static_cast<u32>(attr) % 4};
const char swizzle{"xyzw"[element]};
// GLSL only exposes 8 legacy texcoords
if (attr >= IR::Attribute::FixedFncTexture8S && attr <= IR::Attribute::FixedFncTexture9Q) {
LOG_WARNING(Shader_GLSL, "GLSL does not allow access to gl_TexCoord[{}]",
TexCoordIndex(attr));
return;
}
if (attr >= IR::Attribute::FixedFncTexture0S && attr <= IR::Attribute::FixedFncTexture7Q) {
const u32 index{TexCoordIndex(attr)};
ctx.Add("gl_TexCoord[{}].{}={};", index, swizzle, value);
return;
}
switch (attr) {
case IR::Attribute::Layer:
if (ctx.stage != Stage::Geometry &&
!ctx.profile.support_viewport_index_layer_non_geometry) {
LOG_WARNING(Shader_GLSL, "Shader stores viewport layer but device does not support "
"viewport layer extension");
break;
}
ctx.Add("gl_Layer=ftoi({});", value);
break;
case IR::Attribute::ViewportIndex:
if (ctx.stage != Stage::Geometry &&
!ctx.profile.support_viewport_index_layer_non_geometry) {
LOG_WARNING(Shader_GLSL, "Shader stores viewport index but device does not support "
"viewport layer extension");
break;
}
ctx.Add("gl_ViewportIndex=ftoi({});", value);
break;
case IR::Attribute::ViewportMask:
if (ctx.stage != Stage::Geometry && !ctx.profile.support_viewport_mask) {
LOG_WARNING(
Shader_GLSL,
"Shader stores viewport mask but device does not support viewport mask extension");
break;
}
ctx.Add("gl_ViewportMask[0]=ftoi({});", value);
break;
case IR::Attribute::PointSize:
ctx.Add("gl_PointSize={};", value);
break;
case IR::Attribute::PositionX:
case IR::Attribute::PositionY:
case IR::Attribute::PositionZ:
case IR::Attribute::PositionW:
ctx.Add("gl_Position.{}={};", swizzle, value);
break;
case IR::Attribute::ColorFrontDiffuseR:
case IR::Attribute::ColorFrontDiffuseG:
case IR::Attribute::ColorFrontDiffuseB:
case IR::Attribute::ColorFrontDiffuseA:
ctx.Add("gl_FrontColor.{}={};", swizzle, value);
break;
case IR::Attribute::ColorFrontSpecularR:
case IR::Attribute::ColorFrontSpecularG:
case IR::Attribute::ColorFrontSpecularB:
case IR::Attribute::ColorFrontSpecularA:
ctx.Add("gl_FrontSecondaryColor.{}={};", swizzle, value);
break;
case IR::Attribute::ColorBackDiffuseR:
case IR::Attribute::ColorBackDiffuseG:
case IR::Attribute::ColorBackDiffuseB:
case IR::Attribute::ColorBackDiffuseA:
ctx.Add("gl_BackColor.{}={};", swizzle, value);
break;
case IR::Attribute::ColorBackSpecularR:
case IR::Attribute::ColorBackSpecularG:
case IR::Attribute::ColorBackSpecularB:
case IR::Attribute::ColorBackSpecularA:
ctx.Add("gl_BackSecondaryColor.{}={};", swizzle, value);
break;
case IR::Attribute::FogCoordinate:
ctx.Add("gl_FogFragCoord={};", value);
break;
case IR::Attribute::ClipDistance0:
case IR::Attribute::ClipDistance1:
case IR::Attribute::ClipDistance2:
case IR::Attribute::ClipDistance3:
case IR::Attribute::ClipDistance4:
case IR::Attribute::ClipDistance5:
case IR::Attribute::ClipDistance6:
case IR::Attribute::ClipDistance7: {
const u32 index{static_cast<u32>(attr) - static_cast<u32>(IR::Attribute::ClipDistance0)};
ctx.Add("gl_ClipDistance[{}]={};", index, value);
break;
}
default:
throw NotImplementedException("Set attribute {}", attr);
}
}
void EmitGetAttributeIndexed(EmitContext& ctx, IR::Inst& inst, std::string_view offset,
std::string_view vertex) {
const bool is_array{ctx.stage == Stage::Geometry};
const auto vertex_arg{is_array ? fmt::format(",{}", vertex) : ""};
ctx.AddF32("{}=IndexedAttrLoad(int({}){});", inst, offset, vertex_arg);
}
void EmitSetAttributeIndexed([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] std::string_view offset,
[[maybe_unused]] std::string_view value,
[[maybe_unused]] std::string_view vertex) {
NotImplemented();
}
void EmitGetPatch(EmitContext& ctx, IR::Inst& inst, IR::Patch patch) {
if (!IR::IsGeneric(patch)) {
throw NotImplementedException("Non-generic patch load");
}
const u32 index{IR::GenericPatchIndex(patch)};
const u32 element{IR::GenericPatchElement(patch)};
const char swizzle{"xyzw"[element]};
ctx.AddF32("{}=patch{}.{};", inst, index, swizzle);
}
void EmitSetPatch(EmitContext& ctx, IR::Patch patch, std::string_view value) {
if (IR::IsGeneric(patch)) {
const u32 index{IR::GenericPatchIndex(patch)};
const u32 element{IR::GenericPatchElement(patch)};
ctx.Add("patch{}.{}={};", index, "xyzw"[element], value);
return;
}
switch (patch) {
case IR::Patch::TessellationLodLeft:
case IR::Patch::TessellationLodRight:
case IR::Patch::TessellationLodTop:
case IR::Patch::TessellationLodBottom: {
const u32 index{static_cast<u32>(patch) - u32(IR::Patch::TessellationLodLeft)};
ctx.Add("gl_TessLevelOuter[{}]={};", index, value);
break;
}
case IR::Patch::TessellationLodInteriorU:
ctx.Add("gl_TessLevelInner[0]={};", value);
break;
case IR::Patch::TessellationLodInteriorV:
ctx.Add("gl_TessLevelInner[1]={};", value);
break;
default:
throw NotImplementedException("Patch {}", patch);
}
}
void EmitSetFragColor(EmitContext& ctx, u32 index, u32 component, std::string_view value) {
const char swizzle{"xyzw"[component]};
ctx.Add("frag_color{}.{}={};", index, swizzle, value);
}
void EmitSetSampleMask(EmitContext& ctx, std::string_view value) {
ctx.Add("gl_SampleMask[0]=int({});", value);
}
void EmitSetFragDepth(EmitContext& ctx, std::string_view value) {
ctx.Add("gl_FragDepth={};", value);
}
void EmitLocalInvocationId(EmitContext& ctx, IR::Inst& inst) {
ctx.AddU32x3("{}=gl_LocalInvocationID;", inst);
}
void EmitWorkgroupId(EmitContext& ctx, IR::Inst& inst) {
ctx.AddU32x3("{}=gl_WorkGroupID;", inst);
}
void EmitInvocationId(EmitContext& ctx, IR::Inst& inst) {
ctx.AddU32("{}=uint(gl_InvocationID);", inst);
}
void EmitSampleId(EmitContext& ctx, IR::Inst& inst) {
ctx.AddU32("{}=uint(gl_SampleID);", inst);
}
void EmitIsHelperInvocation(EmitContext& ctx, IR::Inst& inst) {
ctx.AddU1("{}=gl_HelperInvocation;", inst);
}
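// The Y-direction flip factor is smuggled through legacy fixed-function
// state; presumably the host renderer writes it into the front material's
// ambient alpha when uses_y_direction is set.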
void EmitYDirection(EmitContext& ctx, IR::Inst& inst) {
ctx.uses_y_direction = true;
ctx.AddF32("{}=gl_FrontMaterial.ambient.a;", inst);
}
void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, std::string_view word_offset) {
ctx.AddU32("{}=lmem[{}];", inst, word_offset);
}
void EmitWriteLocal(EmitContext& ctx, std::string_view word_offset, std::string_view value) {
ctx.Add("lmem[{}]={};", word_offset, value);
}
} // namespace Shader::Backend::GLSL


@@ -0,0 +1,21 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/exception.h"
namespace Shader::Backend::GLSL {
void EmitJoin(EmitContext&) {
throw NotImplementedException("Join shouldn't be emitted");
}
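// GLSL has no demote operation; discard is the closest approximation, though
// unlike SPIR-V's OpDemoteToHelperInvocation it may terminate the invocation
// instead of keeping it alive as a helper.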
void EmitDemoteToHelperInvocation(EmitContext& ctx) {
ctx.Add("discard;");
}
} // namespace Shader::Backend::GLSL


@@ -0,0 +1,230 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
namespace Shader::Backend::GLSL {
void EmitConvertS16F16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
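// Truncates the float to an integer, keeps the low 16 bits, and forces bit 15
// to the 32-bit sign bit, approximating a signed 16-bit conversion without a
// native s16 type.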
void EmitConvertS16F32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32("{}=(int({})&0xffff)|(bitfieldExtract(int({}),31,1)<<15);", inst, value, value);
}
void EmitConvertS16F64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertS32F16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertS32F32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32("{}=int({});", inst, value);
}
void EmitConvertS32F64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32("{}=int({});", inst, value);
}
void EmitConvertS64F16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertS64F32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU64("{}=int64_t({});", inst, value);
}
void EmitConvertS64F64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU64("{}=int64_t({});", inst, value);
}
void EmitConvertU16F16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertU16F32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertU16F64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertU32F16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertU32F32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32("{}=uint({});", inst, value);
}
void EmitConvertU32F64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32("{}=uint({});", inst, value);
}
void EmitConvertU64F16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertU64F32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU64("{}=uint64_t({});", inst, value);
}
void EmitConvertU64F64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU64("{}=uint64_t({});", inst, value);
}
void EmitConvertU64U32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU64("{}=uint64_t({});", inst, value);
}
void EmitConvertU32U64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32("{}=uint({});", inst, value);
}
void EmitConvertF16F32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertF32F16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertF32F64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=float({});", inst, value);
}
void EmitConvertF64F32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF64("{}=double({});", inst, value);
}
void EmitConvertF16S8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertF16S16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertF16S32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertF16S64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertF16U8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertF16U16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertF16U32([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertF16U64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertF32S8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertF32S16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertF32S32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=float(int({}));", inst, value);
}
void EmitConvertF32S64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=float(int64_t({}));", inst, value);
}
void EmitConvertF32U8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertF32U16(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=float({}&0xffff);", inst, value);
}
void EmitConvertF32U32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=float({});", inst, value);
}
void EmitConvertF32U64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=float({});", inst, value);
}
void EmitConvertF64S8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertF64S16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertF64S32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF64("{}=double(int({}));", inst, value);
}
void EmitConvertF64S64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF64("{}=double(int64_t({}));", inst, value);
}
void EmitConvertF64U8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertF64U16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitConvertF64U32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF64("{}=double({});", inst, value);
}
void EmitConvertF64U64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF64("{}=double({});", inst, value);
}
} // namespace Shader::Backend::GLSL


@@ -0,0 +1,456 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/modifiers.h"
#include "shader_recompiler/frontend/ir/value.h"
namespace Shader::Backend::GLSL {
namespace {
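// Ordered comparisons are true only when neither operand is NaN; unordered
// comparisons are true when either operand is NaN. A sketch with hypothetical
// operands a and b and result r0:
//   ordered ==:   r0=a==b&&!isnan(a)&&!isnan(b);
//   unordered ==: r0=a==b||isnan(a)||isnan(b);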
void Compare(EmitContext& ctx, IR::Inst& inst, std::string_view lhs, std::string_view rhs,
std::string_view op, bool ordered) {
const auto nan_op{ordered ? "&&!" : "||"};
ctx.AddU1("{}={}{}{}"
"{}isnan({}){}isnan({});",
inst, lhs, op, rhs, nan_op, lhs, nan_op, rhs);
}
bool IsPrecise(const IR::Inst& inst) {
return inst.Flags<IR::FpControl>().no_contraction;
}
} // Anonymous namespace
void EmitFPAbs16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitFPAbs32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=abs({});", inst, value);
}
void EmitFPAbs64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF64("{}=abs({});", inst, value);
}
void EmitFPAdd16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view a, [[maybe_unused]] std::string_view b) {
NotImplemented();
}
void EmitFPAdd32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
if (IsPrecise(inst)) {
ctx.AddPrecF32("{}={}+{};", inst, a, b);
} else {
ctx.AddF32("{}={}+{};", inst, a, b);
}
}
void EmitFPAdd64(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
if (IsPrecise(inst)) {
ctx.AddPrecF64("{}={}+{};", inst, a, b);
} else {
ctx.AddF64("{}={}+{};", inst, a, b);
}
}
void EmitFPFma16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view a, [[maybe_unused]] std::string_view b,
[[maybe_unused]] std::string_view c) {
NotImplemented();
}
void EmitFPFma32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b,
std::string_view c) {
if (IsPrecise(inst)) {
ctx.AddPrecF32("{}=fma({},{},{});", inst, a, b, c);
} else {
ctx.AddF32("{}=fma({},{},{});", inst, a, b, c);
}
}
void EmitFPFma64(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b,
std::string_view c) {
if (IsPrecise(inst)) {
ctx.AddPrecF64("{}=fma({},{},{});", inst, a, b, c);
} else {
ctx.AddF64("{}=fma({},{},{});", inst, a, b, c);
}
}
void EmitFPMax32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
ctx.AddF32("{}=max({},{});", inst, a, b);
}
void EmitFPMax64(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
ctx.AddF64("{}=max({},{});", inst, a, b);
}
void EmitFPMin32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
ctx.AddF32("{}=min({},{});", inst, a, b);
}
void EmitFPMin64(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
ctx.AddF64("{}=min({},{});", inst, a, b);
}
void EmitFPMul16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view a, [[maybe_unused]] std::string_view b) {
NotImplemented();
}
void EmitFPMul32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
if (IsPrecise(inst)) {
ctx.AddPrecF32("{}={}*{};", inst, a, b);
} else {
ctx.AddF32("{}={}*{};", inst, a, b);
}
}
void EmitFPMul64(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
if (IsPrecise(inst)) {
ctx.AddPrecF64("{}={}*{};", inst, a, b);
} else {
ctx.AddF64("{}={}*{};", inst, a, b);
}
}
void EmitFPNeg16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitFPNeg32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=-({});", inst, value);
}
void EmitFPNeg64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF64("{}=-({});", inst, value);
}
void EmitFPSin(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=sin({});", inst, value);
}
void EmitFPCos(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=cos({});", inst, value);
}
void EmitFPExp2(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=exp2({});", inst, value);
}
void EmitFPLog2(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=log2({});", inst, value);
}
void EmitFPRecip32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=(1.0f)/{};", inst, value);
}
void EmitFPRecip64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF64("{}=1.0/{};", inst, value);
}
void EmitFPRecipSqrt32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=inversesqrt({});", inst, value);
}
void EmitFPRecipSqrt64([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitFPSqrt(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=sqrt({});", inst, value);
}
void EmitFPSaturate16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitFPSaturate32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=min(max({},0.0),1.0);", inst, value);
}
void EmitFPSaturate64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF64("{}=min(max({},0.0),1.0);", inst, value);
}
void EmitFPClamp16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value,
[[maybe_unused]] std::string_view min_value,
[[maybe_unused]] std::string_view max_value) {
NotImplemented();
}
void EmitFPClamp32(EmitContext& ctx, IR::Inst& inst, std::string_view value,
std::string_view min_value, std::string_view max_value) {
// GLSL's clamp does not produce desirable results here (NaN and min>max
// handling are undefined), so emit min/max instead
ctx.AddF32("{}=min(max({},float({})),float({}));", inst, value, min_value, max_value);
}
void EmitFPClamp64(EmitContext& ctx, IR::Inst& inst, std::string_view value,
std::string_view min_value, std::string_view max_value) {
// GLSL's clamp does not produce desirable results here (NaN and min>max
// handling are undefined), so emit min/max instead
ctx.AddF64("{}=min(max({},double({})),double({}));", inst, value, min_value, max_value);
}
void EmitFPRoundEven16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitFPRoundEven32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=roundEven({});", inst, value);
}
void EmitFPRoundEven64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF64("{}=roundEven({});", inst, value);
}
void EmitFPFloor16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitFPFloor32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=floor({});", inst, value);
}
void EmitFPFloor64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF64("{}=floor({});", inst, value);
}
void EmitFPCeil16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitFPCeil32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=ceil({});", inst, value);
}
void EmitFPCeil64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF64("{}=ceil({});", inst, value);
}
void EmitFPTrunc16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitFPTrunc32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF32("{}=trunc({});", inst, value);
}
void EmitFPTrunc64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddF64("{}=trunc({});", inst, value);
}
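// The comparison emitters below delegate to the Compare() helper defined
// earlier in this file; its final bool argument selects ordered semantics.
// Per IEEE-754, an ordered comparison is false when either operand is NaN,
// while the matching unordered comparison is true in that case.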
void EmitFPOrdEqual16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view lhs,
[[maybe_unused]] std::string_view rhs) {
NotImplemented();
}
void EmitFPOrdEqual32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, "==", true);
}
void EmitFPOrdEqual64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, "==", true);
}
void EmitFPUnordEqual16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view lhs,
[[maybe_unused]] std::string_view rhs) {
NotImplemented();
}
void EmitFPUnordEqual32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, "==", false);
}
void EmitFPUnordEqual64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, "==", false);
}
void EmitFPOrdNotEqual16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view lhs,
[[maybe_unused]] std::string_view rhs) {
NotImplemented();
}
void EmitFPOrdNotEqual32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, "!=", true);
}
void EmitFPOrdNotEqual64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, "!=", true);
}
void EmitFPUnordNotEqual16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view lhs,
[[maybe_unused]] std::string_view rhs) {
NotImplemented();
}
void EmitFPUnordNotEqual32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, "!=", false);
}
void EmitFPUnordNotEqual64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, "!=", false);
}
void EmitFPOrdLessThan16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view lhs,
[[maybe_unused]] std::string_view rhs) {
NotImplemented();
}
void EmitFPOrdLessThan32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, "<", true);
}
void EmitFPOrdLessThan64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, "<", true);
}
void EmitFPUnordLessThan16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view lhs,
[[maybe_unused]] std::string_view rhs) {
NotImplemented();
}
void EmitFPUnordLessThan32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, "<", false);
}
void EmitFPUnordLessThan64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, "<", false);
}
void EmitFPOrdGreaterThan16([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] std::string_view lhs,
[[maybe_unused]] std::string_view rhs) {
NotImplemented();
}
void EmitFPOrdGreaterThan32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, ">", true);
}
void EmitFPOrdGreaterThan64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, ">", true);
}
void EmitFPUnordGreaterThan16([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] std::string_view lhs,
[[maybe_unused]] std::string_view rhs) {
NotImplemented();
}
void EmitFPUnordGreaterThan32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, ">", false);
}
void EmitFPUnordGreaterThan64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, ">", false);
}
void EmitFPOrdLessThanEqual16([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] std::string_view lhs,
[[maybe_unused]] std::string_view rhs) {
NotImplemented();
}
void EmitFPOrdLessThanEqual32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, "<=", true);
}
void EmitFPOrdLessThanEqual64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, "<=", true);
}
void EmitFPUnordLessThanEqual16([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] std::string_view lhs,
[[maybe_unused]] std::string_view rhs) {
NotImplemented();
}
void EmitFPUnordLessThanEqual32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, "<=", false);
}
void EmitFPUnordLessThanEqual64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, "<=", false);
}
void EmitFPOrdGreaterThanEqual16([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] std::string_view lhs,
[[maybe_unused]] std::string_view rhs) {
NotImplemented();
}
void EmitFPOrdGreaterThanEqual32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, ">=", true);
}
void EmitFPOrdGreaterThanEqual64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, ">=", true);
}
void EmitFPUnordGreaterThanEqual16([[maybe_unused]] EmitContext& ctx,
[[maybe_unused]] std::string_view lhs,
[[maybe_unused]] std::string_view rhs) {
NotImplemented();
}
void EmitFPUnordGreaterThanEqual32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, ">=", false);
}
void EmitFPUnordGreaterThanEqual64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
Compare(ctx, inst, lhs, rhs, ">=", false);
}
void EmitFPIsNan16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] IR::Inst& inst,
[[maybe_unused]] std::string_view value) {
NotImplemented();
}
void EmitFPIsNan32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU1("{}=isnan({});", inst, value);
}
void EmitFPIsNan64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU1("{}=isnan({});", inst, value);
}
} // namespace Shader::Backend::GLSL

View File

@ -0,0 +1,799 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/modifiers.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/profile.h"
namespace Shader::Backend::GLSL {
namespace {
std::string Texture(EmitContext& ctx, const IR::TextureInstInfo& info, const IR::Value& index) {
const auto def{info.type == TextureType::Buffer ? ctx.texture_buffers.at(info.descriptor_index)
: ctx.textures.at(info.descriptor_index)};
const auto index_offset{def.count > 1 ? fmt::format("[{}]", ctx.var_alloc.Consume(index)) : ""};
return fmt::format("tex{}{}", def.binding, index_offset);
}
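// Illustrative output: a sampler at binding 2 with a dynamically indexed
// descriptor array yields "tex2[i]", a single descriptor just "tex2".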
std::string Image(EmitContext& ctx, const IR::TextureInstInfo& info, const IR::Value& index) {
const auto def{info.type == TextureType::Buffer ? ctx.image_buffers.at(info.descriptor_index)
: ctx.images.at(info.descriptor_index)};
const auto index_offset{def.count > 1 ? fmt::format("[{}]", ctx.var_alloc.Consume(index)) : ""};
return fmt::format("img{}{}", def.binding, index_offset);
}
std::string CastToIntVec(std::string_view value, const IR::TextureInstInfo& info) {
switch (info.type) {
case TextureType::Color1D:
case TextureType::Buffer:
return fmt::format("int({})", value);
case TextureType::ColorArray1D:
case TextureType::Color2D:
case TextureType::ColorArray2D:
return fmt::format("ivec2({})", value);
case TextureType::Color3D:
case TextureType::ColorCube:
return fmt::format("ivec3({})", value);
case TextureType::ColorArrayCube:
return fmt::format("ivec4({})", value);
default:
throw NotImplementedException("Integer cast for TextureType {}", info.type.Value());
}
}
std::string CoordsCastToInt(std::string_view value, const IR::TextureInstInfo& info) {
switch (info.type) {
case TextureType::Color1D:
case TextureType::Buffer:
return fmt::format("int({})", value);
case TextureType::ColorArray1D:
case TextureType::Color2D:
return fmt::format("ivec2({})", value);
case TextureType::ColorArray2D:
case TextureType::Color3D:
case TextureType::ColorCube:
return fmt::format("ivec3({})", value);
case TextureType::ColorArrayCube:
return fmt::format("ivec4({})", value);
default:
throw NotImplementedException("TexelFetchCast type {}", info.type.Value());
}
}
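// CastToIntVec appears to size the cast for offset operands (which exclude the
// array layer), while CoordsCastToInt sizes it for full texel coordinates
// (which include the layer); hence the differing ColorArray2D cases above.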
bool NeedsShadowLodExt(TextureType type) {
switch (type) {
case TextureType::ColorArray2D:
case TextureType::ColorCube:
case TextureType::ColorArrayCube:
return true;
default:
return false;
}
}
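// For these shadow sampler types, textureLod/textureLodOffset are only exposed
// through GL_EXT_texture_shadow_lod; most 1D and plain 2D shadow lookups are
// available without the extension.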
std::string GetOffsetVec(EmitContext& ctx, const IR::Value& offset) {
if (offset.IsImmediate()) {
return fmt::format("int({})", offset.U32());
}
IR::Inst* const inst{offset.InstRecursive()};
if (inst->AreAllArgsImmediates()) {
switch (inst->GetOpcode()) {
case IR::Opcode::CompositeConstructU32x2:
return fmt::format("ivec2({},{})", inst->Arg(0).U32(), inst->Arg(1).U32());
case IR::Opcode::CompositeConstructU32x3:
return fmt::format("ivec3({},{},{})", inst->Arg(0).U32(), inst->Arg(1).U32(),
inst->Arg(2).U32());
case IR::Opcode::CompositeConstructU32x4:
return fmt::format("ivec4({},{},{},{})", inst->Arg(0).U32(), inst->Arg(1).U32(),
inst->Arg(2).U32(), inst->Arg(3).U32());
default:
break;
}
}
const bool has_var_aoffi{ctx.profile.support_gl_variable_aoffi};
if (!has_var_aoffi) {
LOG_WARNING(Shader_GLSL, "Device does not support variable texture offsets, STUBBING");
}
const auto offset_str{has_var_aoffi ? ctx.var_alloc.Consume(offset) : "0"};
switch (offset.Type()) {
case IR::Type::U32:
return fmt::format("int({})", offset_str);
case IR::Type::U32x2:
return fmt::format("ivec2({})", offset_str);
case IR::Type::U32x3:
return fmt::format("ivec3({})", offset_str);
case IR::Type::U32x4:
return fmt::format("ivec4({})", offset_str);
default:
throw NotImplementedException("Offset type {}", offset.Type());
}
}
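// Illustrative results: an immediate CompositeConstructU32x2 of (1,2) becomes
// "ivec2(1,2)"; a dynamic U32x2 offset becomes "ivec2(<allocated var>)", or
// "ivec2(0)" when variable AOFFI support is absent and the offset is stubbed.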
std::string PtpOffsets(const IR::Value& offset, const IR::Value& offset2) {
const std::array values{offset.InstRecursive(), offset2.InstRecursive()};
if (!values[0]->AreAllArgsImmediates() || !values[1]->AreAllArgsImmediates()) {
LOG_WARNING(Shader_GLSL, "Not all arguments in PTP are immediate, STUBBING");
return "ivec2[](ivec2(0), ivec2(1), ivec2(2), ivec2(3))";
}
const IR::Opcode opcode{values[0]->GetOpcode()};
if (opcode != values[1]->GetOpcode() || opcode != IR::Opcode::CompositeConstructU32x4) {
throw LogicError("Invalid PTP arguments");
}
auto read{[&](unsigned int a, unsigned int b) { return values[a]->Arg(b).U32(); }};
return fmt::format("ivec2[](ivec2({},{}),ivec2({},{}),ivec2({},{}),ivec2({},{}))", read(0, 0),
read(0, 1), read(0, 2), read(0, 3), read(1, 0), read(1, 1), read(1, 2),
read(1, 3));
}
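// PTP gathers carry four independent texel offsets; the string built here is
// the ivec2[4] argument of textureGatherOffsets, e.g.
// "ivec2[](ivec2(0,0),ivec2(1,0),ivec2(0,1),ivec2(1,1))" (illustrative values).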
IR::Inst* PrepareSparse(IR::Inst& inst) {
const auto sparse_inst{inst.GetAssociatedPseudoOperation(IR::Opcode::GetSparseFromOp)};
if (sparse_inst) {
sparse_inst->Invalidate();
}
return sparse_inst;
}
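// GetSparseFromOp is a pseudo-instruction attached to texture ops whose
// residency result is consumed; invalidating it here marks it as handled so
// no separate code is emitted for it later.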
} // Anonymous namespace
void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view bias_lc,
const IR::Value& offset) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
if (info.has_lod_clamp) {
throw NotImplementedException("EmitImageSampleImplicitLod Lod clamp samples");
}
const auto texture{Texture(ctx, info, index)};
const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""};
const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
const auto sparse_inst{PrepareSparse(inst)};
const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
if (sparse_inst && !supports_sparse) {
LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
ctx.AddU1("{}=true;", *sparse_inst);
}
if (!sparse_inst || !supports_sparse) {
if (!offset.IsEmpty()) {
const auto offset_str{GetOffsetVec(ctx, offset)};
if (ctx.stage == Stage::Fragment) {
ctx.Add("{}=textureOffset({},{},{}{});", texel, texture, coords, offset_str, bias);
} else {
ctx.Add("{}=textureLodOffset({},{},0.0,{});", texel, texture, coords, offset_str);
}
} else {
if (ctx.stage == Stage::Fragment) {
ctx.Add("{}=texture({},{}{});", texel, texture, coords, bias);
} else {
ctx.Add("{}=textureLod({},{},0.0);", texel, texture, coords);
}
}
return;
}
if (!offset.IsEmpty()) {
ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureOffsetARB({},{},{},{}{}));",
*sparse_inst, texture, coords, GetOffsetVec(ctx, offset), texel, bias);
} else {
ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureARB({},{},{}{}));", *sparse_inst,
texture, coords, texel, bias);
}
}
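// Rough sketch of the emitted GLSL in the common fragment-stage path, with
// illustrative allocator names:
//   vec4 t1=textureOffset(tex0,coords,ivec2(1,1),bias); // offset present
//   vec4 t1=texture(tex0,coords,bias);                  // no offset
// Outside fragment shaders implicit derivatives are unavailable, so the
// non-sparse path falls back to an explicit textureLod at level 0.0.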
void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view lod_lc,
const IR::Value& offset) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
if (info.has_bias) {
throw NotImplementedException("EmitImageSampleExplicitLod Bias texture samples");
}
if (info.has_lod_clamp) {
throw NotImplementedException("EmitImageSampleExplicitLod Lod clamp samples");
}
const auto texture{Texture(ctx, info, index)};
const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
const auto sparse_inst{PrepareSparse(inst)};
const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
if (sparse_inst && !supports_sparse) {
LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
ctx.AddU1("{}=true;", *sparse_inst);
}
if (!sparse_inst || !supports_sparse) {
if (!offset.IsEmpty()) {
ctx.Add("{}=textureLodOffset({},{},{},{});", texel, texture, coords, lod_lc,
GetOffsetVec(ctx, offset));
} else {
ctx.Add("{}=textureLod({},{},{});", texel, texture, coords, lod_lc);
}
return;
}
if (!offset.IsEmpty()) {
ctx.AddU1("{}=sparseTexelsResidentARB(sparseTexelFetchOffsetARB({},{},int({}),{},{}));",
*sparse_inst, texture, CastToIntVec(coords, info), lod_lc,
GetOffsetVec(ctx, offset), texel);
} else {
ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureLodARB({},{},{},{}));", *sparse_inst,
texture, coords, lod_lc, texel);
}
}
void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view dref,
std::string_view bias_lc, const IR::Value& offset) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto sparse_inst{PrepareSparse(inst)};
if (sparse_inst) {
throw NotImplementedException("EmitImageSampleDrefImplicitLod Sparse texture samples");
}
if (info.has_bias) {
throw NotImplementedException("EmitImageSampleDrefImplicitLod Bias texture samples");
}
if (info.has_lod_clamp) {
throw NotImplementedException("EmitImageSampleDrefImplicitLod Lod clamp samples");
}
const auto texture{Texture(ctx, info, index)};
const auto bias{info.has_bias ? fmt::format(",{}", bias_lc) : ""};
const bool needs_shadow_ext{NeedsShadowLodExt(info.type)};
const auto cast{needs_shadow_ext ? "vec4" : "vec3"};
const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod &&
ctx.stage != Stage::Fragment && needs_shadow_ext};
if (use_grad) {
LOG_WARNING(Shader_GLSL,
"Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback");
if (info.type == TextureType::ColorArrayCube) {
LOG_WARNING(Shader_GLSL, "textureGrad does not support ColorArrayCube. Stubbing");
ctx.AddF32("{}=0.0f;", inst);
return;
}
const auto d_cast{info.type == TextureType::ColorArray2D ? "vec2" : "vec3"};
ctx.AddF32("{}=textureGrad({},{}({},{}),{}(0),{}(0));", inst, texture, cast, coords, dref,
d_cast, d_cast);
return;
}
if (!offset.IsEmpty()) {
const auto offset_str{GetOffsetVec(ctx, offset)};
if (ctx.stage == Stage::Fragment) {
ctx.AddF32("{}=textureOffset({},{}({},{}),{}{});", inst, texture, cast, coords, dref,
offset_str, bias);
} else {
ctx.AddF32("{}=textureLodOffset({},{}({},{}),0.0,{});", inst, texture, cast, coords,
dref, offset_str);
}
} else {
if (ctx.stage == Stage::Fragment) {
if (info.type == TextureType::ColorArrayCube) {
ctx.AddF32("{}=texture({},vec4({}),{});", inst, texture, coords, dref);
} else {
ctx.AddF32("{}=texture({},{}({},{}){});", inst, texture, cast, coords, dref, bias);
}
} else {
ctx.AddF32("{}=textureLod({},{}({},{}),0.0);", inst, texture, cast, coords, dref);
}
}
}
void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view dref,
std::string_view lod_lc, const IR::Value& offset) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto sparse_inst{PrepareSparse(inst)};
if (sparse_inst) {
throw NotImplementedException("EmitImageSampleDrefExplicitLod Sparse texture samples");
}
if (info.has_bias) {
throw NotImplementedException("EmitImageSampleDrefExplicitLod Bias texture samples");
}
if (info.has_lod_clamp) {
throw NotImplementedException("EmitImageSampleDrefExplicitLod Lod clamp samples");
}
const auto texture{Texture(ctx, info, index)};
const bool needs_shadow_ext{NeedsShadowLodExt(info.type)};
const bool use_grad{!ctx.profile.support_gl_texture_shadow_lod && needs_shadow_ext};
const auto cast{needs_shadow_ext ? "vec4" : "vec3"};
if (use_grad) {
LOG_WARNING(Shader_GLSL,
"Device lacks GL_EXT_texture_shadow_lod. Using textureGrad fallback");
if (info.type == TextureType::ColorArrayCube) {
LOG_WARNING(Shader_GLSL, "textureGrad does not support ColorArrayCube. Stubbing");
ctx.AddF32("{}=0.0f;", inst);
return;
}
const auto d_cast{info.type == TextureType::ColorArray2D ? "vec2" : "vec3"};
ctx.AddF32("{}=textureGrad({},{}({},{}),{}(0),{}(0));", inst, texture, cast, coords, dref,
d_cast, d_cast);
return;
}
if (!offset.IsEmpty()) {
const auto offset_str{GetOffsetVec(ctx, offset)};
if (info.type == TextureType::ColorArrayCube) {
ctx.AddF32("{}=textureLodOffset({},{},{},{},{});", inst, texture, coords, dref, lod_lc,
offset_str);
} else {
ctx.AddF32("{}=textureLodOffset({},{}({},{}),{},{});", inst, texture, cast, coords,
dref, lod_lc, offset_str);
}
} else {
if (info.type == TextureType::ColorArrayCube) {
ctx.AddF32("{}=textureLod({},{},{},{});", inst, texture, coords, dref, lod_lc);
} else {
ctx.AddF32("{}=textureLod({},{}({},{}),{});", inst, texture, cast, coords, dref,
lod_lc);
}
}
}
void EmitImageGather(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, const IR::Value& offset, const IR::Value& offset2) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto texture{Texture(ctx, info, index)};
const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
const auto sparse_inst{PrepareSparse(inst)};
const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
if (sparse_inst && !supports_sparse) {
LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
ctx.AddU1("{}=true;", *sparse_inst);
}
if (!sparse_inst || !supports_sparse) {
if (offset.IsEmpty()) {
ctx.Add("{}=textureGather({},{},int({}));", texel, texture, coords,
info.gather_component);
return;
}
if (offset2.IsEmpty()) {
ctx.Add("{}=textureGatherOffset({},{},{},int({}));", texel, texture, coords,
GetOffsetVec(ctx, offset), info.gather_component);
return;
}
// PTP
const auto offsets{PtpOffsets(offset, offset2)};
ctx.Add("{}=textureGatherOffsets({},{},{},int({}));", texel, texture, coords, offsets,
info.gather_component);
return;
}
if (offset.IsEmpty()) {
ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherARB({},{},{},int({})));",
*sparse_inst, texture, coords, texel, info.gather_component);
return;
}
if (offset2.IsEmpty()) {
ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},{},int({})));",
*sparse_inst, texture, CastToIntVec(coords, info), GetOffsetVec(ctx, offset),
texel, info.gather_component);
return;
}
// PTP
const auto offsets{PtpOffsets(offset, offset2)};
ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},{},int({})));",
*sparse_inst, texture, CastToIntVec(coords, info), offsets, texel,
info.gather_component);
}
void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, const IR::Value& offset, const IR::Value& offset2,
std::string_view dref) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto texture{Texture(ctx, info, index)};
const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
const auto sparse_inst{PrepareSparse(inst)};
const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
if (sparse_inst && !supports_sparse) {
LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
ctx.AddU1("{}=true;", *sparse_inst);
}
if (!sparse_inst || !supports_sparse) {
if (offset.IsEmpty()) {
ctx.Add("{}=textureGather({},{},{});", texel, texture, coords, dref);
return;
}
if (offset2.IsEmpty()) {
ctx.Add("{}=textureGatherOffset({},{},{},{});", texel, texture, coords, dref,
GetOffsetVec(ctx, offset));
return;
}
// PTP
const auto offsets{PtpOffsets(offset, offset2)};
ctx.Add("{}=textureGatherOffsets({},{},{},{});", texel, texture, coords, dref, offsets);
return;
}
if (offset.IsEmpty()) {
ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherARB({},{},{},{}));", *sparse_inst,
texture, coords, dref, texel);
return;
}
if (offset2.IsEmpty()) {
ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},,{},{}));",
*sparse_inst, texture, CastToIntVec(coords, info), dref,
GetOffsetVec(ctx, offset), texel);
return;
}
// PTP
const auto offsets{PtpOffsets(offset, offset2)};
ctx.AddU1("{}=sparseTexelsResidentARB(sparseTextureGatherOffsetARB({},{},{},,{},{}));",
*sparse_inst, texture, CastToIntVec(coords, info), dref, offsets, texel);
}
void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view offset, std::string_view lod,
[[maybe_unused]] std::string_view ms) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
if (info.has_bias) {
throw NotImplementedException("EmitImageFetch Bias texture samples");
}
if (info.has_lod_clamp) {
throw NotImplementedException("EmitImageFetch Lod clamp samples");
}
const auto texture{Texture(ctx, info, index)};
const auto sparse_inst{PrepareSparse(inst)};
const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
const bool supports_sparse{ctx.profile.support_gl_sparse_textures};
if (sparse_inst && !supports_sparse) {
LOG_WARNING(Shader_GLSL, "Device does not support sparse texture queries. STUBBING");
ctx.AddU1("{}=true;", *sparse_inst);
}
if (!sparse_inst || !supports_sparse) {
if (!offset.empty()) {
ctx.Add("{}=texelFetchOffset({},{},int({}),{});", texel, texture,
CoordsCastToInt(coords, info), lod, CoordsCastToInt(offset, info));
} else {
if (info.type == TextureType::Buffer) {
ctx.Add("{}=texelFetch({},int({}));", texel, texture, coords);
} else {
ctx.Add("{}=texelFetch({},{},int({}));", texel, texture,
CoordsCastToInt(coords, info), lod);
}
}
return;
}
if (!offset.empty()) {
ctx.AddU1("{}=sparseTexelsResidentARB(sparseTexelFetchOffsetARB({},{},int({}),{},{}));",
*sparse_inst, texture, CastToIntVec(coords, info), lod,
CastToIntVec(offset, info), texel);
} else {
ctx.AddU1("{}=sparseTexelsResidentARB(sparseTexelFetchARB({},{},int({}),{}));",
*sparse_inst, texture, CastToIntVec(coords, info), lod, texel);
}
}
void EmitImageQueryDimensions(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view lod) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto texture{Texture(ctx, info, index)};
switch (info.type) {
case TextureType::Color1D:
return ctx.AddU32x4(
"{}=uvec4(uint(textureSize({},int({}))),0u,0u,uint(textureQueryLevels({})));", inst,
texture, lod, texture);
case TextureType::ColorArray1D:
case TextureType::Color2D:
case TextureType::ColorCube:
return ctx.AddU32x4(
"{}=uvec4(uvec2(textureSize({},int({}))),0u,uint(textureQueryLevels({})));", inst,
texture, lod, texture);
case TextureType::ColorArray2D:
case TextureType::Color3D:
case TextureType::ColorArrayCube:
return ctx.AddU32x4(
"{}=uvec4(uvec3(textureSize({},int({}))),uint(textureQueryLevels({})));", inst, texture,
lod, texture);
case TextureType::Buffer:
throw NotImplementedException("EmitImageQueryDimensions Texture buffers");
}
throw LogicError("Unspecified image type {}", info.type.Value());
}
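// The uvec4 packs the size query per dimensionality: width/height/depth in the
// leading components (zero-padded) and the mip level count in .w.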
void EmitImageQueryLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto texture{Texture(ctx, info, index)};
return ctx.AddF32x4("{}=vec4(textureQueryLod({},{}),0.0,0.0);", inst, texture, coords);
}
void EmitImageGradient(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, const IR::Value& derivatives,
const IR::Value& offset, [[maybe_unused]] const IR::Value& lod_clamp) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
if (info.has_lod_clamp) {
throw NotImplementedException("EmitImageGradient Lod clamp samples");
}
const auto sparse_inst{PrepareSparse(inst)};
if (sparse_inst) {
throw NotImplementedException("EmitImageGradient Sparse");
}
if (!offset.IsEmpty()) {
throw NotImplementedException("EmitImageGradient offset");
}
const auto texture{Texture(ctx, info, index)};
const auto texel{ctx.var_alloc.Define(inst, GlslVarType::F32x4)};
const bool multi_component{info.num_derivates > 1 || info.has_lod_clamp};
const auto derivatives_vec{ctx.var_alloc.Consume(derivatives)};
if (multi_component) {
ctx.Add("{}=textureGrad({},{},vec2({}.xz),vec2({}.yz));", texel, texture, coords,
derivatives_vec, derivatives_vec);
} else {
ctx.Add("{}=textureGrad({},{},float({}.x),float({}.y));", texel, texture, coords,
derivatives_vec, derivatives_vec);
}
}
void EmitImageRead(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto sparse_inst{PrepareSparse(inst)};
if (sparse_inst) {
throw NotImplementedException("EmitImageRead Sparse");
}
const auto image{Image(ctx, info, index)};
ctx.AddU32x4("{}=uvec4(imageLoad({},{}));", inst, image, CoordsCastToInt(coords, info));
}
void EmitImageWrite(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view color) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto image{Image(ctx, info, index)};
ctx.Add("imageStore({},{},{});", image, CoordsCastToInt(coords, info), color);
}
void EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto image{Image(ctx, info, index)};
ctx.AddU32("{}=imageAtomicAdd({},{},{});", inst, image, CoordsCastToInt(coords, info), value);
}
void EmitImageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto image{Image(ctx, info, index)};
ctx.AddU32("{}=imageAtomicMin({},{},int({}));", inst, image, CoordsCastToInt(coords, info),
value);
}
void EmitImageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto image{Image(ctx, info, index)};
ctx.AddU32("{}=imageAtomicMin({},{},uint({}));", inst, image, CoordsCastToInt(coords, info),
value);
}
void EmitImageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto image{Image(ctx, info, index)};
ctx.AddU32("{}=imageAtomicMax({},{},int({}));", inst, image, CoordsCastToInt(coords, info),
value);
}
void EmitImageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto image{Image(ctx, info, index)};
ctx.AddU32("{}=imageAtomicMax({},{},uint({}));", inst, image, CoordsCastToInt(coords, info),
value);
}
void EmitImageAtomicInc32(EmitContext&, IR::Inst&, const IR::Value&, std::string_view,
std::string_view) {
NotImplemented();
}
void EmitImageAtomicDec32(EmitContext&, IR::Inst&, const IR::Value&, std::string_view,
std::string_view) {
NotImplemented();
}
void EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto image{Image(ctx, info, index)};
ctx.AddU32("{}=imageAtomicAnd({},{},{});", inst, image, CoordsCastToInt(coords, info), value);
}
void EmitImageAtomicOr32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto image{Image(ctx, info, index)};
ctx.AddU32("{}=imageAtomicOr({},{},{});", inst, image, CoordsCastToInt(coords, info), value);
}
void EmitImageAtomicXor32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto image{Image(ctx, info, index)};
ctx.AddU32("{}=imageAtomicXor({},{},{});", inst, image, CoordsCastToInt(coords, info), value);
}
void EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value) {
const auto info{inst.Flags<IR::TextureInstInfo>()};
const auto image{Image(ctx, info, index)};
ctx.AddU32("{}=imageAtomicExchange({},{},{});", inst, image, CoordsCastToInt(coords, info),
value);
}
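// GLSL imageAtomic* builtins return the value held at the coordinate before
// the operation, which is why each emitter above assigns the result to a U32
// instead of discarding it.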
void EmitBindlessImageSampleImplicitLod(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageSampleExplicitLod(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageSampleDrefImplicitLod(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageSampleDrefExplicitLod(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageGather(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageGatherDref(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageFetch(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageQueryDimensions(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageQueryLod(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageGradient(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageRead(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageWrite(EmitContext&) {
NotImplemented();
}
void EmitBoundImageSampleImplicitLod(EmitContext&) {
NotImplemented();
}
void EmitBoundImageSampleExplicitLod(EmitContext&) {
NotImplemented();
}
void EmitBoundImageSampleDrefImplicitLod(EmitContext&) {
NotImplemented();
}
void EmitBoundImageSampleDrefExplicitLod(EmitContext&) {
NotImplemented();
}
void EmitBoundImageGather(EmitContext&) {
NotImplemented();
}
void EmitBoundImageGatherDref(EmitContext&) {
NotImplemented();
}
void EmitBoundImageFetch(EmitContext&) {
NotImplemented();
}
void EmitBoundImageQueryDimensions(EmitContext&) {
NotImplemented();
}
void EmitBoundImageQueryLod(EmitContext&) {
NotImplemented();
}
void EmitBoundImageGradient(EmitContext&) {
NotImplemented();
}
void EmitBoundImageRead(EmitContext&) {
NotImplemented();
}
void EmitBoundImageWrite(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageAtomicIAdd32(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageAtomicSMin32(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageAtomicUMin32(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageAtomicSMax32(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageAtomicUMax32(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageAtomicInc32(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageAtomicDec32(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageAtomicAnd32(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageAtomicOr32(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageAtomicXor32(EmitContext&) {
NotImplemented();
}
void EmitBindlessImageAtomicExchange32(EmitContext&) {
NotImplemented();
}
void EmitBoundImageAtomicIAdd32(EmitContext&) {
NotImplemented();
}
void EmitBoundImageAtomicSMin32(EmitContext&) {
NotImplemented();
}
void EmitBoundImageAtomicUMin32(EmitContext&) {
NotImplemented();
}
void EmitBoundImageAtomicSMax32(EmitContext&) {
NotImplemented();
}
void EmitBoundImageAtomicUMax32(EmitContext&) {
NotImplemented();
}
void EmitBoundImageAtomicInc32(EmitContext&) {
NotImplemented();
}
void EmitBoundImageAtomicDec32(EmitContext&) {
NotImplemented();
}
void EmitBoundImageAtomicAnd32(EmitContext&) {
NotImplemented();
}
void EmitBoundImageAtomicOr32(EmitContext&) {
NotImplemented();
}
void EmitBoundImageAtomicXor32(EmitContext&) {
NotImplemented();
}
void EmitBoundImageAtomicExchange32(EmitContext&) {
NotImplemented();
}
} // namespace Shader::Backend::GLSL

View File

@ -0,0 +1,702 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <string_view>
#include "common/common_types.h"
namespace Shader::IR {
enum class Attribute : u64;
enum class Patch : u64;
class Inst;
class Value;
} // namespace Shader::IR
namespace Shader::Backend::GLSL {
class EmitContext;
#define NotImplemented() throw NotImplementedException("GLSL instruction {}", __func__)
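// __func__ expands to the enclosing emitter's name, so each stub reports which
// GLSL instruction is missing without per-function boilerplate.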
// Microinstruction emitters
void EmitPhi(EmitContext& ctx, IR::Inst& inst);
void EmitVoid(EmitContext& ctx);
void EmitIdentity(EmitContext& ctx, IR::Inst& inst, const IR::Value& value);
void EmitConditionRef(EmitContext& ctx, IR::Inst& inst, const IR::Value& value);
void EmitReference(EmitContext& ctx, const IR::Value& value);
void EmitPhiMove(EmitContext& ctx, const IR::Value& phi, const IR::Value& value);
void EmitJoin(EmitContext& ctx);
void EmitDemoteToHelperInvocation(EmitContext& ctx);
void EmitBarrier(EmitContext& ctx);
void EmitWorkgroupMemoryBarrier(EmitContext& ctx);
void EmitDeviceMemoryBarrier(EmitContext& ctx);
void EmitPrologue(EmitContext& ctx);
void EmitEpilogue(EmitContext& ctx);
void EmitEmitVertex(EmitContext& ctx, const IR::Value& stream);
void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream);
void EmitGetRegister(EmitContext& ctx);
void EmitSetRegister(EmitContext& ctx);
void EmitGetPred(EmitContext& ctx);
void EmitSetPred(EmitContext& ctx);
void EmitSetGotoVariable(EmitContext& ctx);
void EmitGetGotoVariable(EmitContext& ctx);
void EmitSetIndirectBranchVariable(EmitContext& ctx);
void EmitGetIndirectBranchVariable(EmitContext& ctx);
void EmitGetCbufU8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset);
void EmitGetCbufS8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset);
void EmitGetCbufU16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset);
void EmitGetCbufS16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset);
void EmitGetCbufU32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset);
void EmitGetCbufF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset);
void EmitGetCbufU32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset);
void EmitGetAttribute(EmitContext& ctx, IR::Inst& inst, IR::Attribute attr,
std::string_view vertex);
void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, std::string_view value,
std::string_view vertex);
void EmitGetAttributeIndexed(EmitContext& ctx, IR::Inst& inst, std::string_view offset,
std::string_view vertex);
void EmitSetAttributeIndexed(EmitContext& ctx, std::string_view offset, std::string_view value,
std::string_view vertex);
void EmitGetPatch(EmitContext& ctx, IR::Inst& inst, IR::Patch patch);
void EmitSetPatch(EmitContext& ctx, IR::Patch patch, std::string_view value);
void EmitSetFragColor(EmitContext& ctx, u32 index, u32 component, std::string_view value);
void EmitSetSampleMask(EmitContext& ctx, std::string_view value);
void EmitSetFragDepth(EmitContext& ctx, std::string_view value);
void EmitGetZFlag(EmitContext& ctx);
void EmitGetSFlag(EmitContext& ctx);
void EmitGetCFlag(EmitContext& ctx);
void EmitGetOFlag(EmitContext& ctx);
void EmitSetZFlag(EmitContext& ctx);
void EmitSetSFlag(EmitContext& ctx);
void EmitSetCFlag(EmitContext& ctx);
void EmitSetOFlag(EmitContext& ctx);
void EmitWorkgroupId(EmitContext& ctx, IR::Inst& inst);
void EmitLocalInvocationId(EmitContext& ctx, IR::Inst& inst);
void EmitInvocationId(EmitContext& ctx, IR::Inst& inst);
void EmitSampleId(EmitContext& ctx, IR::Inst& inst);
void EmitIsHelperInvocation(EmitContext& ctx, IR::Inst& inst);
void EmitYDirection(EmitContext& ctx, IR::Inst& inst);
void EmitLoadLocal(EmitContext& ctx, IR::Inst& inst, std::string_view word_offset);
void EmitWriteLocal(EmitContext& ctx, std::string_view word_offset, std::string_view value);
void EmitUndefU1(EmitContext& ctx, IR::Inst& inst);
void EmitUndefU8(EmitContext& ctx, IR::Inst& inst);
void EmitUndefU16(EmitContext& ctx, IR::Inst& inst);
void EmitUndefU32(EmitContext& ctx, IR::Inst& inst);
void EmitUndefU64(EmitContext& ctx, IR::Inst& inst);
void EmitLoadGlobalU8(EmitContext& ctx);
void EmitLoadGlobalS8(EmitContext& ctx);
void EmitLoadGlobalU16(EmitContext& ctx);
void EmitLoadGlobalS16(EmitContext& ctx);
void EmitLoadGlobal32(EmitContext& ctx, IR::Inst& inst, std::string_view address);
void EmitLoadGlobal64(EmitContext& ctx, IR::Inst& inst, std::string_view address);
void EmitLoadGlobal128(EmitContext& ctx, IR::Inst& inst, std::string_view address);
void EmitWriteGlobalU8(EmitContext& ctx);
void EmitWriteGlobalS8(EmitContext& ctx);
void EmitWriteGlobalU16(EmitContext& ctx);
void EmitWriteGlobalS16(EmitContext& ctx);
void EmitWriteGlobal32(EmitContext& ctx, std::string_view address, std::string_view value);
void EmitWriteGlobal64(EmitContext& ctx, std::string_view address, std::string_view value);
void EmitWriteGlobal128(EmitContext& ctx, std::string_view address, std::string_view value);
void EmitLoadStorageU8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset);
void EmitLoadStorageS8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset);
void EmitLoadStorageU16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset);
void EmitLoadStorageS16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset);
void EmitLoadStorage32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset);
void EmitLoadStorage64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset);
void EmitLoadStorage128(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset);
void EmitWriteStorageU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
std::string_view value);
void EmitWriteStorageS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
std::string_view value);
void EmitWriteStorageU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
std::string_view value);
void EmitWriteStorageS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
std::string_view value);
void EmitWriteStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
std::string_view value);
void EmitWriteStorage64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
std::string_view value);
void EmitWriteStorage128(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
std::string_view value);
void EmitLoadSharedU8(EmitContext& ctx, IR::Inst& inst, std::string_view offset);
void EmitLoadSharedS8(EmitContext& ctx, IR::Inst& inst, std::string_view offset);
void EmitLoadSharedU16(EmitContext& ctx, IR::Inst& inst, std::string_view offset);
void EmitLoadSharedS16(EmitContext& ctx, IR::Inst& inst, std::string_view offset);
void EmitLoadSharedU32(EmitContext& ctx, IR::Inst& inst, std::string_view offset);
void EmitLoadSharedU64(EmitContext& ctx, IR::Inst& inst, std::string_view offset);
void EmitLoadSharedU128(EmitContext& ctx, IR::Inst& inst, std::string_view offset);
void EmitWriteSharedU8(EmitContext& ctx, std::string_view offset, std::string_view value);
void EmitWriteSharedU16(EmitContext& ctx, std::string_view offset, std::string_view value);
void EmitWriteSharedU32(EmitContext& ctx, std::string_view offset, std::string_view value);
void EmitWriteSharedU64(EmitContext& ctx, std::string_view offset, std::string_view value);
void EmitWriteSharedU128(EmitContext& ctx, std::string_view offset, std::string_view value);
void EmitCompositeConstructU32x2(EmitContext& ctx, IR::Inst& inst, std::string_view e1,
std::string_view e2);
void EmitCompositeConstructU32x3(EmitContext& ctx, IR::Inst& inst, std::string_view e1,
std::string_view e2, std::string_view e3);
void EmitCompositeConstructU32x4(EmitContext& ctx, IR::Inst& inst, std::string_view e1,
std::string_view e2, std::string_view e3, std::string_view e4);
void EmitCompositeExtractU32x2(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
u32 index);
void EmitCompositeExtractU32x3(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
u32 index);
void EmitCompositeExtractU32x4(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
u32 index);
void EmitCompositeInsertU32x2(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
std::string_view object, u32 index);
void EmitCompositeInsertU32x3(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
std::string_view object, u32 index);
void EmitCompositeInsertU32x4(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
std::string_view object, u32 index);
void EmitCompositeConstructF16x2(EmitContext& ctx, std::string_view e1, std::string_view e2);
void EmitCompositeConstructF16x3(EmitContext& ctx, std::string_view e1, std::string_view e2,
std::string_view e3);
void EmitCompositeConstructF16x4(EmitContext& ctx, std::string_view e1, std::string_view e2,
std::string_view e3, std::string_view e4);
void EmitCompositeExtractF16x2(EmitContext& ctx, std::string_view composite, u32 index);
void EmitCompositeExtractF16x3(EmitContext& ctx, std::string_view composite, u32 index);
void EmitCompositeExtractF16x4(EmitContext& ctx, std::string_view composite, u32 index);
void EmitCompositeInsertF16x2(EmitContext& ctx, std::string_view composite, std::string_view object,
u32 index);
void EmitCompositeInsertF16x3(EmitContext& ctx, std::string_view composite, std::string_view object,
u32 index);
void EmitCompositeInsertF16x4(EmitContext& ctx, std::string_view composite, std::string_view object,
u32 index);
void EmitCompositeConstructF32x2(EmitContext& ctx, IR::Inst& inst, std::string_view e1,
std::string_view e2);
void EmitCompositeConstructF32x3(EmitContext& ctx, IR::Inst& inst, std::string_view e1,
std::string_view e2, std::string_view e3);
void EmitCompositeConstructF32x4(EmitContext& ctx, IR::Inst& inst, std::string_view e1,
std::string_view e2, std::string_view e3, std::string_view e4);
void EmitCompositeExtractF32x2(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
u32 index);
void EmitCompositeExtractF32x3(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
u32 index);
void EmitCompositeExtractF32x4(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
u32 index);
void EmitCompositeInsertF32x2(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
std::string_view object, u32 index);
void EmitCompositeInsertF32x3(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
std::string_view object, u32 index);
void EmitCompositeInsertF32x4(EmitContext& ctx, IR::Inst& inst, std::string_view composite,
std::string_view object, u32 index);
void EmitCompositeConstructF64x2(EmitContext& ctx);
void EmitCompositeConstructF64x3(EmitContext& ctx);
void EmitCompositeConstructF64x4(EmitContext& ctx);
void EmitCompositeExtractF64x2(EmitContext& ctx);
void EmitCompositeExtractF64x3(EmitContext& ctx);
void EmitCompositeExtractF64x4(EmitContext& ctx);
void EmitCompositeInsertF64x2(EmitContext& ctx, std::string_view composite, std::string_view object,
u32 index);
void EmitCompositeInsertF64x3(EmitContext& ctx, std::string_view composite, std::string_view object,
u32 index);
void EmitCompositeInsertF64x4(EmitContext& ctx, std::string_view composite, std::string_view object,
u32 index);
void EmitSelectU1(EmitContext& ctx, IR::Inst& inst, std::string_view cond,
std::string_view true_value, std::string_view false_value);
void EmitSelectU8(EmitContext& ctx, std::string_view cond, std::string_view true_value,
std::string_view false_value);
void EmitSelectU16(EmitContext& ctx, std::string_view cond, std::string_view true_value,
std::string_view false_value);
void EmitSelectU32(EmitContext& ctx, IR::Inst& inst, std::string_view cond,
std::string_view true_value, std::string_view false_value);
void EmitSelectU64(EmitContext& ctx, IR::Inst& inst, std::string_view cond,
std::string_view true_value, std::string_view false_value);
void EmitSelectF16(EmitContext& ctx, std::string_view cond, std::string_view true_value,
std::string_view false_value);
void EmitSelectF32(EmitContext& ctx, IR::Inst& inst, std::string_view cond,
std::string_view true_value, std::string_view false_value);
void EmitSelectF64(EmitContext& ctx, IR::Inst& inst, std::string_view cond,
std::string_view true_value, std::string_view false_value);
void EmitBitCastU16F16(EmitContext& ctx, IR::Inst& inst);
void EmitBitCastU32F32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitBitCastU64F64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitBitCastF16U16(EmitContext& ctx, IR::Inst& inst);
void EmitBitCastF32U32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitBitCastF64U64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitPackUint2x32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitUnpackUint2x32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitPackFloat2x16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitUnpackFloat2x16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitPackHalf2x16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitUnpackHalf2x16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitPackDouble2x32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitUnpackDouble2x32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitGetZeroFromOp(EmitContext& ctx);
void EmitGetSignFromOp(EmitContext& ctx);
void EmitGetCarryFromOp(EmitContext& ctx);
void EmitGetOverflowFromOp(EmitContext& ctx);
void EmitGetSparseFromOp(EmitContext& ctx);
void EmitGetInBoundsFromOp(EmitContext& ctx);
void EmitFPAbs16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPAbs32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPAbs64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPAdd16(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitFPAdd32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitFPAdd64(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitFPFma16(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b,
std::string_view c);
void EmitFPFma32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b,
std::string_view c);
void EmitFPFma64(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b,
std::string_view c);
void EmitFPMax32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitFPMax64(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitFPMin32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitFPMin64(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitFPMul16(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitFPMul32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitFPMul64(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitFPNeg16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPNeg32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPNeg64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPSin(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPCos(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPExp2(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPLog2(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPRecip32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPRecip64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPRecipSqrt32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPRecipSqrt64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPSqrt(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPSaturate16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPSaturate32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPSaturate64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPClamp16(EmitContext& ctx, IR::Inst& inst, std::string_view value,
std::string_view min_value, std::string_view max_value);
void EmitFPClamp32(EmitContext& ctx, IR::Inst& inst, std::string_view value,
std::string_view min_value, std::string_view max_value);
void EmitFPClamp64(EmitContext& ctx, IR::Inst& inst, std::string_view value,
std::string_view min_value, std::string_view max_value);
void EmitFPRoundEven16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPRoundEven32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPRoundEven64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPFloor16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPFloor32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPFloor64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPCeil16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPCeil32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPCeil64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPTrunc16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPTrunc32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPTrunc64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPOrdEqual16(EmitContext& ctx, std::string_view lhs, std::string_view rhs);
void EmitFPOrdEqual32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs, std::string_view rhs);
void EmitFPOrdEqual64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs, std::string_view rhs);
void EmitFPUnordEqual16(EmitContext& ctx, std::string_view lhs, std::string_view rhs);
void EmitFPUnordEqual32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPUnordEqual64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPOrdNotEqual16(EmitContext& ctx, std::string_view lhs, std::string_view rhs);
void EmitFPOrdNotEqual32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPOrdNotEqual64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPUnordNotEqual16(EmitContext& ctx, std::string_view lhs, std::string_view rhs);
void EmitFPUnordNotEqual32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPUnordNotEqual64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPOrdLessThan16(EmitContext& ctx, std::string_view lhs, std::string_view rhs);
void EmitFPOrdLessThan32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPOrdLessThan64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPUnordLessThan16(EmitContext& ctx, std::string_view lhs, std::string_view rhs);
void EmitFPUnordLessThan32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPUnordLessThan64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPOrdGreaterThan16(EmitContext& ctx, std::string_view lhs, std::string_view rhs);
void EmitFPOrdGreaterThan32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPOrdGreaterThan64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPUnordGreaterThan16(EmitContext& ctx, std::string_view lhs, std::string_view rhs);
void EmitFPUnordGreaterThan32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPUnordGreaterThan64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPOrdLessThanEqual16(EmitContext& ctx, std::string_view lhs, std::string_view rhs);
void EmitFPOrdLessThanEqual32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPOrdLessThanEqual64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPUnordLessThanEqual16(EmitContext& ctx, std::string_view lhs, std::string_view rhs);
void EmitFPUnordLessThanEqual32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPUnordLessThanEqual64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPOrdGreaterThanEqual16(EmitContext& ctx, std::string_view lhs, std::string_view rhs);
void EmitFPOrdGreaterThanEqual32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPOrdGreaterThanEqual64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPUnordGreaterThanEqual16(EmitContext& ctx, std::string_view lhs, std::string_view rhs);
void EmitFPUnordGreaterThanEqual32(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPUnordGreaterThanEqual64(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitFPIsNan16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPIsNan32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFPIsNan64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitIAdd32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitIAdd64(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitISub32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitISub64(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitIMul32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitINeg32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitINeg64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitIAbs32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitShiftLeftLogical32(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view shift);
void EmitShiftLeftLogical64(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view shift);
void EmitShiftRightLogical32(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view shift);
void EmitShiftRightLogical64(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view shift);
void EmitShiftRightArithmetic32(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view shift);
void EmitShiftRightArithmetic64(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view shift);
void EmitBitwiseAnd32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitBitwiseOr32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitBitwiseXor32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitBitFieldInsert(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view insert, std::string_view offset, std::string_view count);
void EmitBitFieldSExtract(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view offset, std::string_view count);
void EmitBitFieldUExtract(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view offset, std::string_view count);
void EmitBitReverse32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitBitCount32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitBitwiseNot32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFindSMsb32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitFindUMsb32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitSMin32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitUMin32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitSMax32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitUMax32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitSClamp32(EmitContext& ctx, IR::Inst& inst, std::string_view value, std::string_view min,
std::string_view max);
void EmitUClamp32(EmitContext& ctx, IR::Inst& inst, std::string_view value, std::string_view min,
std::string_view max);
void EmitSLessThan(EmitContext& ctx, IR::Inst& inst, std::string_view lhs, std::string_view rhs);
void EmitULessThan(EmitContext& ctx, IR::Inst& inst, std::string_view lhs, std::string_view rhs);
void EmitIEqual(EmitContext& ctx, IR::Inst& inst, std::string_view lhs, std::string_view rhs);
void EmitSLessThanEqual(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitULessThanEqual(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitSGreaterThan(EmitContext& ctx, IR::Inst& inst, std::string_view lhs, std::string_view rhs);
void EmitUGreaterThan(EmitContext& ctx, IR::Inst& inst, std::string_view lhs, std::string_view rhs);
void EmitINotEqual(EmitContext& ctx, IR::Inst& inst, std::string_view lhs, std::string_view rhs);
void EmitSGreaterThanEqual(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitUGreaterThanEqual(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs);
void EmitSharedAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value);
void EmitSharedAtomicSMin32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value);
void EmitSharedAtomicUMin32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value);
void EmitSharedAtomicSMax32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value);
void EmitSharedAtomicUMax32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value);
void EmitSharedAtomicInc32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value);
void EmitSharedAtomicDec32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value);
void EmitSharedAtomicAnd32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value);
void EmitSharedAtomicOr32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value);
void EmitSharedAtomicXor32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value);
void EmitSharedAtomicExchange32(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value);
void EmitSharedAtomicExchange64(EmitContext& ctx, IR::Inst& inst, std::string_view pointer_offset,
std::string_view value);
void EmitStorageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicInc32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicDec32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicOr32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicXor32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicIAdd64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicSMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicUMin64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicSMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicUMax64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicAnd64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicOr64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicXor64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicExchange64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicAddF32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicAddF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicAddF32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicMinF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicMinF32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicMaxF16x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitStorageAtomicMaxF32x2(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset, std::string_view value);
void EmitGlobalAtomicIAdd32(EmitContext& ctx);
void EmitGlobalAtomicSMin32(EmitContext& ctx);
void EmitGlobalAtomicUMin32(EmitContext& ctx);
void EmitGlobalAtomicSMax32(EmitContext& ctx);
void EmitGlobalAtomicUMax32(EmitContext& ctx);
void EmitGlobalAtomicInc32(EmitContext& ctx);
void EmitGlobalAtomicDec32(EmitContext& ctx);
void EmitGlobalAtomicAnd32(EmitContext& ctx);
void EmitGlobalAtomicOr32(EmitContext& ctx);
void EmitGlobalAtomicXor32(EmitContext& ctx);
void EmitGlobalAtomicExchange32(EmitContext& ctx);
void EmitGlobalAtomicIAdd64(EmitContext& ctx);
void EmitGlobalAtomicSMin64(EmitContext& ctx);
void EmitGlobalAtomicUMin64(EmitContext& ctx);
void EmitGlobalAtomicSMax64(EmitContext& ctx);
void EmitGlobalAtomicUMax64(EmitContext& ctx);
void EmitGlobalAtomicInc64(EmitContext& ctx);
void EmitGlobalAtomicDec64(EmitContext& ctx);
void EmitGlobalAtomicAnd64(EmitContext& ctx);
void EmitGlobalAtomicOr64(EmitContext& ctx);
void EmitGlobalAtomicXor64(EmitContext& ctx);
void EmitGlobalAtomicExchange64(EmitContext& ctx);
void EmitGlobalAtomicAddF32(EmitContext& ctx);
void EmitGlobalAtomicAddF16x2(EmitContext& ctx);
void EmitGlobalAtomicAddF32x2(EmitContext& ctx);
void EmitGlobalAtomicMinF16x2(EmitContext& ctx);
void EmitGlobalAtomicMinF32x2(EmitContext& ctx);
void EmitGlobalAtomicMaxF16x2(EmitContext& ctx);
void EmitGlobalAtomicMaxF32x2(EmitContext& ctx);
void EmitLogicalOr(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitLogicalAnd(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitLogicalXor(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b);
void EmitLogicalNot(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertS16F16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertS16F32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertS16F64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertS32F16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertS32F32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertS32F64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertS64F16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertS64F32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertS64F64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertU16F16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertU16F32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertU16F64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertU32F16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertU32F32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertU32F64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertU64F16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertU64F32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertU64F64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertU64U32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertU32U64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF16F32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF32F16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF32F64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF64F32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF16S8(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF16S16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF16S32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF16S64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF16U8(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF16U16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF16U32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF16U64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF32S8(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF32S16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF32S32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF32S64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF32U8(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF32U16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF32U32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF32U64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF64S8(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF64S16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF64S32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF64S64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF64U8(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF64U16(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF64U32(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitConvertF64U64(EmitContext& ctx, IR::Inst& inst, std::string_view value);
void EmitBindlessImageSampleImplicitLod(EmitContext&);
void EmitBindlessImageSampleExplicitLod(EmitContext&);
void EmitBindlessImageSampleDrefImplicitLod(EmitContext&);
void EmitBindlessImageSampleDrefExplicitLod(EmitContext&);
void EmitBindlessImageGather(EmitContext&);
void EmitBindlessImageGatherDref(EmitContext&);
void EmitBindlessImageFetch(EmitContext&);
void EmitBindlessImageQueryDimensions(EmitContext&);
void EmitBindlessImageQueryLod(EmitContext&);
void EmitBindlessImageGradient(EmitContext&);
void EmitBindlessImageRead(EmitContext&);
void EmitBindlessImageWrite(EmitContext&);
void EmitBoundImageSampleImplicitLod(EmitContext&);
void EmitBoundImageSampleExplicitLod(EmitContext&);
void EmitBoundImageSampleDrefImplicitLod(EmitContext&);
void EmitBoundImageSampleDrefExplicitLod(EmitContext&);
void EmitBoundImageGather(EmitContext&);
void EmitBoundImageGatherDref(EmitContext&);
void EmitBoundImageFetch(EmitContext&);
void EmitBoundImageQueryDimensions(EmitContext&);
void EmitBoundImageQueryLod(EmitContext&);
void EmitBoundImageGradient(EmitContext&);
void EmitBoundImageRead(EmitContext&);
void EmitBoundImageWrite(EmitContext&);
void EmitImageSampleImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view bias_lc,
const IR::Value& offset);
void EmitImageSampleExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view lod_lc,
const IR::Value& offset);
void EmitImageSampleDrefImplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view dref,
std::string_view bias_lc, const IR::Value& offset);
void EmitImageSampleDrefExplicitLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view dref,
std::string_view lod_lc, const IR::Value& offset);
void EmitImageGather(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, const IR::Value& offset, const IR::Value& offset2);
void EmitImageGatherDref(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, const IR::Value& offset, const IR::Value& offset2,
std::string_view dref);
void EmitImageFetch(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view offset, std::string_view lod,
std::string_view ms);
void EmitImageQueryDimensions(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view lod);
void EmitImageQueryLod(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords);
void EmitImageGradient(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, const IR::Value& derivatives,
const IR::Value& offset, const IR::Value& lod_clamp);
void EmitImageRead(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords);
void EmitImageWrite(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view color);
void EmitBindlessImageAtomicIAdd32(EmitContext&);
void EmitBindlessImageAtomicSMin32(EmitContext&);
void EmitBindlessImageAtomicUMin32(EmitContext&);
void EmitBindlessImageAtomicSMax32(EmitContext&);
void EmitBindlessImageAtomicUMax32(EmitContext&);
void EmitBindlessImageAtomicInc32(EmitContext&);
void EmitBindlessImageAtomicDec32(EmitContext&);
void EmitBindlessImageAtomicAnd32(EmitContext&);
void EmitBindlessImageAtomicOr32(EmitContext&);
void EmitBindlessImageAtomicXor32(EmitContext&);
void EmitBindlessImageAtomicExchange32(EmitContext&);
void EmitBoundImageAtomicIAdd32(EmitContext&);
void EmitBoundImageAtomicSMin32(EmitContext&);
void EmitBoundImageAtomicUMin32(EmitContext&);
void EmitBoundImageAtomicSMax32(EmitContext&);
void EmitBoundImageAtomicUMax32(EmitContext&);
void EmitBoundImageAtomicInc32(EmitContext&);
void EmitBoundImageAtomicDec32(EmitContext&);
void EmitBoundImageAtomicAnd32(EmitContext&);
void EmitBoundImageAtomicOr32(EmitContext&);
void EmitBoundImageAtomicXor32(EmitContext&);
void EmitBoundImageAtomicExchange32(EmitContext&);
void EmitImageAtomicIAdd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value);
void EmitImageAtomicSMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value);
void EmitImageAtomicUMin32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value);
void EmitImageAtomicSMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value);
void EmitImageAtomicUMax32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value);
void EmitImageAtomicInc32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value);
void EmitImageAtomicDec32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value);
void EmitImageAtomicAnd32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value);
void EmitImageAtomicOr32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value);
void EmitImageAtomicXor32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value);
void EmitImageAtomicExchange32(EmitContext& ctx, IR::Inst& inst, const IR::Value& index,
std::string_view coords, std::string_view value);
void EmitLaneId(EmitContext& ctx, IR::Inst& inst);
void EmitVoteAll(EmitContext& ctx, IR::Inst& inst, std::string_view pred);
void EmitVoteAny(EmitContext& ctx, IR::Inst& inst, std::string_view pred);
void EmitVoteEqual(EmitContext& ctx, IR::Inst& inst, std::string_view pred);
void EmitSubgroupBallot(EmitContext& ctx, IR::Inst& inst, std::string_view pred);
void EmitSubgroupEqMask(EmitContext& ctx, IR::Inst& inst);
void EmitSubgroupLtMask(EmitContext& ctx, IR::Inst& inst);
void EmitSubgroupLeMask(EmitContext& ctx, IR::Inst& inst);
void EmitSubgroupGtMask(EmitContext& ctx, IR::Inst& inst);
void EmitSubgroupGeMask(EmitContext& ctx, IR::Inst& inst);
void EmitShuffleIndex(EmitContext& ctx, IR::Inst& inst, std::string_view value,
std::string_view index, std::string_view clamp,
std::string_view segmentation_mask);
void EmitShuffleUp(EmitContext& ctx, IR::Inst& inst, std::string_view value, std::string_view index,
std::string_view clamp, std::string_view segmentation_mask);
void EmitShuffleDown(EmitContext& ctx, IR::Inst& inst, std::string_view value,
std::string_view index, std::string_view clamp,
std::string_view segmentation_mask);
void EmitShuffleButterfly(EmitContext& ctx, IR::Inst& inst, std::string_view value,
std::string_view index, std::string_view clamp,
std::string_view segmentation_mask);
void EmitFSwizzleAdd(EmitContext& ctx, IR::Inst& inst, std::string_view op_a, std::string_view op_b,
std::string_view swizzle);
void EmitDPdxFine(EmitContext& ctx, IR::Inst& inst, std::string_view op_a);
void EmitDPdyFine(EmitContext& ctx, IR::Inst& inst, std::string_view op_a);
void EmitDPdxCoarse(EmitContext& ctx, IR::Inst& inst, std::string_view op_a);
void EmitDPdyCoarse(EmitContext& ctx, IR::Inst& inst, std::string_view op_a);
} // namespace Shader::Backend::GLSL

View File

@ -0,0 +1,253 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
namespace Shader::Backend::GLSL {
namespace {
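// Maxwell condition codes (zero, sign, carry, overflow) are modeled in the IR
// as pseudo-operations attached to the producing instruction. These helpers
// emit a flag only when something consumes it, then invalidate the pseudo-op
// so the flag is not generated a second time.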
void SetZeroFlag(EmitContext& ctx, IR::Inst& inst, std::string_view result) {
IR::Inst* const zero{inst.GetAssociatedPseudoOperation(IR::Opcode::GetZeroFromOp)};
if (!zero) {
return;
}
ctx.AddU1("{}={}==0;", *zero, result);
zero->Invalidate();
}
void SetSignFlag(EmitContext& ctx, IR::Inst& inst, std::string_view result) {
IR::Inst* const sign{inst.GetAssociatedPseudoOperation(IR::Opcode::GetSignFromOp)};
if (!sign) {
return;
}
ctx.AddU1("{}=int({})<0;", *sign, result);
sign->Invalidate();
}
void BitwiseLogicalOp(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b,
char lop) {
const auto result{ctx.var_alloc.Define(inst, GlslVarType::U32)};
ctx.Add("{}={}{}{};", result, a, lop, b);
SetZeroFlag(ctx, inst, result);
SetSignFlag(ctx, inst, result);
}
} // Anonymous namespace
void EmitIAdd32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
// Compute the overflow CC first as it requires the original operand values,
// which may be overwritten by the result of the addition
if (IR::Inst * overflow{inst.GetAssociatedPseudoOperation(IR::Opcode::GetOverflowFromOp)}) {
// https://stackoverflow.com/questions/55468823/how-to-detect-integer-overflow-in-c
constexpr u32 s32_max{static_cast<u32>(std::numeric_limits<s32>::max())};
const auto sub_a{fmt::format("{}u-{}", s32_max, a)};
const auto positive_result{fmt::format("int({})>int({})", b, sub_a)};
const auto negative_result{fmt::format("int({})<int({})", b, sub_a)};
ctx.AddU1("{}=int({})>=0?{}:{};", *overflow, a, positive_result, negative_result);
overflow->Invalidate();
}
const auto result{ctx.var_alloc.Define(inst, GlslVarType::U32)};
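// uaddCarry writes the carry-out of the addition into its third argument;
// setting uses_cc_carry asks the context to declare the helper variable
// `carry` in the shader preamble (an assumption based on its use here).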
if (IR::Inst* const carry{inst.GetAssociatedPseudoOperation(IR::Opcode::GetCarryFromOp)}) {
ctx.uses_cc_carry = true;
ctx.Add("{}=uaddCarry({},{},carry);", result, a, b);
ctx.AddU1("{}=carry!=0;", *carry);
carry->Invalidate();
} else {
ctx.Add("{}={}+{};", result, a, b);
}
SetZeroFlag(ctx, inst, result);
SetSignFlag(ctx, inst, result);
}
void EmitIAdd64(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
ctx.AddU64("{}={}+{};", inst, a, b);
}
void EmitISub32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
ctx.AddU32("{}={}-{};", inst, a, b);
}
void EmitISub64(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
ctx.AddU64("{}={}-{};", inst, a, b);
}
void EmitIMul32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
ctx.AddU32("{}=uint({}*{});", inst, a, b);
}
void EmitINeg32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32("{}=uint(-({}));", inst, value);
}
void EmitINeg64(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU64("{}=-({});", inst, value);
}
void EmitIAbs32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32("{}=abs(int({}));", inst, value);
}
void EmitShiftLeftLogical32(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view shift) {
ctx.AddU32("{}={}<<{};", inst, base, shift);
}
void EmitShiftLeftLogical64(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view shift) {
ctx.AddU64("{}={}<<{};", inst, base, shift);
}
void EmitShiftRightLogical32(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view shift) {
ctx.AddU32("{}={}>>{};", inst, base, shift);
}
void EmitShiftRightLogical64(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view shift) {
ctx.AddU64("{}={}>>{};", inst, base, shift);
}
void EmitShiftRightArithmetic32(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view shift) {
ctx.AddU32("{}=int({})>>{};", inst, base, shift);
}
void EmitShiftRightArithmetic64(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view shift) {
ctx.AddU64("{}=int64_t({})>>{};", inst, base, shift);
}
void EmitBitwiseAnd32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
BitwiseLogicalOp(ctx, inst, a, b, '&');
}
void EmitBitwiseOr32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
BitwiseLogicalOp(ctx, inst, a, b, '|');
}
void EmitBitwiseXor32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
BitwiseLogicalOp(ctx, inst, a, b, '^');
}
void EmitBitFieldInsert(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view insert, std::string_view offset, std::string_view count) {
ctx.AddU32("{}=bitfieldInsert({},{},int({}),int({}));", inst, base, insert, offset, count);
}
void EmitBitFieldSExtract(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view offset, std::string_view count) {
const auto result{ctx.var_alloc.Define(inst, GlslVarType::U32)};
ctx.Add("{}=uint(bitfieldExtract(int({}),int({}),int({})));", result, base, offset, count);
SetZeroFlag(ctx, inst, result);
SetSignFlag(ctx, inst, result);
}
void EmitBitFieldUExtract(EmitContext& ctx, IR::Inst& inst, std::string_view base,
std::string_view offset, std::string_view count) {
const auto result{ctx.var_alloc.Define(inst, GlslVarType::U32)};
ctx.Add("{}=uint(bitfieldExtract(uint({}),int({}),int({})));", result, base, offset, count);
SetZeroFlag(ctx, inst, result);
SetSignFlag(ctx, inst, result);
}
void EmitBitReverse32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32("{}=bitfieldReverse({});", inst, value);
}
void EmitBitCount32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32("{}=bitCount({});", inst, value);
}
void EmitBitwiseNot32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32("{}=~{};", inst, value);
}
void EmitFindSMsb32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32("{}=findMSB(int({}));", inst, value);
}
void EmitFindUMsb32(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU32("{}=findMSB(uint({}));", inst, value);
}
void EmitSMin32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
ctx.AddU32("{}=min(int({}),int({}));", inst, a, b);
}
void EmitUMin32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
ctx.AddU32("{}=min(uint({}),uint({}));", inst, a, b);
}
void EmitSMax32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
ctx.AddU32("{}=max(int({}),int({}));", inst, a, b);
}
void EmitUMax32(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
ctx.AddU32("{}=max(uint({}),uint({}));", inst, a, b);
}
void EmitSClamp32(EmitContext& ctx, IR::Inst& inst, std::string_view value, std::string_view min,
std::string_view max) {
const auto result{ctx.var_alloc.Define(inst, GlslVarType::U32)};
ctx.Add("{}=clamp(int({}),int({}),int({}));", result, value, min, max);
SetZeroFlag(ctx, inst, result);
SetSignFlag(ctx, inst, result);
}
void EmitUClamp32(EmitContext& ctx, IR::Inst& inst, std::string_view value, std::string_view min,
std::string_view max) {
const auto result{ctx.var_alloc.Define(inst, GlslVarType::U32)};
ctx.Add("{}=clamp(uint({}),uint({}),uint({}));", result, value, min, max);
SetZeroFlag(ctx, inst, result);
SetSignFlag(ctx, inst, result);
}
void EmitSLessThan(EmitContext& ctx, IR::Inst& inst, std::string_view lhs, std::string_view rhs) {
ctx.AddU1("{}=int({})<int({});", inst, lhs, rhs);
}
void EmitULessThan(EmitContext& ctx, IR::Inst& inst, std::string_view lhs, std::string_view rhs) {
ctx.AddU1("{}=uint({})<uint({});", inst, lhs, rhs);
}
void EmitIEqual(EmitContext& ctx, IR::Inst& inst, std::string_view lhs, std::string_view rhs) {
ctx.AddU1("{}={}=={};", inst, lhs, rhs);
}
void EmitSLessThanEqual(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
ctx.AddU1("{}=int({})<=int({});", inst, lhs, rhs);
}
void EmitULessThanEqual(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
ctx.AddU1("{}=uint({})<=uint({});", inst, lhs, rhs);
}
void EmitSGreaterThan(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
ctx.AddU1("{}=int({})>int({});", inst, lhs, rhs);
}
void EmitUGreaterThan(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
ctx.AddU1("{}=uint({})>uint({});", inst, lhs, rhs);
}
void EmitINotEqual(EmitContext& ctx, IR::Inst& inst, std::string_view lhs, std::string_view rhs) {
ctx.AddU1("{}={}!={};", inst, lhs, rhs);
}
void EmitSGreaterThanEqual(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
ctx.AddU1("{}=int({})>=int({});", inst, lhs, rhs);
}
void EmitUGreaterThanEqual(EmitContext& ctx, IR::Inst& inst, std::string_view lhs,
std::string_view rhs) {
ctx.AddU1("{}=uint({})>=uint({});", inst, lhs, rhs);
}
} // namespace Shader::Backend::GLSL

View File

@ -0,0 +1,28 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
namespace Shader::Backend::GLSL {
void EmitLogicalOr(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
ctx.AddU1("{}={}||{};", inst, a, b);
}
void EmitLogicalAnd(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
ctx.AddU1("{}={}&&{};", inst, a, b);
}
void EmitLogicalXor(EmitContext& ctx, IR::Inst& inst, std::string_view a, std::string_view b) {
ctx.AddU1("{}={}^^{};", inst, a, b);
}
void EmitLogicalNot(EmitContext& ctx, IR::Inst& inst, std::string_view value) {
ctx.AddU1("{}=!{};", inst, value);
}
} // namespace Shader::Backend::GLSL

View File

@ -0,0 +1,202 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/profile.h"
namespace Shader::Backend::GLSL {
namespace {
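// GLSL exposes SSBOs here as arrays of 32-bit uints, so there is no direct
// way to store a single byte or short. Sub-word writes therefore
// read-modify-write the containing word: the CAS loop below retries
// bitfieldInsert with atomicCompSwap until no other invocation has touched
// the word in between, which keeps the neighbouring bytes intact.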
constexpr char cas_loop[]{"for(;;){{uint old_value={};uint "
"cas_result=atomicCompSwap({},old_value,bitfieldInsert({},{},{},{}));"
"if(cas_result==old_value){{break;}}}}"};
void SsboWriteCas(EmitContext& ctx, const IR::Value& binding, std::string_view offset_var,
std::string_view value, std::string_view bit_offset, u32 num_bits) {
const auto ssbo{fmt::format("{}_ssbo{}[{}>>2]", ctx.stage_name, binding.U32(), offset_var)};
ctx.Add(cas_loop, ssbo, ssbo, ssbo, value, bit_offset, num_bits);
}
} // Anonymous namespace
void EmitLoadGlobalU8(EmitContext&) {
NotImplemented();
}
void EmitLoadGlobalS8(EmitContext&) {
NotImplemented();
}
void EmitLoadGlobalU16(EmitContext&) {
NotImplemented();
}
void EmitLoadGlobalS16(EmitContext&) {
NotImplemented();
}
void EmitLoadGlobal32(EmitContext& ctx, IR::Inst& inst, std::string_view address) {
if (ctx.profile.support_int64) {
return ctx.AddU32("{}=LoadGlobal32({});", inst, address);
}
LOG_WARNING(Shader_GLSL, "Int64 not supported, ignoring memory operation");
ctx.AddU32("{}=0u;", inst);
}
void EmitLoadGlobal64(EmitContext& ctx, IR::Inst& inst, std::string_view address) {
if (ctx.profile.support_int64) {
return ctx.AddU32x2("{}=LoadGlobal64({});", inst, address);
}
LOG_WARNING(Shader_GLSL, "Int64 not supported, ignoring memory operation");
ctx.AddU32x2("{}=uvec2(0);", inst);
}
void EmitLoadGlobal128(EmitContext& ctx, IR::Inst& inst, std::string_view address) {
if (ctx.profile.support_int64) {
return ctx.AddU32x4("{}=LoadGlobal128({});", inst, address);
}
LOG_WARNING(Shader_GLSL, "Int64 not supported, ignoring memory operation");
ctx.AddU32x4("{}=uvec4(0);", inst);
}
void EmitWriteGlobalU8(EmitContext&) {
NotImplemented();
}
void EmitWriteGlobalS8(EmitContext&) {
NotImplemented();
}
void EmitWriteGlobalU16(EmitContext&) {
NotImplemented();
}
void EmitWriteGlobalS16(EmitContext&) {
NotImplemented();
}
void EmitWriteGlobal32(EmitContext& ctx, std::string_view address, std::string_view value) {
if (ctx.profile.support_int64) {
return ctx.Add("WriteGlobal32({},{});", address, value);
}
LOG_WARNING(Shader_GLSL, "Int64 not supported, ignoring memory operation");
}
void EmitWriteGlobal64(EmitContext& ctx, std::string_view address, std::string_view value) {
if (ctx.profile.support_int64) {
return ctx.Add("WriteGlobal64({},{});", address, value);
}
LOG_WARNING(Shader_GLSL, "Int64 not supported, ignoring memory operation");
}
void EmitWriteGlobal128(EmitContext& ctx, std::string_view address, std::string_view value) {
if (ctx.profile.support_int64) {
return ctx.Add("WriteGlobal128({},{});", address, value);
}
LOG_WARNING(Shader_GLSL, "Int64 not supported, ignoring memory operation");
}
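// Storage loads mirror the sub-word stores: fetch the 32-bit word at
// offset>>2 and bitfieldExtract the requested byte or short. The signed
// variants go through the int overload of bitfieldExtract, which
// sign-extends the extracted field.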
void EmitLoadStorageU8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
const auto offset_var{ctx.var_alloc.Consume(offset)};
ctx.AddU32("{}=bitfieldExtract({}_ssbo{}[{}>>2],int({}%4)*8,8);", inst, ctx.stage_name,
binding.U32(), offset_var, offset_var);
}
void EmitLoadStorageS8(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
const auto offset_var{ctx.var_alloc.Consume(offset)};
ctx.AddU32("{}=bitfieldExtract(int({}_ssbo{}[{}>>2]),int({}%4)*8,8);", inst, ctx.stage_name,
binding.U32(), offset_var, offset_var);
}
void EmitLoadStorageU16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
const auto offset_var{ctx.var_alloc.Consume(offset)};
ctx.AddU32("{}=bitfieldExtract({}_ssbo{}[{}>>2],int(({}>>1)%2)*16,16);", inst, ctx.stage_name,
binding.U32(), offset_var, offset_var);
}
void EmitLoadStorageS16(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
const auto offset_var{ctx.var_alloc.Consume(offset)};
ctx.AddU32("{}=bitfieldExtract(int({}_ssbo{}[{}>>2]),int(({}>>1)%2)*16,16);", inst,
ctx.stage_name, binding.U32(), offset_var, offset_var);
}
void EmitLoadStorage32(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
const auto offset_var{ctx.var_alloc.Consume(offset)};
ctx.AddU32("{}={}_ssbo{}[{}>>2];", inst, ctx.stage_name, binding.U32(), offset_var);
}
void EmitLoadStorage64(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
const auto offset_var{ctx.var_alloc.Consume(offset)};
ctx.AddU32x2("{}=uvec2({}_ssbo{}[{}>>2],{}_ssbo{}[({}+4)>>2]);", inst, ctx.stage_name,
binding.U32(), offset_var, ctx.stage_name, binding.U32(), offset_var);
}
void EmitLoadStorage128(EmitContext& ctx, IR::Inst& inst, const IR::Value& binding,
const IR::Value& offset) {
const auto offset_var{ctx.var_alloc.Consume(offset)};
ctx.AddU32x4("{}=uvec4({}_ssbo{}[{}>>2],{}_ssbo{}[({}+4)>>2],{}_ssbo{}[({}+8)>>2],{}_ssbo{}[({}"
"+12)>>2]);",
inst, ctx.stage_name, binding.U32(), offset_var, ctx.stage_name, binding.U32(),
offset_var, ctx.stage_name, binding.U32(), offset_var, ctx.stage_name,
binding.U32(), offset_var);
}
void EmitWriteStorageU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
std::string_view value) {
const auto offset_var{ctx.var_alloc.Consume(offset)};
const auto bit_offset{fmt::format("int({}%4)*8", offset_var)};
SsboWriteCas(ctx, binding, offset_var, value, bit_offset, 8);
}
void EmitWriteStorageS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
std::string_view value) {
const auto offset_var{ctx.var_alloc.Consume(offset)};
const auto bit_offset{fmt::format("int({}%4)*8", offset_var)};
SsboWriteCas(ctx, binding, offset_var, value, bit_offset, 8);
}
void EmitWriteStorageU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
std::string_view value) {
const auto offset_var{ctx.var_alloc.Consume(offset)};
const auto bit_offset{fmt::format("int(({}>>1)%2)*16", offset_var)};
SsboWriteCas(ctx, binding, offset_var, value, bit_offset, 16);
}
void EmitWriteStorageS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
std::string_view value) {
const auto offset_var{ctx.var_alloc.Consume(offset)};
const auto bit_offset{fmt::format("int(({}>>1)%2)*16", offset_var)};
SsboWriteCas(ctx, binding, offset_var, value, bit_offset, 16);
}
void EmitWriteStorage32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
std::string_view value) {
const auto offset_var{ctx.var_alloc.Consume(offset)};
ctx.Add("{}_ssbo{}[{}>>2]={};", ctx.stage_name, binding.U32(), offset_var, value);
}
void EmitWriteStorage64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
std::string_view value) {
const auto offset_var{ctx.var_alloc.Consume(offset)};
ctx.Add("{}_ssbo{}[{}>>2]={}.x;", ctx.stage_name, binding.U32(), offset_var, value);
ctx.Add("{}_ssbo{}[({}+4)>>2]={}.y;", ctx.stage_name, binding.U32(), offset_var, value);
}
void EmitWriteStorage128(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
std::string_view value) {
const auto offset_var{ctx.var_alloc.Consume(offset)};
ctx.Add("{}_ssbo{}[{}>>2]={}.x;", ctx.stage_name, binding.U32(), offset_var, value);
ctx.Add("{}_ssbo{}[({}+4)>>2]={}.y;", ctx.stage_name, binding.U32(), offset_var, value);
ctx.Add("{}_ssbo{}[({}+8)>>2]={}.z;", ctx.stage_name, binding.U32(), offset_var, value);
ctx.Add("{}_ssbo{}[({}+12)>>2]={}.w;", ctx.stage_name, binding.U32(), offset_var, value);
}
} // namespace Shader::Backend::GLSL

View File

@ -0,0 +1,105 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
#ifdef _MSC_VER
#pragma warning(disable : 4100)
#endif
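// C4100 (unreferenced formal parameter) is silenced above because every stub
// in this file ignores its EmitContext until the opcode is implemented.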
namespace Shader::Backend::GLSL {
void EmitGetRegister(EmitContext& ctx) {
NotImplemented();
}
void EmitSetRegister(EmitContext& ctx) {
NotImplemented();
}
void EmitGetPred(EmitContext& ctx) {
NotImplemented();
}
void EmitSetPred(EmitContext& ctx) {
NotImplemented();
}
void EmitSetGotoVariable(EmitContext& ctx) {
NotImplemented();
}
void EmitGetGotoVariable(EmitContext& ctx) {
NotImplemented();
}
void EmitSetIndirectBranchVariable(EmitContext& ctx) {
NotImplemented();
}
void EmitGetIndirectBranchVariable(EmitContext& ctx) {
NotImplemented();
}
void EmitGetZFlag(EmitContext& ctx) {
NotImplemented();
}
void EmitGetSFlag(EmitContext& ctx) {
NotImplemented();
}
void EmitGetCFlag(EmitContext& ctx) {
NotImplemented();
}
void EmitGetOFlag(EmitContext& ctx) {
NotImplemented();
}
void EmitSetZFlag(EmitContext& ctx) {
NotImplemented();
}
void EmitSetSFlag(EmitContext& ctx) {
NotImplemented();
}
void EmitSetCFlag(EmitContext& ctx) {
NotImplemented();
}
void EmitSetOFlag(EmitContext& ctx) {
NotImplemented();
}
void EmitGetZeroFromOp(EmitContext& ctx) {
NotImplemented();
}
void EmitGetSignFromOp(EmitContext& ctx) {
NotImplemented();
}
void EmitGetCarryFromOp(EmitContext& ctx) {
NotImplemented();
}
void EmitGetOverflowFromOp(EmitContext& ctx) {
NotImplemented();
}
void EmitGetSparseFromOp(EmitContext& ctx) {
NotImplemented();
}
void EmitGetInBoundsFromOp(EmitContext& ctx) {
NotImplemented();
}
} // namespace Shader::Backend::GLSL

View File

@ -0,0 +1,55 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
namespace Shader::Backend::GLSL {
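// Select instructions lower directly to the GLSL ternary operator; the 8-bit,
// 16-bit and half-float variants remain unimplemented stubs.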
void EmitSelectU1(EmitContext& ctx, IR::Inst& inst, std::string_view cond,
std::string_view true_value, std::string_view false_value) {
ctx.AddU1("{}={}?{}:{};", inst, cond, true_value, false_value);
}
void EmitSelectU8([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view cond,
[[maybe_unused]] std::string_view true_value,
[[maybe_unused]] std::string_view false_value) {
NotImplemented();
}
void EmitSelectU16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view cond,
[[maybe_unused]] std::string_view true_value,
[[maybe_unused]] std::string_view false_value) {
NotImplemented();
}
void EmitSelectU32(EmitContext& ctx, IR::Inst& inst, std::string_view cond,
std::string_view true_value, std::string_view false_value) {
ctx.AddU32("{}={}?{}:{};", inst, cond, true_value, false_value);
}
void EmitSelectU64(EmitContext& ctx, IR::Inst& inst, std::string_view cond,
std::string_view true_value, std::string_view false_value) {
ctx.AddU64("{}={}?{}:{};", inst, cond, true_value, false_value);
}
void EmitSelectF16([[maybe_unused]] EmitContext& ctx, [[maybe_unused]] std::string_view cond,
[[maybe_unused]] std::string_view true_value,
[[maybe_unused]] std::string_view false_value) {
NotImplemented();
}
void EmitSelectF32(EmitContext& ctx, IR::Inst& inst, std::string_view cond,
std::string_view true_value, std::string_view false_value) {
ctx.AddF32("{}={}?{}:{};", inst, cond, true_value, false_value);
}
void EmitSelectF64(EmitContext& ctx, IR::Inst& inst, std::string_view cond,
std::string_view true_value, std::string_view false_value) {
ctx.AddF64("{}={}?{}:{};", inst, cond, true_value, false_value);
}
} // namespace Shader::Backend::GLSL

View File

@ -0,0 +1,79 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
namespace Shader::Backend::GLSL {
namespace {
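// Shared memory is declared as a single uint array (smem), so 8- and 16-bit
// stores reuse the same compare-and-swap pattern as the SSBO writes: insert
// the value into the containing 32-bit word and retry until no concurrent
// invocation has modified it.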
constexpr char cas_loop[]{"for(;;){{uint old_value={};uint "
"cas_result=atomicCompSwap({},old_value,bitfieldInsert({},{},{},{}));"
"if(cas_result==old_value){{break;}}}}"};
void SharedWriteCas(EmitContext& ctx, std::string_view offset, std::string_view value,
std::string_view bit_offset, u32 num_bits) {
const auto smem{fmt::format("smem[{}>>2]", offset)};
ctx.Add(cas_loop, smem, smem, smem, value, bit_offset, num_bits);
}
} // Anonymous namespace
void EmitLoadSharedU8(EmitContext& ctx, IR::Inst& inst, std::string_view offset) {
ctx.AddU32("{}=bitfieldExtract(smem[{}>>2],int({}%4)*8,8);", inst, offset, offset);
}
void EmitLoadSharedS8(EmitContext& ctx, IR::Inst& inst, std::string_view offset) {
ctx.AddU32("{}=bitfieldExtract(int(smem[{}>>2]),int({}%4)*8,8);", inst, offset, offset);
}
void EmitLoadSharedU16(EmitContext& ctx, IR::Inst& inst, std::string_view offset) {
ctx.AddU32("{}=bitfieldExtract(smem[{}>>2],int(({}>>1)%2)*16,16);", inst, offset, offset);
}
void EmitLoadSharedS16(EmitContext& ctx, IR::Inst& inst, std::string_view offset) {
ctx.AddU32("{}=bitfieldExtract(int(smem[{}>>2]),int(({}>>1)%2)*16,16);", inst, offset, offset);
}
void EmitLoadSharedU32(EmitContext& ctx, IR::Inst& inst, std::string_view offset) {
ctx.AddU32("{}=smem[{}>>2];", inst, offset);
}
void EmitLoadSharedU64(EmitContext& ctx, IR::Inst& inst, std::string_view offset) {
ctx.AddU32x2("{}=uvec2(smem[{}>>2],smem[({}+4)>>2]);", inst, offset, offset);
}
void EmitLoadSharedU128(EmitContext& ctx, IR::Inst& inst, std::string_view offset) {
ctx.AddU32x4("{}=uvec4(smem[{}>>2],smem[({}+4)>>2],smem[({}+8)>>2],smem[({}+12)>>2]);", inst,
offset, offset, offset, offset);
}
void EmitWriteSharedU8(EmitContext& ctx, std::string_view offset, std::string_view value) {
const auto bit_offset{fmt::format("int({}%4)*8", offset)};
SharedWriteCas(ctx, offset, value, bit_offset, 8);
}
void EmitWriteSharedU16(EmitContext& ctx, std::string_view offset, std::string_view value) {
const auto bit_offset{fmt::format("int(({}>>1)%2)*16", offset)};
SharedWriteCas(ctx, offset, value, bit_offset, 16);
}
void EmitWriteSharedU32(EmitContext& ctx, std::string_view offset, std::string_view value) {
ctx.Add("smem[{}>>2]={};", offset, value);
}
void EmitWriteSharedU64(EmitContext& ctx, std::string_view offset, std::string_view value) {
ctx.Add("smem[{}>>2]={}.x;", offset, value);
ctx.Add("smem[({}+4)>>2]={}.y;", offset, value);
}
void EmitWriteSharedU128(EmitContext& ctx, std::string_view offset, std::string_view value) {
ctx.Add("smem[{}>>2]={}.x;", offset, value);
ctx.Add("smem[({}+4)>>2]={}.y;", offset, value);
ctx.Add("smem[({}+8)>>2]={}.z;", offset, value);
ctx.Add("smem[({}+12)>>2]={}.w;", offset, value);
}
} // namespace Shader::Backend::GLSL

View File

@ -0,0 +1,111 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/program.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/profile.h"
namespace Shader::Backend::GLSL {
namespace {
std::string_view OutputVertexIndex(EmitContext& ctx) {
return ctx.stage == Stage::TessellationControl ? "[gl_InvocationID]" : "";
}
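// Tessellation control shaders write per-vertex outputs, so every varying
// store is indexed with gl_InvocationID; other stages write plain scalars.
// Stored outputs are pre-initialized so partially written attributes still
// carry defined values, with the w component defaulting to 1 (and
// gl_Position to (0,0,0,1)); this runs at the prologue and again after each
// emitted vertex.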
void InitializeOutputVaryings(EmitContext& ctx) {
if (ctx.uses_geometry_passthrough) {
return;
}
if (ctx.stage == Stage::VertexB || ctx.stage == Stage::Geometry) {
ctx.Add("gl_Position=vec4(0,0,0,1);");
}
for (size_t index = 0; index < IR::NUM_GENERICS; ++index) {
if (!ctx.info.stores.Generic(index)) {
continue;
}
const auto& info_array{ctx.output_generics.at(index)};
const auto output_decorator{OutputVertexIndex(ctx)};
size_t element{};
while (element < info_array.size()) {
const auto& info{info_array.at(element)};
const auto varying_name{fmt::format("{}{}", info.name, output_decorator)};
switch (info.num_components) {
case 1: {
const char value{element == 3 ? '1' : '0'};
ctx.Add("{}={}.f;", varying_name, value);
break;
}
case 2:
case 3:
if (element + info.num_components < 4) {
ctx.Add("{}=vec{}(0);", varying_name, info.num_components);
} else {
// last element is the w component, must be initialized to 1
const auto zeros{info.num_components == 3 ? "0,0," : "0,"};
ctx.Add("{}=vec{}({}1);", varying_name, info.num_components, zeros);
}
break;
case 4:
ctx.Add("{}=vec4(0,0,0,1);", varying_name);
break;
default:
break;
}
element += info.num_components;
}
}
}
} // Anonymous namespace
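// GLSL has no SSA form, so phi nodes become ordinary variables: each
// predecessor block assigns its incoming value through EmitPhiMove before
// branching, and EmitPhi only has to make sure the variable exists.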
void EmitPhi(EmitContext& ctx, IR::Inst& phi) {
const size_t num_args{phi.NumArgs()};
for (size_t i = 0; i < num_args; ++i) {
ctx.var_alloc.Consume(phi.Arg(i));
}
if (!phi.Definition<Id>().is_valid) {
// The phi node wasn't forward defined
ctx.var_alloc.PhiDefine(phi, phi.Arg(0).Type());
}
}
void EmitVoid(EmitContext&) {}
void EmitReference(EmitContext& ctx, const IR::Value& value) {
ctx.var_alloc.Consume(value);
}
void EmitPhiMove(EmitContext& ctx, const IR::Value& phi_value, const IR::Value& value) {
IR::Inst& phi{*phi_value.InstRecursive()};
const auto phi_type{phi.Arg(0).Type()};
if (!phi.Definition<Id>().is_valid) {
// The phi node wasn't forward defined
ctx.var_alloc.PhiDefine(phi, phi_type);
}
const auto phi_reg{ctx.var_alloc.Consume(IR::Value{&phi})};
const auto val_reg{ctx.var_alloc.Consume(value)};
if (phi_reg == val_reg) {
return;
}
ctx.Add("{}={};", phi_reg, val_reg);
}
void EmitPrologue(EmitContext& ctx) {
InitializeOutputVaryings(ctx);
}
void EmitEpilogue(EmitContext&) {}
void EmitEmitVertex(EmitContext& ctx, const IR::Value& stream) {
ctx.Add("EmitStreamVertex(int({}));", ctx.var_alloc.Consume(stream));
InitializeOutputVaryings(ctx);
}
void EmitEndPrimitive(EmitContext& ctx, const IR::Value& stream) {
ctx.Add("EndStreamPrimitive(int({}));", ctx.var_alloc.Consume(stream));
}
} // namespace Shader::Backend::GLSL

View File

@ -0,0 +1,32 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
namespace Shader::Backend::GLSL {
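// Undefined IR values still need a definition in GLSL; zero keeps downstream
// reads deterministic. The 8- and 16-bit variants are stored in 32-bit
// variables, as the AddU32 calls below show.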
void EmitUndefU1(EmitContext& ctx, IR::Inst& inst) {
ctx.AddU1("{}=false;", inst);
}
void EmitUndefU8(EmitContext& ctx, IR::Inst& inst) {
ctx.AddU32("{}=0u;", inst);
}
void EmitUndefU16(EmitContext& ctx, IR::Inst& inst) {
ctx.AddU32("{}=0u;", inst);
}
void EmitUndefU32(EmitContext& ctx, IR::Inst& inst) {
ctx.AddU32("{}=0u;", inst);
}
void EmitUndefU64(EmitContext& ctx, IR::Inst& inst) {
ctx.AddU64("{}=0u;", inst);
}
} // namespace Shader::Backend::GLSL

View File

@ -0,0 +1,217 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string_view>
#include "shader_recompiler/backend/glsl/emit_context.h"
#include "shader_recompiler/backend/glsl/emit_glsl_instructions.h"
#include "shader_recompiler/frontend/ir/value.h"
#include "shader_recompiler/profile.h"
namespace Shader::Backend::GLSL {
namespace {
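// Maxwell's SHFL instruction shuffles within segments of the 32-wide warp:
// the segmentation mask partitions the lanes and the clamp value bounds the
// source lane inside a segment. These helpers compute the first and last
// valid lane for the calling thread; out-of-bounds shuffles fall back to the
// caller's own value via shfl_in_bounds.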
void SetInBoundsFlag(EmitContext& ctx, IR::Inst& inst) {
IR::Inst* const in_bounds{inst.GetAssociatedPseudoOperation(IR::Opcode::GetInBoundsFromOp)};
if (!in_bounds) {
return;
}
ctx.AddU1("{}=shfl_in_bounds;", *in_bounds);
in_bounds->Invalidate();
}
std::string ComputeMinThreadId(std::string_view thread_id, std::string_view segmentation_mask) {
return fmt::format("({}&{})", thread_id, segmentation_mask);
}
std::string ComputeMaxThreadId(std::string_view min_thread_id, std::string_view clamp,
std::string_view not_seg_mask) {
return fmt::format("({})|({}&{})", min_thread_id, clamp, not_seg_mask);
}
std::string GetMaxThreadId(std::string_view thread_id, std::string_view clamp,
std::string_view segmentation_mask) {
const auto not_seg_mask{fmt::format("(~{})", segmentation_mask)};
const auto min_thread_id{ComputeMinThreadId(thread_id, segmentation_mask)};
return ComputeMaxThreadId(min_thread_id, clamp, not_seg_mask);
}
void UseShuffleNv(EmitContext& ctx, IR::Inst& inst, std::string_view shfl_op,
std::string_view value, std::string_view index,
[[maybe_unused]] std::string_view clamp, std::string_view segmentation_mask) {
const auto width{fmt::format("32u>>(bitCount({}&31u))", segmentation_mask)};
ctx.AddU32("{}={}({},{},{},shfl_in_bounds);", inst, shfl_op, value, index, width);
SetInBoundsFlag(ctx, inst);
}
} // Anonymous namespace
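// The guest warp is 32 lanes wide; masking with 31 keeps lane ids in range on
// hardware whose native subgroup is larger (hence the
// warp_size_potentially_larger_than_guest checks below, where the 64-bit
// ballot result must also be split into two 32-bit words).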
void EmitLaneId(EmitContext& ctx, IR::Inst& inst) {
ctx.AddU32("{}=gl_SubGroupInvocationARB&31u;", inst);
}
void EmitVoteAll(EmitContext& ctx, IR::Inst& inst, std::string_view pred) {
if (!ctx.profile.warp_size_potentially_larger_than_guest) {
ctx.AddU1("{}=allInvocationsEqualARB({});", inst, pred);
} else {
// Index the uvec2 with lane>>5 to pick the correct 32-bit word of the
// 64-bit ballot; indexing with the raw lane id would be out of bounds for
// lanes 32-63.
const auto active_mask{fmt::format("uvec2(ballotARB(true))[gl_SubGroupInvocationARB>>5]")};
const auto ballot{fmt::format("uvec2(ballotARB({}))[gl_SubGroupInvocationARB>>5]", pred)};
ctx.AddU1("{}=({}&{})=={};", inst, ballot, active_mask, active_mask);
}
}
void EmitVoteAny(EmitContext& ctx, IR::Inst& inst, std::string_view pred) {
if (!ctx.profile.warp_size_potentially_larger_than_guest) {
ctx.AddU1("{}=anyInvocationARB({});", inst, pred);
} else {
const auto active_mask{fmt::format("uvec2(ballotARB(true))[gl_SubGroupInvocationARB>>5]")};
const auto ballot{fmt::format("uvec2(ballotARB({}))[gl_SubGroupInvocationARB>>5]", pred)};
ctx.AddU1("{}=({}&{})!=0u;", inst, ballot, active_mask, active_mask);
}
}
void EmitVoteEqual(EmitContext& ctx, IR::Inst& inst, std::string_view pred) {
if (!ctx.profile.warp_size_potentially_larger_than_guest) {
ctx.AddU1("{}=allInvocationsEqualARB({});", inst, pred);
} else {
const auto active_mask{fmt::format("uvec2(ballotARB(true))[gl_SubGroupInvocationARB>>5]")};
const auto ballot{fmt::format("uvec2(ballotARB({}))[gl_SubGroupInvocationARB>>5]", pred)};
const auto value{fmt::format("({}^{})", ballot, active_mask)};
ctx.AddU1("{}=({}==0)||({}=={});", inst, value, value, active_mask);
}
}
void EmitSubgroupBallot(EmitContext& ctx, IR::Inst& inst, std::string_view pred) {
if (!ctx.profile.warp_size_potentially_larger_than_guest) {
ctx.AddU32("{}=uvec2(ballotARB({})).x;", inst, pred);
} else {
ctx.AddU32("{}=uvec2(ballotARB({}))[gl_SubGroupInvocationARB];", inst, pred);
}
}
void EmitSubgroupEqMask(EmitContext& ctx, IR::Inst& inst) {
ctx.AddU32("{}=uint(gl_SubGroupEqMaskARB.x);", inst);
}
void EmitSubgroupLtMask(EmitContext& ctx, IR::Inst& inst) {
ctx.AddU32("{}=uint(gl_SubGroupLtMaskARB.x);", inst);
}
void EmitSubgroupLeMask(EmitContext& ctx, IR::Inst& inst) {
ctx.AddU32("{}=uint(gl_SubGroupLeMaskARB.x);", inst);
}
void EmitSubgroupGtMask(EmitContext& ctx, IR::Inst& inst) {
ctx.AddU32("{}=uint(gl_SubGroupGtMaskARB.x);", inst);
}
void EmitSubgroupGeMask(EmitContext& ctx, IR::Inst& inst) {
ctx.AddU32("{}=uint(gl_SubGroupGeMaskARB.x);", inst);
}
void EmitShuffleIndex(EmitContext& ctx, IR::Inst& inst, std::string_view value,
std::string_view index, std::string_view clamp,
std::string_view segmentation_mask) {
if (ctx.profile.support_gl_warp_intrinsics) {
UseShuffleNv(ctx, inst, "shuffleNV", value, index, clamp, segmentation_mask);
return;
}
const auto not_seg_mask{fmt::format("(~{})", segmentation_mask)};
const auto thread_id{"gl_SubGroupInvocationARB"};
const auto min_thread_id{ComputeMinThreadId(thread_id, segmentation_mask)};
const auto max_thread_id{ComputeMaxThreadId(min_thread_id, clamp, not_seg_mask)};
const auto lhs{fmt::format("({}&{})", index, not_seg_mask)};
const auto src_thread_id{fmt::format("({})|({})", lhs, min_thread_id)};
ctx.Add("shfl_in_bounds=int({})<=int({});", src_thread_id, max_thread_id);
SetInBoundsFlag(ctx, inst);
ctx.AddU32("{}=shfl_in_bounds?readInvocationARB({},{}):{};", inst, value, src_thread_id, value);
}
void EmitShuffleUp(EmitContext& ctx, IR::Inst& inst, std::string_view value, std::string_view index,
std::string_view clamp, std::string_view segmentation_mask) {
if (ctx.profile.support_gl_warp_intrinsics) {
UseShuffleNv(ctx, inst, "shuffleUpNV", value, index, clamp, segmentation_mask);
return;
}
const auto thread_id{"gl_SubGroupInvocationARB"};
const auto max_thread_id{GetMaxThreadId(thread_id, clamp, segmentation_mask)};
const auto src_thread_id{fmt::format("({}-{})", thread_id, index)};
ctx.Add("shfl_in_bounds=int({})>=int({});", src_thread_id, max_thread_id);
SetInBoundsFlag(ctx, inst);
ctx.AddU32("{}=shfl_in_bounds?readInvocationARB({},{}):{};", inst, value, src_thread_id, value);
}
void EmitShuffleDown(EmitContext& ctx, IR::Inst& inst, std::string_view value,
std::string_view index, std::string_view clamp,
std::string_view segmentation_mask) {
if (ctx.profile.support_gl_warp_intrinsics) {
UseShuffleNv(ctx, inst, "shuffleDownNV", value, index, clamp, segmentation_mask);
return;
}
const auto thread_id{"gl_SubGroupInvocationARB"};
const auto max_thread_id{GetMaxThreadId(thread_id, clamp, segmentation_mask)};
const auto src_thread_id{fmt::format("({}+{})", thread_id, index)};
ctx.Add("shfl_in_bounds=int({})<=int({});", src_thread_id, max_thread_id);
SetInBoundsFlag(ctx, inst);
ctx.AddU32("{}=shfl_in_bounds?readInvocationARB({},{}):{};", inst, value, src_thread_id, value);
}
void EmitShuffleButterfly(EmitContext& ctx, IR::Inst& inst, std::string_view value,
std::string_view index, std::string_view clamp,
std::string_view segmentation_mask) {
if (ctx.profile.support_gl_warp_intrinsics) {
UseShuffleNv(ctx, inst, "shuffleXorNV", value, index, clamp, segmentation_mask);
return;
}
const auto thread_id{"gl_SubGroupInvocationARB"};
const auto max_thread_id{GetMaxThreadId(thread_id, clamp, segmentation_mask)};
const auto src_thread_id{fmt::format("({}^{})", thread_id, index)};
ctx.Add("shfl_in_bounds=int({})<=int({});", src_thread_id, max_thread_id);
SetInBoundsFlag(ctx, inst);
ctx.AddU32("{}=shfl_in_bounds?readInvocationARB({},{}):{};", inst, value, src_thread_id, value);
}
void EmitFSwizzleAdd(EmitContext& ctx, IR::Inst& inst, std::string_view op_a, std::string_view op_b,
std::string_view swizzle) {
const auto mask{fmt::format("({}>>((gl_SubGroupInvocationARB&3)<<1))&3", swizzle)};
const auto modifier_a{fmt::format("FSWZ_A[{}]", mask)};
const auto modifier_b{fmt::format("FSWZ_B[{}]", mask)};
ctx.AddF32("{}=({}*{})+({}*{});", inst, op_a, modifier_a, op_b, modifier_b);
}
void EmitDPdxFine(EmitContext& ctx, IR::Inst& inst, std::string_view op_a) {
if (ctx.profile.support_gl_derivative_control) {
ctx.AddF32("{}=dFdxFine({});", inst, op_a);
} else {
LOG_WARNING(Shader_GLSL, "Device does not support dFdxFine, fallback to dFdx");
ctx.AddF32("{}=dFdx({});", inst, op_a);
}
}
void EmitDPdyFine(EmitContext& ctx, IR::Inst& inst, std::string_view op_a) {
if (ctx.profile.support_gl_derivative_control) {
ctx.AddF32("{}=dFdyFine({});", inst, op_a);
} else {
LOG_WARNING(Shader_GLSL, "Device does not support dFdyFine, fallback to dFdy");
ctx.AddF32("{}=dFdy({});", inst, op_a);
}
}
void EmitDPdxCoarse(EmitContext& ctx, IR::Inst& inst, std::string_view op_a) {
if (ctx.profile.support_gl_derivative_control) {
ctx.AddF32("{}=dFdxCoarse({});", inst, op_a);
} else {
LOG_WARNING(Shader_GLSL, "Device does not support dFdxCoarse, fallback to dFdx");
ctx.AddF32("{}=dFdx({});", inst, op_a);
}
}
void EmitDPdyCoarse(EmitContext& ctx, IR::Inst& inst, std::string_view op_a) {
if (ctx.profile.support_gl_derivative_control) {
ctx.AddF32("{}=dFdyCoarse({});", inst, op_a);
} else {
LOG_WARNING(Shader_GLSL, "Device does not support dFdyCoarse, fallback to dFdy");
ctx.AddF32("{}=dFdy({});", inst, op_a);
}
}
} // namespace Shader::Backend::GLSL

View File

@ -0,0 +1,308 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <string>
#include <string_view>
#include <fmt/format.h>
#include "shader_recompiler/backend/glsl/var_alloc.h"
#include "shader_recompiler/exception.h"
#include "shader_recompiler/frontend/ir/value.h"
namespace Shader::Backend::GLSL {
namespace {
std::string TypePrefix(GlslVarType type) {
switch (type) {
case GlslVarType::U1:
return "b_";
case GlslVarType::F16x2:
return "f16x2_";
case GlslVarType::U32:
return "u_";
case GlslVarType::F32:
return "f_";
case GlslVarType::U64:
return "u64_";
case GlslVarType::F64:
return "d_";
case GlslVarType::U32x2:
return "u2_";
case GlslVarType::F32x2:
return "f2_";
case GlslVarType::U32x3:
return "u3_";
case GlslVarType::F32x3:
return "f3_";
case GlslVarType::U32x4:
return "u4_";
case GlslVarType::F32x4:
return "f4_";
case GlslVarType::PrecF32:
return "pf_";
case GlslVarType::PrecF64:
return "pd_";
case GlslVarType::Void:
return "";
default:
throw NotImplementedException("Type {}", type);
}
}
std::string FormatFloat(std::string_view value, IR::Type type) {
// TODO: Confirm FP64 nan/inf
if (type == IR::Type::F32) {
if (value == "nan") {
return "utof(0x7fc00000)";
}
if (value == "inf") {
return "utof(0x7f800000)";
}
if (value == "-inf") {
return "utof(0xff800000)";
}
}
if (value.find_first_of('e') != std::string_view::npos) {
// scientific notation
const auto cast{type == IR::Type::F32 ? "float" : "double"};
return fmt::format("{}({})", cast, value);
}
const bool needs_dot{value.find_first_of('.') == std::string_view::npos};
const bool needs_suffix{!value.ends_with('f')};
const auto suffix{type == IR::Type::F32 ? "f" : "lf"};
return fmt::format("{}{}{}", value, needs_dot ? "." : "", needs_suffix ? suffix : "");
}
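// Illustrative outputs for the inputs shown: FormatFloat("nan", F32) yields
// "utof(0x7fc00000)", FormatFloat("1", F32) yields "1.f", and
// FormatFloat("1e-3", F32) yields "float(1e-3)", since scientific notation
// cannot simply take a trailing dot and suffix.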
std::string MakeImm(const IR::Value& value) {
switch (value.Type()) {
case IR::Type::U1:
return fmt::format("{}", value.U1() ? "true" : "false");
case IR::Type::U32:
return fmt::format("{}u", value.U32());
case IR::Type::F32:
return FormatFloat(fmt::format("{}", value.F32()), IR::Type::F32);
case IR::Type::U64:
return fmt::format("{}ul", value.U64());
case IR::Type::F64:
return FormatFloat(fmt::format("{}", value.F64()), IR::Type::F64);
case IR::Type::Void:
return "";
default:
throw NotImplementedException("Immediate type {}", value.Type());
}
}
} // Anonymous namespace
std::string VarAlloc::Representation(u32 index, GlslVarType type) const {
const auto prefix{TypePrefix(type)};
return fmt::format("{}{}", prefix, index);
}
std::string VarAlloc::Representation(Id id) const {
return Representation(id.index, id.type);
}
std::string VarAlloc::Define(IR::Inst& inst, GlslVarType type) {
if (inst.HasUses()) {
inst.SetDefinition<Id>(Alloc(type));
return Representation(inst.Definition<Id>());
} else {
Id id{};
id.type.Assign(type);
GetUseTracker(type).uses_temp = true;
inst.SetDefinition<Id>(id);
return 't' + Representation(inst.Definition<Id>());
}
}
std::string VarAlloc::Define(IR::Inst& inst, IR::Type type) {
return Define(inst, RegType(type));
}
std::string VarAlloc::PhiDefine(IR::Inst& inst, IR::Type type) {
return AddDefine(inst, RegType(type));
}
std::string VarAlloc::AddDefine(IR::Inst& inst, GlslVarType type) {
if (inst.HasUses()) {
inst.SetDefinition<Id>(Alloc(type));
} else {
return "";
}
return Representation(inst.Definition<Id>());
}
std::string VarAlloc::Consume(const IR::Value& value) {
return value.IsImmediate() ? MakeImm(value) : ConsumeInst(*value.InstRecursive());
}
std::string VarAlloc::ConsumeInst(IR::Inst& inst) {
inst.DestructiveRemoveUsage();
if (!inst.HasUses()) {
Free(inst.Definition<Id>());
}
return Representation(inst.Definition<Id>());
}
std::string VarAlloc::GetGlslType(IR::Type type) const {
return GetGlslType(RegType(type));
}
Id VarAlloc::Alloc(GlslVarType type) {
auto& use_tracker{GetUseTracker(type)};
const auto num_vars{use_tracker.var_use.size()};
for (size_t var = 0; var < num_vars; ++var) {
if (use_tracker.var_use[var]) {
continue;
}
use_tracker.num_used = std::max(use_tracker.num_used, var + 1);
use_tracker.var_use[var] = true;
Id ret{};
ret.is_valid.Assign(1);
ret.type.Assign(type);
ret.index.Assign(static_cast<u32>(var));
return ret;
}
// Allocate a new variable
use_tracker.var_use.push_back(true);
Id ret{};
ret.is_valid.Assign(1);
ret.type.Assign(type);
ret.index.Assign(static_cast<u32>(use_tracker.num_used));
++use_tracker.num_used;
return ret;
}
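// Alloc is a linear first-fit scan over the type's use tracker: freed slots
// are reused before the pool grows, so (illustratively) allocating twice,
// freeing the first variable, then allocating again hands back index 0 again
// rather than extending the pool to index 2.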
void VarAlloc::Free(Id id) {
if (id.is_valid == 0) {
throw LogicError("Freeing invalid variable");
}
auto& use_tracker{GetUseTracker(id.type)};
use_tracker.var_use[id.index] = false;
}
GlslVarType VarAlloc::RegType(IR::Type type) const {
switch (type) {
case IR::Type::U1:
return GlslVarType::U1;
case IR::Type::U32:
return GlslVarType::U32;
case IR::Type::F32:
return GlslVarType::F32;
case IR::Type::U64:
return GlslVarType::U64;
case IR::Type::F64:
return GlslVarType::F64;
default:
throw NotImplementedException("IR type {}", type);
}
}
std::string VarAlloc::GetGlslType(GlslVarType type) const {
switch (type) {
case GlslVarType::U1:
return "bool";
case GlslVarType::F16x2:
return "f16vec2";
case GlslVarType::U32:
return "uint";
case GlslVarType::F32:
case GlslVarType::PrecF32:
return "float";
case GlslVarType::U64:
return "uint64_t";
case GlslVarType::F64:
case GlslVarType::PrecF64:
return "double";
case GlslVarType::U32x2:
return "uvec2";
case GlslVarType::F32x2:
return "vec2";
case GlslVarType::U32x3:
return "uvec3";
case GlslVarType::F32x3:
return "vec3";
case GlslVarType::U32x4:
return "uvec4";
case GlslVarType::F32x4:
return "vec4";
case GlslVarType::Void:
return "";
default:
throw NotImplementedException("Type {}", type);
}
}
VarAlloc::UseTracker& VarAlloc::GetUseTracker(GlslVarType type) {
switch (type) {
case GlslVarType::U1:
return var_bool;
case GlslVarType::F16x2:
return var_f16x2;
case GlslVarType::U32:
return var_u32;
case GlslVarType::F32:
return var_f32;
case GlslVarType::U64:
return var_u64;
case GlslVarType::F64:
return var_f64;
case GlslVarType::U32x2:
return var_u32x2;
case GlslVarType::F32x2:
return var_f32x2;
case GlslVarType::U32x3:
return var_u32x3;
case GlslVarType::F32x3:
return var_f32x3;
case GlslVarType::U32x4:
return var_u32x4;
case GlslVarType::F32x4:
return var_f32x4;
case GlslVarType::PrecF32:
return var_precf32;
case GlslVarType::PrecF64:
return var_precf64;
default:
throw NotImplementedException("Type {}", type);
}
}
const VarAlloc::UseTracker& VarAlloc::GetUseTracker(GlslVarType type) const {
switch (type) {
case GlslVarType::U1:
return var_bool;
case GlslVarType::F16x2:
return var_f16x2;
case GlslVarType::U32:
return var_u32;
case GlslVarType::F32:
return var_f32;
case GlslVarType::U64:
return var_u64;
case GlslVarType::F64:
return var_f64;
case GlslVarType::U32x2:
return var_u32x2;
case GlslVarType::F32x2:
return var_f32x2;
case GlslVarType::U32x3:
return var_u32x3;
case GlslVarType::F32x3:
return var_f32x3;
case GlslVarType::U32x4:
return var_u32x4;
case GlslVarType::F32x4:
return var_f32x4;
case GlslVarType::PrecF32:
return var_precf32;
case GlslVarType::PrecF64:
return var_precf64;
default:
throw NotImplementedException("Type {}", type);
}
}
} // namespace Shader::Backend::GLSL

View File

@ -0,0 +1,105 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <bitset>
#include <string>
#include <vector>
#include "common/bit_field.h"
#include "common/common_types.h"
namespace Shader::IR {
class Inst;
class Value;
enum class Type;
} // namespace Shader::IR
namespace Shader::Backend::GLSL {
enum class GlslVarType : u32 {
U1,
F16x2,
U32,
F32,
U64,
F64,
U32x2,
F32x2,
U32x3,
F32x3,
U32x4,
F32x4,
PrecF32,
PrecF64,
Void,
};
struct Id {
union {
u32 raw;
BitField<0, 1, u32> is_valid;
BitField<1, 4, GlslVarType> type;
BitField<6, 26, u32> index;
};
bool operator==(Id rhs) const noexcept {
return raw == rhs.raw;
}
bool operator!=(Id rhs) const noexcept {
return !operator==(rhs);
}
};
static_assert(sizeof(Id) == sizeof(u32));
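// Worked example of the packed layout (hypothetical value, not emulator data):
// raw == 0xC5 decodes as is_valid == 1 (bit 0), type == 2 == GlslVarType::U32
// (bits 1-4), and index == 3 (bits 6-31); bit 5 is unused padding.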
class VarAlloc {
public:
struct UseTracker {
bool uses_temp{};
size_t num_used{};
std::vector<bool> var_use;
};
/// Used for explicit usages of variables, may revert to temporaries
std::string Define(IR::Inst& inst, GlslVarType type);
std::string Define(IR::Inst& inst, IR::Type type);
/// Used to assign variables used by the IR. May return a blank string if
/// the instruction's result is unused in the IR.
std::string AddDefine(IR::Inst& inst, GlslVarType type);
std::string PhiDefine(IR::Inst& inst, IR::Type type);
std::string Consume(const IR::Value& value);
std::string ConsumeInst(IR::Inst& inst);
std::string GetGlslType(GlslVarType type) const;
std::string GetGlslType(IR::Type type) const;
const UseTracker& GetUseTracker(GlslVarType type) const;
std::string Representation(u32 index, GlslVarType type) const;
private:
GlslVarType RegType(IR::Type type) const;
Id Alloc(GlslVarType type);
void Free(Id id);
UseTracker& GetUseTracker(GlslVarType type);
std::string Representation(Id id) const;
UseTracker var_bool{};
UseTracker var_f16x2{};
UseTracker var_u32{};
UseTracker var_u32x2{};
UseTracker var_u32x3{};
UseTracker var_u32x4{};
UseTracker var_f32{};
UseTracker var_f32x2{};
UseTracker var_f32x3{};
UseTracker var_f32x4{};
UseTracker var_u64{};
UseTracker var_f64{};
UseTracker var_precf32{};
UseTracker var_precf64{};
};
} // namespace Shader::Backend::GLSL

File diff suppressed because it is too large

View File

@ -0,0 +1,307 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <array>
#include <string_view>
#include <sirit/sirit.h>
#include "shader_recompiler/backend/bindings.h"
#include "shader_recompiler/frontend/ir/program.h"
#include "shader_recompiler/profile.h"
#include "shader_recompiler/runtime_info.h"
#include "shader_recompiler/shader_info.h"
namespace Shader::Backend::SPIRV {
using Sirit::Id;
class VectorTypes {
public:
void Define(Sirit::Module& sirit_ctx, Id base_type, std::string_view name);
[[nodiscard]] Id operator[](size_t size) const noexcept {
return defs[size - 1];
}
private:
std::array<Id, 4> defs{};
};
struct TextureDefinition {
Id id;
Id sampled_type;
Id pointer_type;
Id image_type;
u32 count;
};
struct TextureBufferDefinition {
Id id;
u32 count;
};
struct ImageBufferDefinition {
Id id;
Id image_type;
u32 count;
};
struct ImageDefinition {
Id id;
Id image_type;
u32 count;
};
struct UniformDefinitions {
Id U8{};
Id S8{};
Id U16{};
Id S16{};
Id U32{};
Id F32{};
Id U32x2{};
Id U32x4{};
};
struct StorageTypeDefinition {
Id array{};
Id element{};
};
struct StorageTypeDefinitions {
StorageTypeDefinition U8{};
StorageTypeDefinition S8{};
StorageTypeDefinition U16{};
StorageTypeDefinition S16{};
StorageTypeDefinition U32{};
StorageTypeDefinition U64{};
StorageTypeDefinition F32{};
StorageTypeDefinition U32x2{};
StorageTypeDefinition U32x4{};
};
struct StorageDefinitions {
Id U8{};
Id S8{};
Id U16{};
Id S16{};
Id U32{};
Id F32{};
Id U64{};
Id U32x2{};
Id U32x4{};
};
struct GenericElementInfo {
Id id{};
u32 first_element{};
u32 num_components{};
};
class EmitContext final : public Sirit::Module {
public:
explicit EmitContext(const Profile& profile, const RuntimeInfo& runtime_info,
IR::Program& program, Bindings& binding);
~EmitContext();
[[nodiscard]] Id Def(const IR::Value& value);
[[nodiscard]] Id BitOffset8(const IR::Value& offset);
[[nodiscard]] Id BitOffset16(const IR::Value& offset);
Id Const(u32 value) {
return Constant(U32[1], value);
}
Id Const(u32 element_1, u32 element_2) {
return ConstantComposite(U32[2], Const(element_1), Const(element_2));
}
Id Const(u32 element_1, u32 element_2, u32 element_3) {
return ConstantComposite(U32[3], Const(element_1), Const(element_2), Const(element_3));
}
Id Const(u32 element_1, u32 element_2, u32 element_3, u32 element_4) {
return ConstantComposite(U32[4], Const(element_1), Const(element_2), Const(element_3),
Const(element_4));
}
Id SConst(s32 value) {
return Constant(S32[1], value);
}
Id SConst(s32 element_1, s32 element_2) {
return ConstantComposite(S32[2], SConst(element_1), SConst(element_2));
}
Id SConst(s32 element_1, s32 element_2, s32 element_3) {
return ConstantComposite(S32[3], SConst(element_1), SConst(element_2), SConst(element_3));
}
Id SConst(s32 element_1, s32 element_2, s32 element_3, s32 element_4) {
return ConstantComposite(S32[4], SConst(element_1), SConst(element_2), SConst(element_3),
SConst(element_4));
}
Id Const(f32 value) {
return Constant(F32[1], value);
}
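// Usage sketch (hypothetical call sites): Const(1u) emits a uint constant,
// Const(1u, 2u) a uvec2 built via ConstantComposite from scalar constants, and
// Const(1.0f) a float constant; the SConst overloads mirror this for s32.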
const Profile& profile;
const RuntimeInfo& runtime_info;
Stage stage{};
Id void_id{};
Id U1{};
Id U8{};
Id S8{};
Id U16{};
Id S16{};
Id U64{};
VectorTypes F32;
VectorTypes U32;
VectorTypes S32;
VectorTypes F16;
VectorTypes F64;
Id true_value{};
Id false_value{};
Id u32_zero_value{};
Id f32_zero_value{};
UniformDefinitions uniform_types;
StorageTypeDefinitions storage_types;
Id private_u32{};
Id shared_u8{};
Id shared_u16{};
Id shared_u32{};
Id shared_u64{};
Id shared_u32x2{};
Id shared_u32x4{};
Id input_f32{};
Id input_u32{};
Id input_s32{};
Id output_f32{};
Id output_u32{};
Id image_buffer_type{};
Id sampled_texture_buffer_type{};
Id image_u32{};
std::array<UniformDefinitions, Info::MAX_CBUFS> cbufs{};
std::array<StorageDefinitions, Info::MAX_SSBOS> ssbos{};
std::vector<TextureBufferDefinition> texture_buffers;
std::vector<ImageBufferDefinition> image_buffers;
std::vector<TextureDefinition> textures;
std::vector<ImageDefinition> images;
Id workgroup_id{};
Id local_invocation_id{};
Id invocation_id{};
Id sample_id{};
Id is_helper_invocation{};
Id subgroup_local_invocation_id{};
Id subgroup_mask_eq{};
Id subgroup_mask_lt{};
Id subgroup_mask_le{};
Id subgroup_mask_gt{};
Id subgroup_mask_ge{};
Id instance_id{};
Id instance_index{};
Id base_instance{};
Id vertex_id{};
Id vertex_index{};
Id base_vertex{};
Id front_face{};
Id point_coord{};
Id tess_coord{};
Id clip_distances{};
Id layer{};
Id viewport_index{};
Id viewport_mask{};
Id primitive_id{};
Id fswzadd_lut_a{};
Id fswzadd_lut_b{};
Id indexed_load_func{};
Id indexed_store_func{};
Id local_memory{};
Id shared_memory_u8{};
Id shared_memory_u16{};
Id shared_memory_u32{};
Id shared_memory_u64{};
Id shared_memory_u32x2{};
Id shared_memory_u32x4{};
Id shared_memory_u32_type{};
Id shared_store_u8_func{};
Id shared_store_u16_func{};
Id increment_cas_shared{};
Id increment_cas_ssbo{};
Id decrement_cas_shared{};
Id decrement_cas_ssbo{};
Id f32_add_cas{};
Id f16x2_add_cas{};
Id f16x2_min_cas{};
Id f16x2_max_cas{};
Id f32x2_add_cas{};
Id f32x2_min_cas{};
Id f32x2_max_cas{};
Id load_global_func_u32{};
Id load_global_func_u32x2{};
Id load_global_func_u32x4{};
Id write_global_func_u32{};
Id write_global_func_u32x2{};
Id write_global_func_u32x4{};
Id input_position{};
std::array<Id, 32> input_generics{};
Id output_point_size{};
Id output_position{};
std::array<std::array<GenericElementInfo, 4>, 32> output_generics{};
Id output_tess_level_outer{};
Id output_tess_level_inner{};
std::array<Id, 30> patches{};
std::array<Id, 8> frag_color{};
Id sample_mask{};
Id frag_depth{};
std::vector<Id> interfaces;
private:
void DefineCommonTypes(const Info& info);
void DefineCommonConstants();
void DefineInterfaces(const IR::Program& program);
void DefineLocalMemory(const IR::Program& program);
void DefineSharedMemory(const IR::Program& program);
void DefineSharedMemoryFunctions(const IR::Program& program);
void DefineConstantBuffers(const Info& info, u32& binding);
void DefineStorageBuffers(const Info& info, u32& binding);
void DefineTextureBuffers(const Info& info, u32& binding);
void DefineImageBuffers(const Info& info, u32& binding);
void DefineTextures(const Info& info, u32& binding);
void DefineImages(const Info& info, u32& binding);
void DefineAttributeMemAccess(const Info& info);
void DefineGlobalMemoryFunctions(const Info& info);
void DefineInputs(const IR::Program& program);
void DefineOutputs(const IR::Program& program);
};
} // namespace Shader::Backend::SPIRV

View File

@ -0,0 +1,541 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <span>
#include <tuple>
#include <type_traits>
#include <utility>
#include <vector>
#include "common/settings.h"
#include "shader_recompiler/backend/spirv/emit_spirv.h"
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/frontend/ir/basic_block.h"
#include "shader_recompiler/frontend/ir/program.h"
namespace Shader::Backend::SPIRV {
namespace {
template <class Func>
struct FuncTraits {};
template <class ReturnType_, class... Args>
struct FuncTraits<ReturnType_ (*)(Args...)> {
using ReturnType = ReturnType_;
static constexpr size_t NUM_ARGS = sizeof...(Args);
template <size_t I>
using ArgType = std::tuple_element_t<I, std::tuple<Args...>>;
};
template <auto func, typename... Args>
void SetDefinition(EmitContext& ctx, IR::Inst* inst, Args... args) {
inst->SetDefinition<Id>(func(ctx, std::forward<Args>(args)...));
}
template <typename ArgType>
ArgType Arg(EmitContext& ctx, const IR::Value& arg) {
if constexpr (std::is_same_v<ArgType, Id>) {
return ctx.Def(arg);
} else if constexpr (std::is_same_v<ArgType, const IR::Value&>) {
return arg;
} else if constexpr (std::is_same_v<ArgType, u32>) {
return arg.U32();
} else if constexpr (std::is_same_v<ArgType, IR::Attribute>) {
return arg.Attribute();
} else if constexpr (std::is_same_v<ArgType, IR::Patch>) {
return arg.Patch();
} else if constexpr (std::is_same_v<ArgType, IR::Reg>) {
return arg.Reg();
}
}
template <auto func, bool is_first_arg_inst, size_t... I>
void Invoke(EmitContext& ctx, IR::Inst* inst, std::index_sequence<I...>) {
using Traits = FuncTraits<decltype(func)>;
if constexpr (std::is_same_v<typename Traits::ReturnType, Id>) {
if constexpr (is_first_arg_inst) {
SetDefinition<func>(
ctx, inst, inst,
Arg<typename Traits::template ArgType<I + 2>>(ctx, inst->Arg(I))...);
} else {
SetDefinition<func>(
ctx, inst, Arg<typename Traits::template ArgType<I + 1>>(ctx, inst->Arg(I))...);
}
} else {
if constexpr (is_first_arg_inst) {
func(ctx, inst, Arg<typename Traits::template ArgType<I + 2>>(ctx, inst->Arg(I))...);
} else {
func(ctx, Arg<typename Traits::template ArgType<I + 1>>(ctx, inst->Arg(I))...);
}
}
}
template <auto func>
void Invoke(EmitContext& ctx, IR::Inst* inst) {
using Traits = FuncTraits<decltype(func)>;
static_assert(Traits::NUM_ARGS >= 1, "Insufficient arguments");
if constexpr (Traits::NUM_ARGS == 1) {
Invoke<func, false>(ctx, inst, std::make_index_sequence<0>{});
} else {
using FirstArgType = typename Traits::template ArgType<1>;
static constexpr bool is_first_arg_inst = std::is_same_v<FirstArgType, IR::Inst*>;
using Indices = std::make_index_sequence<Traits::NUM_ARGS - (is_first_arg_inst ? 2 : 1)>;
Invoke<func, is_first_arg_inst>(ctx, inst, Indices{});
}
}
void EmitInst(EmitContext& ctx, IR::Inst* inst) {
switch (inst->GetOpcode()) {
#define OPCODE(name, result_type, ...) \
case IR::Opcode::name: \
return Invoke<&Emit##name>(ctx, inst);
#include "shader_recompiler/frontend/ir/opcodes.inc"
#undef OPCODE
}
throw LogicError("Invalid opcode {}", inst->GetOpcode());
}
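// Dispatch sketch: the OPCODE expansion above maps each IR opcode to its
// emitter, e.g. IR::Opcode::BitCastU32F32 calls EmitBitCastU32F32, with each
// operand unwrapped by Arg<> to match the emitter's parameter types
// (illustrative pick; the full table lives in opcodes.inc).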
Id TypeId(const EmitContext& ctx, IR::Type type) {
switch (type) {
case IR::Type::U1:
return ctx.U1;
case IR::Type::U32:
return ctx.U32[1];
default:
throw NotImplementedException("Phi node type {}", type);
}
}
void Traverse(EmitContext& ctx, IR::Program& program) {
IR::Block* current_block{};
for (const IR::AbstractSyntaxNode& node : program.syntax_list) {
switch (node.type) {
case IR::AbstractSyntaxNode::Type::Block: {
const Id label{node.data.block->Definition<Id>()};
if (current_block) {
ctx.OpBranch(label);
}
current_block = node.data.block;
ctx.AddLabel(label);
for (IR::Inst& inst : node.data.block->Instructions()) {
EmitInst(ctx, &inst);
}
break;
}
case IR::AbstractSyntaxNode::Type::If: {
const Id if_label{node.data.if_node.body->Definition<Id>()};
const Id endif_label{node.data.if_node.merge->Definition<Id>()};
ctx.OpSelectionMerge(endif_label, spv::SelectionControlMask::MaskNone);
ctx.OpBranchConditional(ctx.Def(node.data.if_node.cond), if_label, endif_label);
break;
}
case IR::AbstractSyntaxNode::Type::Loop: {
const Id body_label{node.data.loop.body->Definition<Id>()};
const Id continue_label{node.data.loop.continue_block->Definition<Id>()};
const Id endloop_label{node.data.loop.merge->Definition<Id>()};
ctx.OpLoopMerge(endloop_label, continue_label, spv::LoopControlMask::MaskNone);
ctx.OpBranch(body_label);
break;
}
case IR::AbstractSyntaxNode::Type::Break: {
const Id break_label{node.data.break_node.merge->Definition<Id>()};
const Id skip_label{node.data.break_node.skip->Definition<Id>()};
ctx.OpBranchConditional(ctx.Def(node.data.break_node.cond), break_label, skip_label);
break;
}
case IR::AbstractSyntaxNode::Type::EndIf:
if (current_block) {
ctx.OpBranch(node.data.end_if.merge->Definition<Id>());
}
break;
case IR::AbstractSyntaxNode::Type::Repeat: {
Id cond{ctx.Def(node.data.repeat.cond)};
if (!Settings::values.disable_shader_loop_safety_checks) {
const Id pointer_type{ctx.TypePointer(spv::StorageClass::Private, ctx.U32[1])};
const Id safety_counter{ctx.AddGlobalVariable(
pointer_type, spv::StorageClass::Private, ctx.Const(0x2000u))};
if (ctx.profile.supported_spirv >= 0x00010400) {
ctx.interfaces.push_back(safety_counter);
}
const Id old_counter{ctx.OpLoad(ctx.U32[1], safety_counter)};
const Id new_counter{ctx.OpISub(ctx.U32[1], old_counter, ctx.Const(1u))};
ctx.OpStore(safety_counter, new_counter);
const Id safety_cond{
ctx.OpSGreaterThanEqual(ctx.U1, new_counter, ctx.u32_zero_value)};
cond = ctx.OpLogicalAnd(ctx.U1, cond, safety_cond);
}
const Id loop_header_label{node.data.repeat.loop_header->Definition<Id>()};
const Id merge_label{node.data.repeat.merge->Definition<Id>()};
ctx.OpBranchConditional(cond, loop_header_label, merge_label);
break;
}
case IR::AbstractSyntaxNode::Type::Return:
ctx.OpReturn();
break;
case IR::AbstractSyntaxNode::Type::Unreachable:
ctx.OpUnreachable();
break;
}
if (node.type != IR::AbstractSyntaxNode::Type::Block) {
current_block = nullptr;
}
}
}
Id DefineMain(EmitContext& ctx, IR::Program& program) {
const Id void_function{ctx.TypeFunction(ctx.void_id)};
const Id main{ctx.OpFunction(ctx.void_id, spv::FunctionControlMask::MaskNone, void_function)};
for (IR::Block* const block : program.blocks) {
block->SetDefinition(ctx.OpLabel());
}
Traverse(ctx, program);
ctx.OpFunctionEnd();
return main;
}
spv::ExecutionMode ExecutionMode(TessPrimitive primitive) {
switch (primitive) {
case TessPrimitive::Isolines:
return spv::ExecutionMode::Isolines;
case TessPrimitive::Triangles:
return spv::ExecutionMode::Triangles;
case TessPrimitive::Quads:
return spv::ExecutionMode::Quads;
}
throw InvalidArgument("Tessellation primitive {}", primitive);
}
spv::ExecutionMode ExecutionMode(TessSpacing spacing) {
switch (spacing) {
case TessSpacing::Equal:
return spv::ExecutionMode::SpacingEqual;
case TessSpacing::FractionalOdd:
return spv::ExecutionMode::SpacingFractionalOdd;
case TessSpacing::FractionalEven:
return spv::ExecutionMode::SpacingFractionalEven;
}
throw InvalidArgument("Tessellation spacing {}", spacing);
}
void DefineEntryPoint(const IR::Program& program, EmitContext& ctx, Id main) {
const std::span interfaces(ctx.interfaces.data(), ctx.interfaces.size());
spv::ExecutionModel execution_model{};
switch (program.stage) {
case Stage::Compute: {
const std::array<u32, 3> workgroup_size{program.workgroup_size};
execution_model = spv::ExecutionModel::GLCompute;
ctx.AddExecutionMode(main, spv::ExecutionMode::LocalSize, workgroup_size[0],
workgroup_size[1], workgroup_size[2]);
break;
}
case Stage::VertexB:
execution_model = spv::ExecutionModel::Vertex;
break;
case Stage::TessellationControl:
execution_model = spv::ExecutionModel::TessellationControl;
ctx.AddCapability(spv::Capability::Tessellation);
ctx.AddExecutionMode(main, spv::ExecutionMode::OutputVertices, program.invocations);
break;
case Stage::TessellationEval:
execution_model = spv::ExecutionModel::TessellationEvaluation;
ctx.AddCapability(spv::Capability::Tessellation);
ctx.AddExecutionMode(main, ExecutionMode(ctx.runtime_info.tess_primitive));
ctx.AddExecutionMode(main, ExecutionMode(ctx.runtime_info.tess_spacing));
ctx.AddExecutionMode(main, ctx.runtime_info.tess_clockwise
? spv::ExecutionMode::VertexOrderCw
: spv::ExecutionMode::VertexOrderCcw);
break;
case Stage::Geometry:
execution_model = spv::ExecutionModel::Geometry;
ctx.AddCapability(spv::Capability::Geometry);
ctx.AddCapability(spv::Capability::GeometryStreams);
switch (ctx.runtime_info.input_topology) {
case InputTopology::Points:
ctx.AddExecutionMode(main, spv::ExecutionMode::InputPoints);
break;
case InputTopology::Lines:
ctx.AddExecutionMode(main, spv::ExecutionMode::InputLines);
break;
case InputTopology::LinesAdjacency:
ctx.AddExecutionMode(main, spv::ExecutionMode::InputLinesAdjacency);
break;
case InputTopology::Triangles:
ctx.AddExecutionMode(main, spv::ExecutionMode::Triangles);
break;
case InputTopology::TrianglesAdjacency:
ctx.AddExecutionMode(main, spv::ExecutionMode::InputTrianglesAdjacency);
break;
}
switch (program.output_topology) {
case OutputTopology::PointList:
ctx.AddExecutionMode(main, spv::ExecutionMode::OutputPoints);
break;
case OutputTopology::LineStrip:
ctx.AddExecutionMode(main, spv::ExecutionMode::OutputLineStrip);
break;
case OutputTopology::TriangleStrip:
ctx.AddExecutionMode(main, spv::ExecutionMode::OutputTriangleStrip);
break;
}
if (program.info.stores[IR::Attribute::PointSize]) {
ctx.AddCapability(spv::Capability::GeometryPointSize);
}
ctx.AddExecutionMode(main, spv::ExecutionMode::OutputVertices, program.output_vertices);
ctx.AddExecutionMode(main, spv::ExecutionMode::Invocations, program.invocations);
if (program.is_geometry_passthrough) {
if (ctx.profile.support_geometry_shader_passthrough) {
ctx.AddExtension("SPV_NV_geometry_shader_passthrough");
ctx.AddCapability(spv::Capability::GeometryShaderPassthroughNV);
} else {
LOG_WARNING(Shader_SPIRV, "Geometry shader passthrough used with no support");
}
}
break;
case Stage::Fragment:
execution_model = spv::ExecutionModel::Fragment;
if (ctx.profile.lower_left_origin_mode) {
ctx.AddExecutionMode(main, spv::ExecutionMode::OriginLowerLeft);
} else {
ctx.AddExecutionMode(main, spv::ExecutionMode::OriginUpperLeft);
}
if (program.info.stores_frag_depth) {
ctx.AddExecutionMode(main, spv::ExecutionMode::DepthReplacing);
}
if (ctx.runtime_info.force_early_z) {
ctx.AddExecutionMode(main, spv::ExecutionMode::EarlyFragmentTests);
}
break;
default:
throw NotImplementedException("Stage {}", program.stage);
}
ctx.AddEntryPoint(execution_model, main, "main", interfaces);
}
void SetupDenormControl(const Profile& profile, const IR::Program& program, EmitContext& ctx,
Id main_func) {
const Info& info{program.info};
if (info.uses_fp32_denorms_flush && info.uses_fp32_denorms_preserve) {
LOG_DEBUG(Shader_SPIRV, "Fp32 denorm flush and preserve on the same shader");
} else if (info.uses_fp32_denorms_flush) {
if (profile.support_fp32_denorm_flush) {
ctx.AddCapability(spv::Capability::DenormFlushToZero);
ctx.AddExecutionMode(main_func, spv::ExecutionMode::DenormFlushToZero, 32U);
} else {
// Drivers will most likely flush denorms by default, no need to warn
}
} else if (info.uses_fp32_denorms_preserve) {
if (profile.support_fp32_denorm_preserve) {
ctx.AddCapability(spv::Capability::DenormPreserve);
ctx.AddExecutionMode(main_func, spv::ExecutionMode::DenormPreserve, 32U);
} else {
LOG_DEBUG(Shader_SPIRV, "Fp32 denorm preserve used in shader without host support");
}
}
if (!profile.support_separate_denorm_behavior || profile.has_broken_fp16_float_controls) {
// No separate denorm behavior
return;
}
if (info.uses_fp16_denorms_flush && info.uses_fp16_denorms_preserve) {
LOG_DEBUG(Shader_SPIRV, "Fp16 denorm flush and preserve on the same shader");
} else if (info.uses_fp16_denorms_flush) {
if (profile.support_fp16_denorm_flush) {
ctx.AddCapability(spv::Capability::DenormFlushToZero);
ctx.AddExecutionMode(main_func, spv::ExecutionMode::DenormFlushToZero, 16U);
} else {
// Same as fp32, no need to warn as most drivers will flush by default
}
} else if (info.uses_fp16_denorms_preserve) {
if (profile.support_fp16_denorm_preserve) {
ctx.AddCapability(spv::Capability::DenormPreserve);
ctx.AddExecutionMode(main_func, spv::ExecutionMode::DenormPreserve, 16U);
} else {
LOG_DEBUG(Shader_SPIRV, "Fp16 denorm preserve used in shader without host support");
}
}
}
void SetupSignedNanCapabilities(const Profile& profile, const IR::Program& program,
EmitContext& ctx, Id main_func) {
if (profile.has_broken_fp16_float_controls && program.info.uses_fp16) {
return;
}
if (program.info.uses_fp16 && profile.support_fp16_signed_zero_nan_preserve) {
ctx.AddCapability(spv::Capability::SignedZeroInfNanPreserve);
ctx.AddExecutionMode(main_func, spv::ExecutionMode::SignedZeroInfNanPreserve, 16U);
}
if (profile.support_fp32_signed_zero_nan_preserve) {
ctx.AddCapability(spv::Capability::SignedZeroInfNanPreserve);
ctx.AddExecutionMode(main_func, spv::ExecutionMode::SignedZeroInfNanPreserve, 32U);
}
if (program.info.uses_fp64 && profile.support_fp64_signed_zero_nan_preserve) {
ctx.AddCapability(spv::Capability::SignedZeroInfNanPreserve);
ctx.AddExecutionMode(main_func, spv::ExecutionMode::SignedZeroInfNanPreserve, 64U);
}
}
void SetupCapabilities(const Profile& profile, const Info& info, EmitContext& ctx) {
if (info.uses_sampled_1d) {
ctx.AddCapability(spv::Capability::Sampled1D);
}
if (info.uses_sparse_residency) {
ctx.AddCapability(spv::Capability::SparseResidency);
}
if (info.uses_demote_to_helper_invocation && profile.support_demote_to_helper_invocation) {
ctx.AddExtension("SPV_EXT_demote_to_helper_invocation");
ctx.AddCapability(spv::Capability::DemoteToHelperInvocationEXT);
}
if (info.stores[IR::Attribute::ViewportIndex]) {
ctx.AddCapability(spv::Capability::MultiViewport);
}
if (info.stores[IR::Attribute::ViewportMask] && profile.support_viewport_mask) {
ctx.AddExtension("SPV_NV_viewport_array2");
ctx.AddCapability(spv::Capability::ShaderViewportMaskNV);
}
if (info.stores[IR::Attribute::Layer] || info.stores[IR::Attribute::ViewportIndex]) {
if (profile.support_viewport_index_layer_non_geometry && ctx.stage != Stage::Geometry) {
ctx.AddExtension("SPV_EXT_shader_viewport_index_layer");
ctx.AddCapability(spv::Capability::ShaderViewportIndexLayerEXT);
}
}
if (!profile.support_vertex_instance_id &&
(info.loads[IR::Attribute::InstanceId] || info.loads[IR::Attribute::VertexId])) {
ctx.AddExtension("SPV_KHR_shader_draw_parameters");
ctx.AddCapability(spv::Capability::DrawParameters);
}
if ((info.uses_subgroup_vote || info.uses_subgroup_invocation_id ||
info.uses_subgroup_shuffles) &&
profile.support_vote) {
ctx.AddExtension("SPV_KHR_shader_ballot");
ctx.AddCapability(spv::Capability::SubgroupBallotKHR);
if (!profile.warp_size_potentially_larger_than_guest) {
// vote ops are only used when not taking the long path
ctx.AddExtension("SPV_KHR_subgroup_vote");
ctx.AddCapability(spv::Capability::SubgroupVoteKHR);
}
}
if (info.uses_int64_bit_atomics && profile.support_int64_atomics) {
ctx.AddCapability(spv::Capability::Int64Atomics);
}
if (info.uses_typeless_image_reads && profile.support_typeless_image_loads) {
ctx.AddCapability(spv::Capability::StorageImageReadWithoutFormat);
}
if (info.uses_typeless_image_writes) {
ctx.AddCapability(spv::Capability::StorageImageWriteWithoutFormat);
}
if (info.uses_image_buffers) {
ctx.AddCapability(spv::Capability::ImageBuffer);
}
if (info.uses_sample_id) {
ctx.AddCapability(spv::Capability::SampleRateShading);
}
if (!ctx.runtime_info.xfb_varyings.empty()) {
ctx.AddCapability(spv::Capability::TransformFeedback);
}
if (info.uses_derivatives) {
ctx.AddCapability(spv::Capability::DerivativeControl);
}
// TODO: Track this usage
ctx.AddCapability(spv::Capability::ImageGatherExtended);
ctx.AddCapability(spv::Capability::ImageQuery);
ctx.AddCapability(spv::Capability::SampledBuffer);
}
void PatchPhiNodes(IR::Program& program, EmitContext& ctx) {
auto inst{program.blocks.front()->begin()};
size_t block_index{0};
ctx.PatchDeferredPhi([&](size_t phi_arg) {
if (phi_arg == 0) {
++inst;
if (inst == program.blocks[block_index]->end() ||
inst->GetOpcode() != IR::Opcode::Phi) {
do {
++block_index;
inst = program.blocks[block_index]->begin();
} while (inst->GetOpcode() != IR::Opcode::Phi);
}
}
return ctx.Def(inst->Arg(phi_arg));
});
}
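// Note: PatchDeferredPhi replays phi operands in emission order; phi_arg == 0
// marks the start of the next phi node, so the lambda advances past non-phi
// instructions (and across blocks) before resolving each argument with Def().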
} // Anonymous namespace
std::vector<u32> EmitSPIRV(const Profile& profile, const RuntimeInfo& runtime_info,
IR::Program& program, Bindings& bindings) {
EmitContext ctx{profile, runtime_info, program, bindings};
const Id main{DefineMain(ctx, program)};
DefineEntryPoint(program, ctx, main);
if (profile.support_float_controls) {
ctx.AddExtension("SPV_KHR_float_controls");
SetupDenormControl(profile, program, ctx, main);
SetupSignedNanCapabilities(profile, program, ctx, main);
}
SetupCapabilities(profile, program.info, ctx);
PatchPhiNodes(program, ctx);
return ctx.Assemble();
}
Id EmitPhi(EmitContext& ctx, IR::Inst* inst) {
const size_t num_args{inst->NumArgs()};
boost::container::small_vector<Id, 32> blocks;
blocks.reserve(num_args);
for (size_t index = 0; index < num_args; ++index) {
blocks.push_back(inst->PhiBlock(index)->Definition<Id>());
}
// The type of a phi instruction is stored in its flags
const Id result_type{TypeId(ctx, inst->Flags<IR::Type>())};
return ctx.DeferredOpPhi(result_type, std::span(blocks.data(), blocks.size()));
}
void EmitVoid(EmitContext&) {}
Id EmitIdentity(EmitContext& ctx, const IR::Value& value) {
const Id id{ctx.Def(value)};
if (!Sirit::ValidId(id)) {
throw NotImplementedException("Forward identity declaration");
}
return id;
}
Id EmitConditionRef(EmitContext& ctx, const IR::Value& value) {
const Id id{ctx.Def(value)};
if (!Sirit::ValidId(id)) {
throw NotImplementedException("Forward identity declaration");
}
return id;
}
void EmitReference(EmitContext&) {}
void EmitPhiMove(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitGetZeroFromOp(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitGetSignFromOp(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitGetCarryFromOp(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitGetOverflowFromOp(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitGetSparseFromOp(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitGetInBoundsFromOp(EmitContext&) {
throw LogicError("Unreachable instruction");
}
} // namespace Shader::Backend::SPIRV

View File

@ -0,0 +1,27 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#pragma once
#include <vector>
#include <sirit/sirit.h>
#include "common/common_types.h"
#include "shader_recompiler/backend/bindings.h"
#include "shader_recompiler/backend/spirv/emit_context.h"
#include "shader_recompiler/frontend/ir/program.h"
#include "shader_recompiler/profile.h"
namespace Shader::Backend::SPIRV {
[[nodiscard]] std::vector<u32> EmitSPIRV(const Profile& profile, const RuntimeInfo& runtime_info,
IR::Program& program, Bindings& bindings);
[[nodiscard]] inline std::vector<u32> EmitSPIRV(const Profile& profile, IR::Program& program) {
Bindings binding;
return EmitSPIRV(profile, {}, program, binding);
}
} // namespace Shader::Backend::SPIRV

View File

@ -0,0 +1,448 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "shader_recompiler/backend/spirv/emit_spirv.h"
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
namespace Shader::Backend::SPIRV {
namespace {
Id SharedPointer(EmitContext& ctx, Id offset, u32 index_offset = 0) {
const Id shift_id{ctx.Const(2U)};
Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
if (index_offset > 0) {
index = ctx.OpIAdd(ctx.U32[1], index, ctx.Const(index_offset));
}
return ctx.profile.support_explicit_workgroup_layout
? ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, ctx.u32_zero_value, index)
: ctx.OpAccessChain(ctx.shared_u32, ctx.shared_memory_u32, index);
}
Id StorageIndex(EmitContext& ctx, const IR::Value& offset, size_t element_size) {
if (offset.IsImmediate()) {
const u32 imm_offset{static_cast<u32>(offset.U32() / element_size)};
return ctx.Const(imm_offset);
}
const u32 shift{static_cast<u32>(std::countr_zero(element_size))};
const Id index{ctx.Def(offset)};
if (shift == 0) {
return index;
}
const Id shift_id{ctx.Const(shift)};
return ctx.OpShiftRightLogical(ctx.U32[1], index, shift_id);
}
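// Worked example: an immediate byte offset of 16 with element_size == 4 folds
// to the constant index 4, while a dynamic offset is shifted right by
// countr_zero(element_size); this assumes element_size is a power of two.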
Id StoragePointer(EmitContext& ctx, const StorageTypeDefinition& type_def,
Id StorageDefinitions::*member_ptr, const IR::Value& binding,
const IR::Value& offset, size_t element_size) {
if (!binding.IsImmediate()) {
throw NotImplementedException("Dynamic storage buffer indexing");
}
const Id ssbo{ctx.ssbos[binding.U32()].*member_ptr};
const Id index{StorageIndex(ctx, offset, element_size)};
return ctx.OpAccessChain(type_def.element, ssbo, ctx.u32_zero_value, index);
}
std::pair<Id, Id> AtomicArgs(EmitContext& ctx) {
const Id scope{ctx.Const(static_cast<u32>(spv::Scope::Device))};
const Id semantics{ctx.u32_zero_value};
return {scope, semantics};
}
Id SharedAtomicU32(EmitContext& ctx, Id offset, Id value,
Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id, Id)) {
const Id pointer{SharedPointer(ctx, offset)};
const auto [scope, semantics]{AtomicArgs(ctx)};
return (ctx.*atomic_func)(ctx.U32[1], pointer, scope, semantics, value);
}
Id StorageAtomicU32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, Id value,
Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id, Id)) {
const Id pointer{StoragePointer(ctx, ctx.storage_types.U32, &StorageDefinitions::U32, binding,
offset, sizeof(u32))};
const auto [scope, semantics]{AtomicArgs(ctx)};
return (ctx.*atomic_func)(ctx.U32[1], pointer, scope, semantics, value);
}
Id StorageAtomicU64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset, Id value,
Id (Sirit::Module::*atomic_func)(Id, Id, Id, Id, Id),
Id (Sirit::Module::*non_atomic_func)(Id, Id, Id)) {
if (ctx.profile.support_int64_atomics) {
const Id pointer{StoragePointer(ctx, ctx.storage_types.U64, &StorageDefinitions::U64,
binding, offset, sizeof(u64))};
const auto [scope, semantics]{AtomicArgs(ctx)};
return (ctx.*atomic_func)(ctx.U64, pointer, scope, semantics, value);
}
LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
binding, offset, sizeof(u32[2]))};
const Id original_value{ctx.OpBitcast(ctx.U64, ctx.OpLoad(ctx.U32[2], pointer))};
const Id result{(ctx.*non_atomic_func)(ctx.U64, value, original_value)};
ctx.OpStore(pointer, ctx.OpBitcast(ctx.U32[2], result));
return original_value;
}
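// The fallback path above is deliberately non-atomic: it loads, combines and
// stores the value as a uvec2, so concurrent writers may interleave. It is
// only reached when profile.support_int64_atomics is false.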
} // Anonymous namespace
Id EmitSharedAtomicIAdd32(EmitContext& ctx, Id offset, Id value) {
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicIAdd);
}
Id EmitSharedAtomicSMin32(EmitContext& ctx, Id offset, Id value) {
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicSMin);
}
Id EmitSharedAtomicUMin32(EmitContext& ctx, Id offset, Id value) {
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicUMin);
}
Id EmitSharedAtomicSMax32(EmitContext& ctx, Id offset, Id value) {
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicSMax);
}
Id EmitSharedAtomicUMax32(EmitContext& ctx, Id offset, Id value) {
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicUMax);
}
Id EmitSharedAtomicInc32(EmitContext& ctx, Id offset, Id value) {
const Id shift_id{ctx.Const(2U)};
const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
return ctx.OpFunctionCall(ctx.U32[1], ctx.increment_cas_shared, index, value);
}
Id EmitSharedAtomicDec32(EmitContext& ctx, Id offset, Id value) {
const Id shift_id{ctx.Const(2U)};
const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
return ctx.OpFunctionCall(ctx.U32[1], ctx.decrement_cas_shared, index, value);
}
Id EmitSharedAtomicAnd32(EmitContext& ctx, Id offset, Id value) {
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicAnd);
}
Id EmitSharedAtomicOr32(EmitContext& ctx, Id offset, Id value) {
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicOr);
}
Id EmitSharedAtomicXor32(EmitContext& ctx, Id offset, Id value) {
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicXor);
}
Id EmitSharedAtomicExchange32(EmitContext& ctx, Id offset, Id value) {
return SharedAtomicU32(ctx, offset, value, &Sirit::Module::OpAtomicExchange);
}
Id EmitSharedAtomicExchange64(EmitContext& ctx, Id offset, Id value) {
if (ctx.profile.support_int64_atomics && ctx.profile.support_explicit_workgroup_layout) {
const Id shift_id{ctx.Const(3U)};
const Id index{ctx.OpShiftRightArithmetic(ctx.U32[1], offset, shift_id)};
const Id pointer{
ctx.OpAccessChain(ctx.shared_u64, ctx.shared_memory_u64, ctx.u32_zero_value, index)};
const auto [scope, semantics]{AtomicArgs(ctx)};
return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
}
LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
const Id pointer_1{SharedPointer(ctx, offset, 0)};
const Id pointer_2{SharedPointer(ctx, offset, 1)};
const Id value_1{ctx.OpLoad(ctx.U32[1], pointer_1)};
const Id value_2{ctx.OpLoad(ctx.U32[1], pointer_2)};
const Id new_vector{ctx.OpBitcast(ctx.U32[2], value)};
ctx.OpStore(pointer_1, ctx.OpCompositeExtract(ctx.U32[1], new_vector, 0U));
ctx.OpStore(pointer_2, ctx.OpCompositeExtract(ctx.U32[1], new_vector, 1U));
return ctx.OpBitcast(ctx.U64, ctx.OpCompositeConstruct(ctx.U32[2], value_1, value_2));
}
Id EmitStorageAtomicIAdd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicIAdd);
}
Id EmitStorageAtomicSMin32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicSMin);
}
Id EmitStorageAtomicUMin32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicUMin);
}
Id EmitStorageAtomicSMax32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicSMax);
}
Id EmitStorageAtomicUMax32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicUMax);
}
Id EmitStorageAtomicInc32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
const Id ssbo{ctx.ssbos[binding.U32()].U32};
const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
return ctx.OpFunctionCall(ctx.U32[1], ctx.increment_cas_ssbo, base_index, value, ssbo);
}
Id EmitStorageAtomicDec32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
const Id ssbo{ctx.ssbos[binding.U32()].U32};
const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
return ctx.OpFunctionCall(ctx.U32[1], ctx.decrement_cas_ssbo, base_index, value, ssbo);
}
Id EmitStorageAtomicAnd32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicAnd);
}
Id EmitStorageAtomicOr32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicOr);
}
Id EmitStorageAtomicXor32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicXor);
}
Id EmitStorageAtomicExchange32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU32(ctx, binding, offset, value, &Sirit::Module::OpAtomicExchange);
}
Id EmitStorageAtomicIAdd64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicIAdd,
&Sirit::Module::OpIAdd);
}
Id EmitStorageAtomicSMin64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicSMin,
&Sirit::Module::OpSMin);
}
Id EmitStorageAtomicUMin64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicUMin,
&Sirit::Module::OpUMin);
}
Id EmitStorageAtomicSMax64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicSMax,
&Sirit::Module::OpSMax);
}
Id EmitStorageAtomicUMax64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicUMax,
&Sirit::Module::OpUMax);
}
Id EmitStorageAtomicAnd64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicAnd,
&Sirit::Module::OpBitwiseAnd);
}
Id EmitStorageAtomicOr64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicOr,
&Sirit::Module::OpBitwiseOr);
}
Id EmitStorageAtomicXor64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
return StorageAtomicU64(ctx, binding, offset, value, &Sirit::Module::OpAtomicXor,
&Sirit::Module::OpBitwiseXor);
}
Id EmitStorageAtomicExchange64(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
if (ctx.profile.support_int64_atomics) {
const Id pointer{StoragePointer(ctx, ctx.storage_types.U64, &StorageDefinitions::U64,
binding, offset, sizeof(u64))};
const auto [scope, semantics]{AtomicArgs(ctx)};
return ctx.OpAtomicExchange(ctx.U64, pointer, scope, semantics, value);
}
LOG_ERROR(Shader_SPIRV, "Int64 atomics not supported, fallback to non-atomic");
const Id pointer{StoragePointer(ctx, ctx.storage_types.U32x2, &StorageDefinitions::U32x2,
binding, offset, sizeof(u32[2]))};
const Id original{ctx.OpBitcast(ctx.U64, ctx.OpLoad(ctx.U32[2], pointer))};
ctx.OpStore(pointer, ctx.OpBitcast(ctx.U32[2], value));
return original;
}
Id EmitStorageAtomicAddF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
const Id ssbo{ctx.ssbos[binding.U32()].U32};
const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
return ctx.OpFunctionCall(ctx.F32[1], ctx.f32_add_cas, base_index, value, ssbo);
}
Id EmitStorageAtomicAddF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
const Id ssbo{ctx.ssbos[binding.U32()].U32};
const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
const Id result{ctx.OpFunctionCall(ctx.F16[2], ctx.f16x2_add_cas, base_index, value, ssbo)};
return ctx.OpBitcast(ctx.U32[1], result);
}
Id EmitStorageAtomicAddF32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
const Id ssbo{ctx.ssbos[binding.U32()].U32};
const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
const Id result{ctx.OpFunctionCall(ctx.F32[2], ctx.f32x2_add_cas, base_index, value, ssbo)};
return ctx.OpPackHalf2x16(ctx.U32[1], result);
}
Id EmitStorageAtomicMinF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
const Id ssbo{ctx.ssbos[binding.U32()].U32};
const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
const Id result{ctx.OpFunctionCall(ctx.F16[2], ctx.f16x2_min_cas, base_index, value, ssbo)};
return ctx.OpBitcast(ctx.U32[1], result);
}
Id EmitStorageAtomicMinF32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
const Id ssbo{ctx.ssbos[binding.U32()].U32};
const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
const Id result{ctx.OpFunctionCall(ctx.F32[2], ctx.f32x2_min_cas, base_index, value, ssbo)};
return ctx.OpPackHalf2x16(ctx.U32[1], result);
}
Id EmitStorageAtomicMaxF16x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
const Id ssbo{ctx.ssbos[binding.U32()].U32};
const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
const Id result{ctx.OpFunctionCall(ctx.F16[2], ctx.f16x2_max_cas, base_index, value, ssbo)};
return ctx.OpBitcast(ctx.U32[1], result);
}
Id EmitStorageAtomicMaxF32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset,
Id value) {
const Id ssbo{ctx.ssbos[binding.U32()].U32};
const Id base_index{StorageIndex(ctx, offset, sizeof(u32))};
const Id result{ctx.OpFunctionCall(ctx.F32[2], ctx.f32x2_max_cas, base_index, value, ssbo)};
return ctx.OpPackHalf2x16(ctx.U32[1], result);
}
Id EmitGlobalAtomicIAdd32(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicSMin32(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicUMin32(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicSMax32(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicUMax32(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicInc32(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicDec32(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicAnd32(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicOr32(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicXor32(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicExchange32(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicIAdd64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicSMin64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicUMin64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicSMax64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicUMax64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicInc64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicDec64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicAnd64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicOr64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicXor64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicExchange64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicAddF32(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicAddF16x2(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicAddF32x2(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicMinF16x2(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicMinF32x2(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicMaxF16x2(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitGlobalAtomicMaxF32x2(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
} // namespace Shader::Backend::SPIRV

View File

@ -0,0 +1,38 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "shader_recompiler/backend/spirv/emit_spirv.h"
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/frontend/ir/modifiers.h"
namespace Shader::Backend::SPIRV {
namespace {
void MemoryBarrier(EmitContext& ctx, spv::Scope scope) {
const auto semantics{
spv::MemorySemanticsMask::AcquireRelease | spv::MemorySemanticsMask::UniformMemory |
spv::MemorySemanticsMask::WorkgroupMemory | spv::MemorySemanticsMask::AtomicCounterMemory |
spv::MemorySemanticsMask::ImageMemory};
ctx.OpMemoryBarrier(ctx.Const(static_cast<u32>(scope)), ctx.Const(static_cast<u32>(semantics)));
}
} // Anonymous namespace
void EmitBarrier(EmitContext& ctx) {
const auto execution{spv::Scope::Workgroup};
const auto memory{spv::Scope::Workgroup};
const auto memory_semantics{spv::MemorySemanticsMask::AcquireRelease |
spv::MemorySemanticsMask::WorkgroupMemory};
ctx.OpControlBarrier(ctx.Const(static_cast<u32>(execution)),
ctx.Const(static_cast<u32>(memory)),
ctx.Const(static_cast<u32>(memory_semantics)));
}
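// Rough equivalence, not taken from the surrounding sources: this matches what
// GLSL's barrier() lowers to, i.e. a workgroup-scoped OpControlBarrier with
// acquire/release semantics over workgroup memory.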
void EmitWorkgroupMemoryBarrier(EmitContext& ctx) {
MemoryBarrier(ctx, spv::Scope::Workgroup);
}
void EmitDeviceMemoryBarrier(EmitContext& ctx) {
MemoryBarrier(ctx, spv::Scope::Device);
}
} // namespace Shader::Backend::SPIRV

View File

@ -0,0 +1,66 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "shader_recompiler/backend/spirv/emit_spirv.h"
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
namespace Shader::Backend::SPIRV {
void EmitBitCastU16F16(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitBitCastU32F32(EmitContext& ctx, Id value) {
return ctx.OpBitcast(ctx.U32[1], value);
}
void EmitBitCastU64F64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitBitCastF16U16(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitBitCastF32U32(EmitContext& ctx, Id value) {
return ctx.OpBitcast(ctx.F32[1], value);
}
void EmitBitCastF64U64(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitPackUint2x32(EmitContext& ctx, Id value) {
return ctx.OpBitcast(ctx.U64, value);
}
Id EmitUnpackUint2x32(EmitContext& ctx, Id value) {
return ctx.OpBitcast(ctx.U32[2], value);
}
Id EmitPackFloat2x16(EmitContext& ctx, Id value) {
return ctx.OpBitcast(ctx.U32[1], value);
}
Id EmitUnpackFloat2x16(EmitContext& ctx, Id value) {
return ctx.OpBitcast(ctx.F16[2], value);
}
Id EmitPackHalf2x16(EmitContext& ctx, Id value) {
return ctx.OpPackHalf2x16(ctx.U32[1], value);
}
Id EmitUnpackHalf2x16(EmitContext& ctx, Id value) {
return ctx.OpUnpackHalf2x16(ctx.F32[2], value);
}
Id EmitPackDouble2x32(EmitContext& ctx, Id value) {
return ctx.OpBitcast(ctx.F64[1], value);
}
Id EmitUnpackDouble2x32(EmitContext& ctx, Id value) {
return ctx.OpBitcast(ctx.U32[2], value);
}
} // namespace Shader::Backend::SPIRV

View File

@ -0,0 +1,155 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "shader_recompiler/backend/spirv/emit_spirv.h"
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
#include "shader_recompiler/frontend/ir/modifiers.h"
namespace Shader::Backend::SPIRV {
Id EmitCompositeConstructU32x2(EmitContext& ctx, Id e1, Id e2) {
return ctx.OpCompositeConstruct(ctx.U32[2], e1, e2);
}
Id EmitCompositeConstructU32x3(EmitContext& ctx, Id e1, Id e2, Id e3) {
return ctx.OpCompositeConstruct(ctx.U32[3], e1, e2, e3);
}
Id EmitCompositeConstructU32x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4) {
return ctx.OpCompositeConstruct(ctx.U32[4], e1, e2, e3, e4);
}
Id EmitCompositeExtractU32x2(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.U32[1], composite, index);
}
Id EmitCompositeExtractU32x3(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.U32[1], composite, index);
}
Id EmitCompositeExtractU32x4(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.U32[1], composite, index);
}
Id EmitCompositeInsertU32x2(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.U32[2], object, composite, index);
}
Id EmitCompositeInsertU32x3(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.U32[3], object, composite, index);
}
Id EmitCompositeInsertU32x4(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.U32[4], object, composite, index);
}
Id EmitCompositeConstructF16x2(EmitContext& ctx, Id e1, Id e2) {
return ctx.OpCompositeConstruct(ctx.F16[2], e1, e2);
}
Id EmitCompositeConstructF16x3(EmitContext& ctx, Id e1, Id e2, Id e3) {
return ctx.OpCompositeConstruct(ctx.F16[3], e1, e2, e3);
}
Id EmitCompositeConstructF16x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4) {
return ctx.OpCompositeConstruct(ctx.F16[4], e1, e2, e3, e4);
}
Id EmitCompositeExtractF16x2(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.F16[1], composite, index);
}
Id EmitCompositeExtractF16x3(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.F16[1], composite, index);
}
Id EmitCompositeExtractF16x4(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.F16[1], composite, index);
}
Id EmitCompositeInsertF16x2(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F16[2], object, composite, index);
}
Id EmitCompositeInsertF16x3(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F16[3], object, composite, index);
}
Id EmitCompositeInsertF16x4(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F16[4], object, composite, index);
}
Id EmitCompositeConstructF32x2(EmitContext& ctx, Id e1, Id e2) {
return ctx.OpCompositeConstruct(ctx.F32[2], e1, e2);
}
Id EmitCompositeConstructF32x3(EmitContext& ctx, Id e1, Id e2, Id e3) {
return ctx.OpCompositeConstruct(ctx.F32[3], e1, e2, e3);
}
Id EmitCompositeConstructF32x4(EmitContext& ctx, Id e1, Id e2, Id e3, Id e4) {
return ctx.OpCompositeConstruct(ctx.F32[4], e1, e2, e3, e4);
}
Id EmitCompositeExtractF32x2(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.F32[1], composite, index);
}
Id EmitCompositeExtractF32x3(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.F32[1], composite, index);
}
Id EmitCompositeExtractF32x4(EmitContext& ctx, Id composite, u32 index) {
return ctx.OpCompositeExtract(ctx.F32[1], composite, index);
}
Id EmitCompositeInsertF32x2(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F32[2], object, composite, index);
}
Id EmitCompositeInsertF32x3(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F32[3], object, composite, index);
}
Id EmitCompositeInsertF32x4(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F32[4], object, composite, index);
}
void EmitCompositeConstructF64x2(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitCompositeConstructF64x3(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitCompositeConstructF64x4(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitCompositeExtractF64x2(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitCompositeExtractF64x3(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitCompositeExtractF64x4(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitCompositeInsertF64x2(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F64[2], object, composite, index);
}
Id EmitCompositeInsertF64x3(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F64[3], object, composite, index);
}
Id EmitCompositeInsertF64x4(EmitContext& ctx, Id composite, Id object, u32 index) {
return ctx.OpCompositeInsert(ctx.F64[4], object, composite, index);
}
} // namespace Shader::Backend::SPIRV


@@ -0,0 +1,505 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include <bit>
#include <limits>
#include <optional>
#include <tuple>
#include <utility>
#include "shader_recompiler/backend/spirv/emit_spirv.h"
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
namespace Shader::Backend::SPIRV {
namespace {
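// How a generic input attribute is accessed: the pointer type for the access
// chain, the type to load, and whether the loaded value must be bitcast back
// to F32 (the IR treats all varyings as 32-bit floats).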
struct AttrInfo {
Id pointer;
Id id;
bool needs_cast;
};
std::optional<AttrInfo> AttrTypes(EmitContext& ctx, u32 index) {
const AttributeType type{ctx.runtime_info.generic_input_types.at(index)};
switch (type) {
case AttributeType::Float:
return AttrInfo{ctx.input_f32, ctx.F32[1], false};
case AttributeType::UnsignedInt:
return AttrInfo{ctx.input_u32, ctx.U32[1], true};
case AttributeType::SignedInt:
return AttrInfo{ctx.input_s32, ctx.TypeInt(32, true), true};
case AttributeType::Disabled:
return std::nullopt;
}
throw InvalidArgument("Invalid attribute type {}", type);
}
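// Tessellation and geometry inputs are arrayed per vertex, so their access
// chains take an extra leading vertex index.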
template <typename... Args>
Id AttrPointer(EmitContext& ctx, Id pointer_type, Id vertex, Id base, Args&&... args) {
switch (ctx.stage) {
case Stage::TessellationControl:
case Stage::TessellationEval:
case Stage::Geometry:
return ctx.OpAccessChain(pointer_type, base, vertex, std::forward<Args>(args)...);
default:
return ctx.OpAccessChain(pointer_type, base, std::forward<Args>(args)...);
}
}
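// Tessellation control outputs are likewise arrayed, indexed by the current
// invocation ID.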
template <typename... Args>
Id OutputAccessChain(EmitContext& ctx, Id result_type, Id base, Args&&... args) {
if (ctx.stage == Stage::TessellationControl) {
const Id invocation_id{ctx.OpLoad(ctx.U32[1], ctx.invocation_id)};
return ctx.OpAccessChain(result_type, base, invocation_id, std::forward<Args>(args)...);
} else {
return ctx.OpAccessChain(result_type, base, std::forward<Args>(args)...);
}
}
struct OutAttr {
OutAttr(Id pointer_) : pointer{pointer_} {}
OutAttr(Id pointer_, Id type_) : pointer{pointer_}, type{type_} {}
Id pointer{};
Id type{};
};
std::optional<OutAttr> OutputAttrPointer(EmitContext& ctx, IR::Attribute attr) {
if (IR::IsGeneric(attr)) {
const u32 index{IR::GenericAttributeIndex(attr)};
const u32 element{IR::GenericAttributeElement(attr)};
const GenericElementInfo& info{ctx.output_generics.at(index).at(element)};
if (info.num_components == 1) {
return info.id;
} else {
const u32 index_element{element - info.first_element};
const Id index_id{ctx.Const(index_element)};
return OutputAccessChain(ctx, ctx.output_f32, info.id, index_id);
}
}
switch (attr) {
case IR::Attribute::PointSize:
return ctx.output_point_size;
case IR::Attribute::PositionX:
case IR::Attribute::PositionY:
case IR::Attribute::PositionZ:
case IR::Attribute::PositionW: {
const u32 element{static_cast<u32>(attr) % 4};
const Id element_id{ctx.Const(element)};
return OutputAccessChain(ctx, ctx.output_f32, ctx.output_position, element_id);
}
case IR::Attribute::ClipDistance0:
case IR::Attribute::ClipDistance1:
case IR::Attribute::ClipDistance2:
case IR::Attribute::ClipDistance3:
case IR::Attribute::ClipDistance4:
case IR::Attribute::ClipDistance5:
case IR::Attribute::ClipDistance6:
case IR::Attribute::ClipDistance7: {
const u32 base{static_cast<u32>(IR::Attribute::ClipDistance0)};
const u32 index{static_cast<u32>(attr) - base};
const Id clip_num{ctx.Const(index)};
return OutputAccessChain(ctx, ctx.output_f32, ctx.clip_distances, clip_num);
}
case IR::Attribute::Layer:
if (ctx.profile.support_viewport_index_layer_non_geometry ||
ctx.stage == Shader::Stage::Geometry) {
return OutAttr{ctx.layer, ctx.U32[1]};
}
return std::nullopt;
case IR::Attribute::ViewportIndex:
if (ctx.profile.support_viewport_index_layer_non_geometry ||
ctx.stage == Shader::Stage::Geometry) {
return OutAttr{ctx.viewport_index, ctx.U32[1]};
}
return std::nullopt;
case IR::Attribute::ViewportMask:
if (!ctx.profile.support_viewport_mask) {
return std::nullopt;
}
return OutAttr{ctx.OpAccessChain(ctx.output_u32, ctx.viewport_mask, ctx.u32_zero_value),
ctx.U32[1]};
default:
throw NotImplementedException("Read attribute {}", attr);
}
}
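// Loads one element from a constant buffer; member_ptr selects which typed
// view of the uniform block to read through. Dynamic offsets are scaled from
// bytes down to element granularity before building the access chain.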
Id GetCbuf(EmitContext& ctx, Id result_type, Id UniformDefinitions::*member_ptr, u32 element_size,
const IR::Value& binding, const IR::Value& offset) {
if (!binding.IsImmediate()) {
throw NotImplementedException("Constant buffer indexing");
}
const Id cbuf{ctx.cbufs[binding.U32()].*member_ptr};
const Id uniform_type{ctx.uniform_types.*member_ptr};
if (!offset.IsImmediate()) {
Id index{ctx.Def(offset)};
if (element_size > 1) {
const u32 log2_element_size{static_cast<u32>(std::countr_zero(element_size))};
const Id shift{ctx.Const(log2_element_size)};
index = ctx.OpShiftRightArithmetic(ctx.U32[1], ctx.Def(offset), shift);
}
const Id access_chain{ctx.OpAccessChain(uniform_type, cbuf, ctx.u32_zero_value, index)};
return ctx.OpLoad(result_type, access_chain);
}
// Hardware has been shown to read the aligned offset (e.g. LDC.U32 at offset 6 reads offset 4)
const Id imm_offset{ctx.Const(offset.U32() / element_size)};
const Id access_chain{ctx.OpAccessChain(uniform_type, cbuf, ctx.u32_zero_value, imm_offset)};
return ctx.OpLoad(result_type, access_chain);
}
Id GetCbufU32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
return GetCbuf(ctx, ctx.U32[1], &UniformDefinitions::U32, sizeof(u32), binding, offset);
}
Id GetCbufU32x4(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
return GetCbuf(ctx, ctx.U32[4], &UniformDefinitions::U32x4, sizeof(u32[4]), binding, offset);
}
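// Without descriptor aliasing, constant buffers are only declared as uvec4
// arrays; this extracts the requested 32-bit component from such a vector,
// dynamically when the offset is not an immediate.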
Id GetCbufElement(EmitContext& ctx, Id vector, const IR::Value& offset, u32 index_offset) {
if (offset.IsImmediate()) {
const u32 element{(offset.U32() / 4) % 4 + index_offset};
return ctx.OpCompositeExtract(ctx.U32[1], vector, element);
}
const Id shift{ctx.OpShiftRightArithmetic(ctx.U32[1], ctx.Def(offset), ctx.Const(2u))};
Id element{ctx.OpBitwiseAnd(ctx.U32[1], shift, ctx.Const(3u))};
if (index_offset > 0) {
element = ctx.OpIAdd(ctx.U32[1], element, ctx.Const(index_offset));
}
return ctx.OpVectorExtractDynamic(ctx.U32[1], vector, element);
}
} // Anonymous namespace
void EmitGetRegister(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitSetRegister(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitGetPred(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitSetPred(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitSetGotoVariable(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitGetGotoVariable(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitSetIndirectBranchVariable(EmitContext&) {
throw LogicError("Unreachable instruction");
}
void EmitGetIndirectBranchVariable(EmitContext&) {
throw LogicError("Unreachable instruction");
}
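// Sub-word constant buffer reads degrade gracefully: use a native 8/16-bit
// load where the driver supports it, otherwise fetch a whole 32-bit word (or
// a uvec4 without descriptor aliasing) and bitfield-extract the value.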
Id EmitGetCbufU8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
if (ctx.profile.support_descriptor_aliasing && ctx.profile.support_int8) {
const Id load{GetCbuf(ctx, ctx.U8, &UniformDefinitions::U8, sizeof(u8), binding, offset)};
return ctx.OpUConvert(ctx.U32[1], load);
}
Id element{};
if (ctx.profile.support_descriptor_aliasing) {
element = GetCbufU32(ctx, binding, offset);
} else {
const Id vector{GetCbufU32x4(ctx, binding, offset)};
element = GetCbufElement(ctx, vector, offset, 0u);
}
const Id bit_offset{ctx.BitOffset8(offset)};
return ctx.OpBitFieldUExtract(ctx.U32[1], element, bit_offset, ctx.Const(8u));
}
Id EmitGetCbufS8(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
if (ctx.profile.support_descriptor_aliasing && ctx.profile.support_int8) {
const Id load{GetCbuf(ctx, ctx.S8, &UniformDefinitions::S8, sizeof(s8), binding, offset)};
return ctx.OpSConvert(ctx.U32[1], load);
}
Id element{};
if (ctx.profile.support_descriptor_aliasing) {
element = GetCbufU32(ctx, binding, offset);
} else {
const Id vector{GetCbufU32x4(ctx, binding, offset)};
element = GetCbufElement(ctx, vector, offset, 0u);
}
const Id bit_offset{ctx.BitOffset8(offset)};
return ctx.OpBitFieldSExtract(ctx.U32[1], element, bit_offset, ctx.Const(8u));
}
Id EmitGetCbufU16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
if (ctx.profile.support_descriptor_aliasing && ctx.profile.support_int16) {
const Id load{
GetCbuf(ctx, ctx.U16, &UniformDefinitions::U16, sizeof(u16), binding, offset)};
return ctx.OpUConvert(ctx.U32[1], load);
}
Id element{};
if (ctx.profile.support_descriptor_aliasing) {
element = GetCbufU32(ctx, binding, offset);
} else {
const Id vector{GetCbufU32x4(ctx, binding, offset)};
element = GetCbufElement(ctx, vector, offset, 0u);
}
const Id bit_offset{ctx.BitOffset16(offset)};
return ctx.OpBitFieldUExtract(ctx.U32[1], element, bit_offset, ctx.Const(16u));
}
Id EmitGetCbufS16(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
if (ctx.profile.support_descriptor_aliasing && ctx.profile.support_int16) {
const Id load{
GetCbuf(ctx, ctx.S16, &UniformDefinitions::S16, sizeof(s16), binding, offset)};
return ctx.OpSConvert(ctx.U32[1], load);
}
Id element{};
if (ctx.profile.support_descriptor_aliasing) {
element = GetCbufU32(ctx, binding, offset);
} else {
const Id vector{GetCbufU32x4(ctx, binding, offset)};
element = GetCbufElement(ctx, vector, offset, 0u);
}
const Id bit_offset{ctx.BitOffset16(offset)};
return ctx.OpBitFieldSExtract(ctx.U32[1], element, bit_offset, ctx.Const(16u));
}
Id EmitGetCbufU32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
if (ctx.profile.support_descriptor_aliasing) {
return GetCbufU32(ctx, binding, offset);
} else {
const Id vector{GetCbufU32x4(ctx, binding, offset)};
return GetCbufElement(ctx, vector, offset, 0u);
}
}
Id EmitGetCbufF32(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
if (ctx.profile.support_descriptor_aliasing) {
return GetCbuf(ctx, ctx.F32[1], &UniformDefinitions::F32, sizeof(f32), binding, offset);
} else {
const Id vector{GetCbufU32x4(ctx, binding, offset)};
return ctx.OpBitcast(ctx.F32[1], GetCbufElement(ctx, vector, offset, 0u));
}
}
Id EmitGetCbufU32x2(EmitContext& ctx, const IR::Value& binding, const IR::Value& offset) {
if (ctx.profile.support_descriptor_aliasing) {
return GetCbuf(ctx, ctx.U32[2], &UniformDefinitions::U32x2, sizeof(u32[2]), binding,
offset);
} else {
const Id vector{GetCbufU32x4(ctx, binding, offset)};
return ctx.OpCompositeConstruct(ctx.U32[2], GetCbufElement(ctx, vector, offset, 0u),
GetCbufElement(ctx, vector, offset, 1u));
}
}
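// Reads one 32-bit component of an input attribute, substituting the constant
// defaults (0.0, or 1.0 for the fourth component) when the attribute is
// disabled or the previous stage never wrote it.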
Id EmitGetAttribute(EmitContext& ctx, IR::Attribute attr, Id vertex) {
const u32 element{static_cast<u32>(attr) % 4};
if (IR::IsGeneric(attr)) {
const u32 index{IR::GenericAttributeIndex(attr)};
const std::optional<AttrInfo> type{AttrTypes(ctx, index)};
if (!type) {
// Attribute is disabled
return ctx.Const(element == 3 ? 1.0f : 0.0f);
}
if (!ctx.runtime_info.previous_stage_stores.Generic(index, element)) {
// Varying component is not written
return ctx.Const(type && element == 3 ? 1.0f : 0.0f);
}
const Id generic_id{ctx.input_generics.at(index)};
const Id pointer{AttrPointer(ctx, type->pointer, vertex, generic_id, ctx.Const(element))};
const Id value{ctx.OpLoad(type->id, pointer)};
return type->needs_cast ? ctx.OpBitcast(ctx.F32[1], value) : value;
}
switch (attr) {
case IR::Attribute::PrimitiveId:
return ctx.OpBitcast(ctx.F32[1], ctx.OpLoad(ctx.U32[1], ctx.primitive_id));
case IR::Attribute::PositionX:
case IR::Attribute::PositionY:
case IR::Attribute::PositionZ:
case IR::Attribute::PositionW:
return ctx.OpLoad(ctx.F32[1], AttrPointer(ctx, ctx.input_f32, vertex, ctx.input_position,
ctx.Const(element)));
case IR::Attribute::InstanceId:
if (ctx.profile.support_vertex_instance_id) {
return ctx.OpBitcast(ctx.F32[1], ctx.OpLoad(ctx.U32[1], ctx.instance_id));
} else {
const Id index{ctx.OpLoad(ctx.U32[1], ctx.instance_index)};
const Id base{ctx.OpLoad(ctx.U32[1], ctx.base_instance)};
return ctx.OpBitcast(ctx.F32[1], ctx.OpISub(ctx.U32[1], index, base));
}
case IR::Attribute::VertexId:
if (ctx.profile.support_vertex_instance_id) {
return ctx.OpBitcast(ctx.F32[1], ctx.OpLoad(ctx.U32[1], ctx.vertex_id));
} else {
const Id index{ctx.OpLoad(ctx.U32[1], ctx.vertex_index)};
const Id base{ctx.OpLoad(ctx.U32[1], ctx.base_vertex)};
return ctx.OpBitcast(ctx.F32[1], ctx.OpISub(ctx.U32[1], index, base));
}
case IR::Attribute::FrontFace:
return ctx.OpSelect(ctx.U32[1], ctx.OpLoad(ctx.U1, ctx.front_face),
ctx.Const(std::numeric_limits<u32>::max()), ctx.u32_zero_value);
case IR::Attribute::PointSpriteS:
return ctx.OpLoad(ctx.F32[1],
ctx.OpAccessChain(ctx.input_f32, ctx.point_coord, ctx.u32_zero_value));
case IR::Attribute::PointSpriteT:
return ctx.OpLoad(ctx.F32[1],
ctx.OpAccessChain(ctx.input_f32, ctx.point_coord, ctx.Const(1U)));
case IR::Attribute::TessellationEvaluationPointU:
return ctx.OpLoad(ctx.F32[1],
ctx.OpAccessChain(ctx.input_f32, ctx.tess_coord, ctx.u32_zero_value));
case IR::Attribute::TessellationEvaluationPointV:
return ctx.OpLoad(ctx.F32[1],
ctx.OpAccessChain(ctx.input_f32, ctx.tess_coord, ctx.Const(1U)));
default:
throw NotImplementedException("Read attribute {}", attr);
}
}
void EmitSetAttribute(EmitContext& ctx, IR::Attribute attr, Id value, [[maybe_unused]] Id vertex) {
const std::optional<OutAttr> output{OutputAttrPointer(ctx, attr)};
if (!output) {
return;
}
if (Sirit::ValidId(output->type)) {
value = ctx.OpBitcast(output->type, value);
}
ctx.OpStore(output->pointer, value);
}
Id EmitGetAttributeIndexed(EmitContext& ctx, Id offset, Id vertex) {
switch (ctx.stage) {
case Stage::TessellationControl:
case Stage::TessellationEval:
case Stage::Geometry:
return ctx.OpFunctionCall(ctx.F32[1], ctx.indexed_load_func, offset, vertex);
default:
return ctx.OpFunctionCall(ctx.F32[1], ctx.indexed_load_func, offset);
}
}
void EmitSetAttributeIndexed(EmitContext& ctx, Id offset, Id value, [[maybe_unused]] Id vertex) {
ctx.OpFunctionCall(ctx.void_id, ctx.indexed_store_func, offset, value);
}
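// Patch variables are outputs in tessellation control shaders and inputs in
// tessellation evaluation shaders; the pointer type is picked accordingly.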
Id EmitGetPatch(EmitContext& ctx, IR::Patch patch) {
if (!IR::IsGeneric(patch)) {
throw NotImplementedException("Non-generic patch load");
}
const u32 index{IR::GenericPatchIndex(patch)};
const Id element{ctx.Const(IR::GenericPatchElement(patch))};
const Id type{ctx.stage == Stage::TessellationControl ? ctx.output_f32 : ctx.input_f32};
const Id pointer{ctx.OpAccessChain(type, ctx.patches.at(index), element)};
return ctx.OpLoad(ctx.F32[1], pointer);
}
void EmitSetPatch(EmitContext& ctx, IR::Patch patch, Id value) {
const Id pointer{[&] {
if (IR::IsGeneric(patch)) {
const u32 index{IR::GenericPatchIndex(patch)};
const Id element{ctx.Const(IR::GenericPatchElement(patch))};
return ctx.OpAccessChain(ctx.output_f32, ctx.patches.at(index), element);
}
switch (patch) {
case IR::Patch::TessellationLodLeft:
case IR::Patch::TessellationLodRight:
case IR::Patch::TessellationLodTop:
case IR::Patch::TessellationLodBottom: {
const u32 index{static_cast<u32>(patch) - static_cast<u32>(IR::Patch::TessellationLodLeft)};
const Id index_id{ctx.Const(index)};
return ctx.OpAccessChain(ctx.output_f32, ctx.output_tess_level_outer, index_id);
}
case IR::Patch::TessellationLodInteriorU:
return ctx.OpAccessChain(ctx.output_f32, ctx.output_tess_level_inner,
ctx.u32_zero_value);
case IR::Patch::TessellationLodInteriorV:
return ctx.OpAccessChain(ctx.output_f32, ctx.output_tess_level_inner, ctx.Const(1u));
default:
throw NotImplementedException("Patch {}", patch);
}
}()};
ctx.OpStore(pointer, value);
}
void EmitSetFragColor(EmitContext& ctx, u32 index, u32 component, Id value) {
const Id component_id{ctx.Const(component)};
const Id pointer{ctx.OpAccessChain(ctx.output_f32, ctx.frag_color.at(index), component_id)};
ctx.OpStore(pointer, value);
}
void EmitSetSampleMask(EmitContext& ctx, Id value) {
ctx.OpStore(ctx.sample_mask, value);
}
void EmitSetFragDepth(EmitContext& ctx, Id value) {
ctx.OpStore(ctx.frag_depth, value);
}
void EmitGetZFlag(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitGetSFlag(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitGetCFlag(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitGetOFlag(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitSetZFlag(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitSetSFlag(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitSetCFlag(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
void EmitSetOFlag(EmitContext&) {
throw NotImplementedException("SPIR-V Instruction");
}
Id EmitWorkgroupId(EmitContext& ctx) {
return ctx.OpLoad(ctx.U32[3], ctx.workgroup_id);
}
Id EmitLocalInvocationId(EmitContext& ctx) {
return ctx.OpLoad(ctx.U32[3], ctx.local_invocation_id);
}
Id EmitInvocationId(EmitContext& ctx) {
return ctx.OpLoad(ctx.U32[1], ctx.invocation_id);
}
Id EmitSampleId(EmitContext& ctx) {
return ctx.OpLoad(ctx.U32[1], ctx.sample_id);
}
Id EmitIsHelperInvocation(EmitContext& ctx) {
return ctx.OpLoad(ctx.U1, ctx.is_helper_invocation);
}
Id EmitYDirection(EmitContext& ctx) {
return ctx.Const(ctx.runtime_info.y_negate ? -1.0f : 1.0f);
}
Id EmitLoadLocal(EmitContext& ctx, Id word_offset) {
const Id pointer{ctx.OpAccessChain(ctx.private_u32, ctx.local_memory, word_offset)};
return ctx.OpLoad(ctx.U32[1], pointer);
}
void EmitWriteLocal(EmitContext& ctx, Id word_offset, Id value) {
const Id pointer{ctx.OpAccessChain(ctx.private_u32, ctx.local_memory, word_offset)};
ctx.OpStore(pointer, value);
}
} // namespace Shader::Backend::SPIRV


@@ -0,0 +1,28 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "shader_recompiler/backend/spirv/emit_spirv.h"
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
namespace Shader::Backend::SPIRV {
void EmitJoin(EmitContext&) {
throw NotImplementedException("Join shouldn't be emitted");
}
void EmitDemoteToHelperInvocation(EmitContext& ctx) {
if (ctx.profile.support_demote_to_helper_invocation) {
ctx.OpDemoteToHelperInvocationEXT();
} else {
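// Fallback when the demote extension is unavailable: branch on a constant
// true condition into a block ending in OpKill. The merge block is
// dynamically unreachable but keeps the control flow structured.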
const Id kill_label{ctx.OpLabel()};
const Id impossible_label{ctx.OpLabel()};
ctx.OpSelectionMerge(impossible_label, spv::SelectionControlMask::MaskNone);
ctx.OpBranchConditional(ctx.true_value, kill_label, impossible_label);
ctx.AddLabel(kill_label);
ctx.OpKill();
ctx.AddLabel(impossible_label);
}
}
} // namespace Shader::Backend::SPIRV


@@ -0,0 +1,269 @@
// Copyright 2021 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.
#include "shader_recompiler/backend/spirv/emit_spirv.h"
#include "shader_recompiler/backend/spirv/emit_spirv_instructions.h"
namespace Shader::Backend::SPIRV {
namespace {
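// Narrowing helpers: a real OpUConvert/OpSConvert when native 8/16-bit
// integers are supported, otherwise a bitfield extract that stays in 32-bit
// registers.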
Id ExtractU16(EmitContext& ctx, Id value) {
if (ctx.profile.support_int16) {
return ctx.OpUConvert(ctx.U16, value);
} else {
return ctx.OpBitFieldUExtract(ctx.U32[1], value, ctx.u32_zero_value, ctx.Const(16u));
}
}
Id ExtractS16(EmitContext& ctx, Id value) {
if (ctx.profile.support_int16) {
return ctx.OpSConvert(ctx.S16, value);
} else {
return ctx.OpBitFieldSExtract(ctx.U32[1], value, ctx.u32_zero_value, ctx.Const(16u));
}
}
Id ExtractU8(EmitContext& ctx, Id value) {
if (ctx.profile.support_int8) {
return ctx.OpUConvert(ctx.U8, value);
} else {
return ctx.OpBitFieldUExtract(ctx.U32[1], value, ctx.u32_zero_value, ctx.Const(8u));
}
}
Id ExtractS8(EmitContext& ctx, Id value) {
if (ctx.profile.support_int8) {
return ctx.OpSConvert(ctx.S8, value);
} else {
return ctx.OpBitFieldSExtract(ctx.U32[1], value, ctx.u32_zero_value, ctx.Const(8u));
}
}
} // Anonymous namespace
Id EmitConvertS16F16(EmitContext& ctx, Id value) {
if (ctx.profile.support_int16) {
return ctx.OpSConvert(ctx.U32[1], ctx.OpConvertFToS(ctx.U16, value));
} else {
return ExtractS16(ctx, ctx.OpConvertFToS(ctx.U32[1], value));
}
}
Id EmitConvertS16F32(EmitContext& ctx, Id value) {
if (ctx.profile.support_int16) {
return ctx.OpSConvert(ctx.U32[1], ctx.OpConvertFToS(ctx.U16, value));
} else {
return ExtractS16(ctx, ctx.OpConvertFToS(ctx.U32[1], value));
}
}
Id EmitConvertS16F64(EmitContext& ctx, Id value) {
if (ctx.profile.support_int16) {
return ctx.OpSConvert(ctx.U32[1], ctx.OpConvertFToS(ctx.U16, value));
} else {
return ExtractS16(ctx, ctx.OpConvertFToS(ctx.U32[1], value));
}
}
Id EmitConvertS32F16(EmitContext& ctx, Id value) {
return ctx.OpConvertFToS(ctx.U32[1], value);
}
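// has_broken_signed_operations marks drivers that mishandle signed opcodes on
// unsigned types; the workaround is to route such conversions through an
// explicitly signed S32 bitcast.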
Id EmitConvertS32F32(EmitContext& ctx, Id value) {
if (ctx.profile.has_broken_signed_operations) {
return ctx.OpBitcast(ctx.U32[1], ctx.OpConvertFToS(ctx.S32[1], value));
} else {
return ctx.OpConvertFToS(ctx.U32[1], value);
}
}
Id EmitConvertS32F64(EmitContext& ctx, Id value) {
return ctx.OpConvertFToS(ctx.U32[1], value);
}
Id EmitConvertS64F16(EmitContext& ctx, Id value) {
return ctx.OpConvertFToS(ctx.U64, value);
}
Id EmitConvertS64F32(EmitContext& ctx, Id value) {
return ctx.OpConvertFToS(ctx.U64, value);
}
Id EmitConvertS64F64(EmitContext& ctx, Id value) {
return ctx.OpConvertFToS(ctx.U64, value);
}
Id EmitConvertU16F16(EmitContext& ctx, Id value) {
if (ctx.profile.support_int16) {
return ctx.OpUConvert(ctx.U32[1], ctx.OpConvertFToU(ctx.U16, value));
} else {
return ExtractU16(ctx, ctx.OpConvertFToU(ctx.U32[1], value));
}
}
Id EmitConvertU16F32(EmitContext& ctx, Id value) {
if (ctx.profile.support_int16) {
return ctx.OpUConvert(ctx.U32[1], ctx.OpConvertFToU(ctx.U16, value));
} else {
return ExtractU16(ctx, ctx.OpConvertFToU(ctx.U32[1], value));
}
}
Id EmitConvertU16F64(EmitContext& ctx, Id value) {
if (ctx.profile.support_int16) {
return ctx.OpUConvert(ctx.U32[1], ctx.OpConvertFToU(ctx.U16, value));
} else {
return ExtractU16(ctx, ctx.OpConvertFToU(ctx.U32[1], value));
}
}
Id EmitConvertU32F16(EmitContext& ctx, Id value) {
return ctx.OpConvertFToU(ctx.U32[1], value);
}
Id EmitConvertU32F32(EmitContext& ctx, Id value) {
return ctx.OpConvertFToU(ctx.U32[1], value);
}
Id EmitConvertU32F64(EmitContext& ctx, Id value) {
return ctx.OpConvertFToU(ctx.U32[1], value);
}
Id EmitConvertU64F16(EmitContext& ctx, Id value) {
return ctx.OpConvertFToU(ctx.U64, value);
}
Id EmitConvertU64F32(EmitContext& ctx, Id value) {
return ctx.OpConvertFToU(ctx.U64, value);
}
Id EmitConvertU64F64(EmitContext& ctx, Id value) {
return ctx.OpConvertFToU(ctx.U64, value);
}
Id EmitConvertU64U32(EmitContext& ctx, Id value) {
return ctx.OpUConvert(ctx.U64, value);
}
Id EmitConvertU32U64(EmitContext& ctx, Id value) {
return ctx.OpUConvert(ctx.U32[1], value);
}
Id EmitConvertF16F32(EmitContext& ctx, Id value) {
return ctx.OpFConvert(ctx.F16[1], value);
}
Id EmitConvertF32F16(EmitContext& ctx, Id value) {
return ctx.OpFConvert(ctx.F32[1], value);
}
Id EmitConvertF32F64(EmitContext& ctx, Id value) {
return ctx.OpFConvert(ctx.F32[1], value);
}
Id EmitConvertF64F32(EmitContext& ctx, Id value) {
return ctx.OpFConvert(ctx.F64[1], value);
}
Id EmitConvertF16S8(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F16[1], ExtractS8(ctx, value));
}
Id EmitConvertF16S16(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F16[1], ExtractS16(ctx, value));
}
Id EmitConvertF16S32(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F16[1], value);
}
Id EmitConvertF16S64(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F16[1], value);
}
Id EmitConvertF16U8(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F16[1], ExtractU8(ctx, value));
}
Id EmitConvertF16U16(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F16[1], ExtractU16(ctx, value));
}
Id EmitConvertF16U32(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F16[1], value);
}
Id EmitConvertF16U64(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F16[1], value);
}
Id EmitConvertF32S8(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F32[1], ExtractS8(ctx, value));
}
Id EmitConvertF32S16(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F32[1], ExtractS16(ctx, value));
}
Id EmitConvertF32S32(EmitContext& ctx, Id value) {
if (ctx.profile.has_broken_signed_operations) {
value = ctx.OpBitcast(ctx.S32[1], value);
}
return ctx.OpConvertSToF(ctx.F32[1], value);
}
Id EmitConvertF32S64(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F32[1], value);
}
Id EmitConvertF32U8(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F32[1], ExtractU8(ctx, value));
}
Id EmitConvertF32U16(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F32[1], ExtractU16(ctx, value));
}
Id EmitConvertF32U32(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F32[1], value);
}
Id EmitConvertF32U64(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F32[1], value);
}
Id EmitConvertF64S8(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F64[1], ExtractS8(ctx, value));
}
Id EmitConvertF64S16(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F64[1], ExtractS16(ctx, value));
}
Id EmitConvertF64S32(EmitContext& ctx, Id value) {
if (ctx.profile.has_broken_signed_operations) {
value = ctx.OpBitcast(ctx.S32[1], value);
}
return ctx.OpConvertSToF(ctx.F64[1], value);
}
Id EmitConvertF64S64(EmitContext& ctx, Id value) {
return ctx.OpConvertSToF(ctx.F64[1], value);
}
Id EmitConvertF64U8(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F64[1], ExtractU8(ctx, value));
}
Id EmitConvertF64U16(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F64[1], ExtractU16(ctx, value));
}
Id EmitConvertF64U32(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F64[1], value);
}
Id EmitConvertF64U64(EmitContext& ctx, Id value) {
return ctx.OpConvertUToF(ctx.F64[1], value);
}
} // namespace Shader::Backend::SPIRV

Some files were not shown because too many files have changed in this diff.