yuzu-emu / yuzu (Archived)

Merge pull request #11284 from liamwhite/nca-release

vfs: expand support for NCA reading
Authored by Fernando S on 2023-08-21 16:29:04 +02:00, committed by GitHub
commit 861597eb2e
75 changed files with 8032 additions and 1041 deletions
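
Note for reviewers: the NCA constructor drops its explicit BKTR base-RomFS and IVFC-offset parameters in favour of an optional pointer to a base NCA (see the content_archive.h and card_image.cpp hunks below). A minimal before/after sketch of a call site; base_nca is an illustrative, previously opened base NCA:

// Before
NCA nca{update_file, nullptr, 0};
// After
NCA nca{update_file};
NCA patched_nca{update_file, &base_nca};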

src/common/alignment.h

@@ -3,6 +3,7 @@
#pragma once
+#include <bit>
#include <cstddef>
#include <new>
#include <type_traits>
@@ -10,8 +11,10 @@
namespace Common {
template <typename T>
-requires std::is_unsigned_v<T>
+requires std::is_integral_v<T>
-[[nodiscard]] constexpr T AlignUp(T value, size_t size) {
+[[nodiscard]] constexpr T AlignUp(T value_, size_t size) {
+using U = typename std::make_unsigned_t<T>;
+auto value{static_cast<U>(value_)};
auto mod{static_cast<T>(value % size)};
value -= mod;
return static_cast<T>(mod == T{0} ? value : value + size);
@@ -24,8 +27,10 @@ template <typename T>
}
template <typename T>
-requires std::is_unsigned_v<T>
+requires std::is_integral_v<T>
-[[nodiscard]] constexpr T AlignDown(T value, size_t size) {
+[[nodiscard]] constexpr T AlignDown(T value_, size_t size) {
+using U = typename std::make_unsigned_t<T>;
+const auto value{static_cast<U>(value_)};
return static_cast<T>(value - value % size);
}
@@ -55,6 +60,30 @@ template <typename T, typename U>
return (x + (y - 1)) / y;
}
+template <typename T>
+requires std::is_integral_v<T>
+[[nodiscard]] constexpr T LeastSignificantOneBit(T x) {
+return x & ~(x - 1);
+}
+template <typename T>
+requires std::is_integral_v<T>
+[[nodiscard]] constexpr T ResetLeastSignificantOneBit(T x) {
+return x & (x - 1);
+}
+template <typename T>
+requires std::is_integral_v<T>
+[[nodiscard]] constexpr bool IsPowerOfTwo(T x) {
+return x > 0 && ResetLeastSignificantOneBit(x) == 0;
+}
+template <typename T>
+requires std::is_integral_v<T>
+[[nodiscard]] constexpr T FloorPowerOfTwo(T x) {
+return T{1} << (sizeof(T) * 8 - std::countl_zero(x) - 1);
+}
template <typename T, size_t Align = 16>
class AlignmentAllocator {
public:
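
The new helpers in common/alignment.h are standard bit tricks; the sketch below is hypothetical test code, not part of the commit, and shows the values they produce. Unsigned arguments are used because std::countl_zero is only defined for unsigned integer types:

#include "common/alignment.h"

static_assert(Common::LeastSignificantOneBit(0b1011'0000u) == 0b0001'0000u);
static_assert(Common::ResetLeastSignificantOneBit(0b1011'0000u) == 0b1010'0000u);
static_assert(Common::IsPowerOfTwo(64u) && !Common::IsPowerOfTwo(96u));
static_assert(Common::FloorPowerOfTwo(100u) == 64u);
// AlignUp/AlignDown now also accept signed values:
static_assert(Common::AlignUp(-3, 16) == 0);
static_assert(Common::AlignDown(-3, 16) == -16);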

src/common/lz4_compression.cpp

@@ -71,4 +71,10 @@ std::vector<u8> DecompressDataLZ4(std::span<const u8> compressed, std::size_t un
return uncompressed;
}
+int DecompressDataLZ4(void* dst, size_t dst_size, const void* src, size_t src_size) {
+// This is just a thin wrapper around LZ4.
+return LZ4_decompress_safe(reinterpret_cast<const char*>(src), reinterpret_cast<char*>(dst),
+static_cast<int>(src_size), static_cast<int>(dst_size));
+}
} // namespace Common::Compression

src/common/lz4_compression.h

@@ -56,4 +56,6 @@ namespace Common::Compression {
[[nodiscard]] std::vector<u8> DecompressDataLZ4(std::span<const u8> compressed,
std::size_t uncompressed_size);
+[[nodiscard]] int DecompressDataLZ4(void* dst, size_t dst_size, const void* src, size_t src_size);
} // namespace Common::Compression
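
The new overload is a thin wrapper around LZ4_decompress_safe, so it follows LZ4's return convention: the number of bytes written into dst on success, or a negative value on failure. A hedged usage sketch (the src buffer and uncompressed_size are illustrative):

std::vector<u8> dst(uncompressed_size);
const int written = Common::Compression::DecompressDataLZ4(dst.data(), dst.size(),
                                                            src.data(), src.size());
if (written < 0 || static_cast<size_t>(written) != dst.size()) {
    // Treat as corrupt input: decompression failed or produced an unexpected size.
}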

src/core/CMakeLists.txt

@@ -37,6 +37,49 @@ add_library(core STATIC
debugger/gdbstub.h
device_memory.cpp
device_memory.h
file_sys/fssystem/fs_i_storage.h
file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp
file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h
file_sys/fssystem/fssystem_aes_ctr_storage.cpp
file_sys/fssystem/fssystem_aes_ctr_storage.h
file_sys/fssystem/fssystem_aes_xts_storage.cpp
file_sys/fssystem/fssystem_aes_xts_storage.h
file_sys/fssystem/fssystem_alignment_matching_storage.h
file_sys/fssystem/fssystem_alignment_matching_storage_impl.cpp
file_sys/fssystem/fssystem_alignment_matching_storage_impl.h
file_sys/fssystem/fssystem_bucket_tree.cpp
file_sys/fssystem/fssystem_bucket_tree.h
file_sys/fssystem/fssystem_bucket_tree_utils.h
file_sys/fssystem/fssystem_compressed_storage.h
file_sys/fssystem/fssystem_compression_common.h
file_sys/fssystem/fssystem_compression_configuration.cpp
file_sys/fssystem/fssystem_compression_configuration.h
file_sys/fssystem/fssystem_crypto_configuration.cpp
file_sys/fssystem/fssystem_crypto_configuration.h
file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.cpp
file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h
file_sys/fssystem/fssystem_hierarchical_sha256_storage.cpp
file_sys/fssystem/fssystem_hierarchical_sha256_storage.h
file_sys/fssystem/fssystem_indirect_storage.cpp
file_sys/fssystem/fssystem_indirect_storage.h
file_sys/fssystem/fssystem_integrity_romfs_storage.cpp
file_sys/fssystem/fssystem_integrity_romfs_storage.h
file_sys/fssystem/fssystem_integrity_verification_storage.cpp
file_sys/fssystem/fssystem_integrity_verification_storage.h
file_sys/fssystem/fssystem_memory_resource_buffer_hold_storage.h
file_sys/fssystem/fssystem_nca_file_system_driver.cpp
file_sys/fssystem/fssystem_nca_file_system_driver.h
file_sys/fssystem/fssystem_nca_header.cpp
file_sys/fssystem/fssystem_nca_header.h
file_sys/fssystem/fssystem_nca_reader.cpp
file_sys/fssystem/fssystem_pooled_buffer.cpp
file_sys/fssystem/fssystem_pooled_buffer.h
file_sys/fssystem/fssystem_sparse_storage.cpp
file_sys/fssystem/fssystem_sparse_storage.h
file_sys/fssystem/fssystem_switch_storage.h
file_sys/fssystem/fssystem_utility.cpp
file_sys/fssystem/fssystem_utility.h
file_sys/fssystem/fs_types.h
file_sys/bis_factory.cpp
file_sys/bis_factory.h
file_sys/card_image.cpp
@@ -57,8 +100,6 @@ add_library(core STATIC
file_sys/mode.h
file_sys/nca_metadata.cpp
file_sys/nca_metadata.h
file_sys/nca_patch.cpp
file_sys/nca_patch.h
file_sys/partition_filesystem.cpp
file_sys/partition_filesystem.h
file_sys/patch_manager.cpp

src/core/file_sys/card_image.cpp

@@ -183,9 +183,9 @@ u32 XCI::GetSystemUpdateVersion() {
}
for (const auto& update_file : update->GetFiles()) {
-NCA nca{update_file, nullptr, 0};
+NCA nca{update_file};
-if (nca.GetStatus() != Loader::ResultStatus::Success) {
+if (nca.GetStatus() != Loader::ResultStatus::Success || nca.GetSubdirectories().empty()) {
continue;
}
@@ -296,7 +296,7 @@ Loader::ResultStatus XCI::AddNCAFromPartition(XCIPartition part) {
continue;
}
-auto nca = std::make_shared<NCA>(partition_file, nullptr, 0);
+auto nca = std::make_shared<NCA>(partition_file);
if (nca->IsUpdate()) {
continue;
}

src/core/file_sys/content_archive.cpp

@@ -12,546 +12,110 @@
#include "core/crypto/ctr_encryption_layer.h"
#include "core/crypto/key_manager.h"
#include "core/file_sys/content_archive.h"
-#include "core/file_sys/nca_patch.h"
#include "core/file_sys/partition_filesystem.h"
#include "core/file_sys/vfs_offset.h"
#include "core/loader/loader.h"
+#include "core/file_sys/fssystem/fssystem_compression_configuration.h"
+#include "core/file_sys/fssystem/fssystem_crypto_configuration.h"
+#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
namespace FileSys {
-// Media offsets in headers are stored divided by 512. Mult. by this to get real offset.
-constexpr u64 MEDIA_OFFSET_MULTIPLIER = 0x200;
+NCA::NCA(VirtualFile file_, const NCA* base_nca)
+: file(std::move(file_)), keys{Core::Crypto::KeyManager::Instance()} {
constexpr u64 SECTION_HEADER_SIZE = 0x200;
constexpr u64 SECTION_HEADER_OFFSET = 0x400;
constexpr u32 IVFC_MAX_LEVEL = 6;
enum class NCASectionFilesystemType : u8 {
PFS0 = 0x2,
ROMFS = 0x3,
};
struct IVFCLevel {
u64_le offset;
u64_le size;
u32_le block_size;
u32_le reserved;
};
static_assert(sizeof(IVFCLevel) == 0x18, "IVFCLevel has incorrect size.");
struct IVFCHeader {
u32_le magic;
u32_le magic_number;
INSERT_PADDING_BYTES_NOINIT(8);
std::array<IVFCLevel, 6> levels;
INSERT_PADDING_BYTES_NOINIT(64);
};
static_assert(sizeof(IVFCHeader) == 0xE0, "IVFCHeader has incorrect size.");
struct NCASectionHeaderBlock {
INSERT_PADDING_BYTES_NOINIT(3);
NCASectionFilesystemType filesystem_type;
NCASectionCryptoType crypto_type;
INSERT_PADDING_BYTES_NOINIT(3);
};
static_assert(sizeof(NCASectionHeaderBlock) == 0x8, "NCASectionHeaderBlock has incorrect size.");
struct NCABucketInfo {
u64 table_offset;
u64 table_size;
std::array<u8, 0x10> table_header;
};
static_assert(sizeof(NCABucketInfo) == 0x20, "NCABucketInfo has incorrect size.");
struct NCASparseInfo {
NCABucketInfo bucket;
u64 physical_offset;
u16 generation;
INSERT_PADDING_BYTES_NOINIT(0x6);
};
static_assert(sizeof(NCASparseInfo) == 0x30, "NCASparseInfo has incorrect size.");
struct NCACompressionInfo {
NCABucketInfo bucket;
INSERT_PADDING_BYTES_NOINIT(0x8);
};
static_assert(sizeof(NCACompressionInfo) == 0x28, "NCACompressionInfo has incorrect size.");
struct NCASectionRaw {
NCASectionHeaderBlock header;
std::array<u8, 0x138> block_data;
std::array<u8, 0x8> section_ctr;
NCASparseInfo sparse_info;
NCACompressionInfo compression_info;
INSERT_PADDING_BYTES_NOINIT(0x60);
};
static_assert(sizeof(NCASectionRaw) == 0x200, "NCASectionRaw has incorrect size.");
struct PFS0Superblock {
NCASectionHeaderBlock header_block;
std::array<u8, 0x20> hash;
u32_le size;
INSERT_PADDING_BYTES_NOINIT(4);
u64_le hash_table_offset;
u64_le hash_table_size;
u64_le pfs0_header_offset;
u64_le pfs0_size;
INSERT_PADDING_BYTES_NOINIT(0x1B0);
};
static_assert(sizeof(PFS0Superblock) == 0x200, "PFS0Superblock has incorrect size.");
struct RomFSSuperblock {
NCASectionHeaderBlock header_block;
IVFCHeader ivfc;
INSERT_PADDING_BYTES_NOINIT(0x118);
};
static_assert(sizeof(RomFSSuperblock) == 0x200, "RomFSSuperblock has incorrect size.");
struct BKTRHeader {
u64_le offset;
u64_le size;
u32_le magic;
INSERT_PADDING_BYTES_NOINIT(0x4);
u32_le number_entries;
INSERT_PADDING_BYTES_NOINIT(0x4);
};
static_assert(sizeof(BKTRHeader) == 0x20, "BKTRHeader has incorrect size.");
struct BKTRSuperblock {
NCASectionHeaderBlock header_block;
IVFCHeader ivfc;
INSERT_PADDING_BYTES_NOINIT(0x18);
BKTRHeader relocation;
BKTRHeader subsection;
INSERT_PADDING_BYTES_NOINIT(0xC0);
};
static_assert(sizeof(BKTRSuperblock) == 0x200, "BKTRSuperblock has incorrect size.");
union NCASectionHeader {
NCASectionRaw raw{};
PFS0Superblock pfs0;
RomFSSuperblock romfs;
BKTRSuperblock bktr;
};
static_assert(sizeof(NCASectionHeader) == 0x200, "NCASectionHeader has incorrect size.");
static bool IsValidNCA(const NCAHeader& header) {
// TODO(DarkLordZach): Add NCA2/NCA0 support.
return header.magic == Common::MakeMagic('N', 'C', 'A', '3');
}
NCA::NCA(VirtualFile file_, VirtualFile bktr_base_romfs_, u64 bktr_base_ivfc_offset)
: file(std::move(file_)),
bktr_base_romfs(std::move(bktr_base_romfs_)), keys{Core::Crypto::KeyManager::Instance()} {
if (file == nullptr) {
status = Loader::ResultStatus::ErrorNullFile;
return;
}
-if (sizeof(NCAHeader) != file->ReadObject(&header)) {
-LOG_ERROR(Loader, "File reader errored out during header read.");
+reader = std::make_shared<NcaReader>();
+if (Result rc =
reader->Initialize(file, GetCryptoConfiguration(), GetNcaCompressionConfiguration());
R_FAILED(rc)) {
if (rc != ResultInvalidNcaSignature) {
LOG_ERROR(Loader, "File reader errored out during header read: {:#x}",
rc.GetInnerValue());
}
status = Loader::ResultStatus::ErrorBadNCAHeader;
return;
}
-if (!HandlePotentialHeaderDecryption()) {
-return;
+RightsId rights_id{};
+reader->GetRightsId(rights_id.data(), rights_id.size());
if (rights_id != RightsId{}) {
// External decryption key required; provide it here.
const auto key_generation = std::max<s32>(reader->GetKeyGeneration(), 1) - 1;
u128 rights_id_u128;
std::memcpy(rights_id_u128.data(), rights_id.data(), sizeof(rights_id));
auto titlekey =
keys.GetKey(Core::Crypto::S128KeyType::Titlekey, rights_id_u128[1], rights_id_u128[0]);
if (titlekey == Core::Crypto::Key128{}) {
status = Loader::ResultStatus::ErrorMissingTitlekey;
return;
}
if (!keys.HasKey(Core::Crypto::S128KeyType::Titlekek, key_generation)) {
status = Loader::ResultStatus::ErrorMissingTitlekek;
return;
}
auto titlekek = keys.GetKey(Core::Crypto::S128KeyType::Titlekek, key_generation);
Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(titlekek, Core::Crypto::Mode::ECB);
cipher.Transcode(titlekey.data(), titlekey.size(), titlekey.data(),
Core::Crypto::Op::Decrypt);
reader->SetExternalDecryptionKey(titlekey.data(), titlekey.size());
}
-has_rights_id = std::ranges::any_of(header.rights_id, [](char c) { return c != '\0'; });
+const s32 fs_count = reader->GetFsCount();
NcaFileSystemDriver fs(base_nca ? base_nca->reader : nullptr, reader);
std::vector<VirtualFile> filesystems(fs_count);
for (s32 i = 0; i < fs_count; i++) {
NcaFsHeaderReader header_reader;
const Result rc = fs.OpenStorage(&filesystems[i], &header_reader, i);
if (R_FAILED(rc)) {
LOG_ERROR(Loader, "File reader errored out during read of section {}: {:#x}", i,
rc.GetInnerValue());
status = Loader::ResultStatus::ErrorBadNCAHeader;
return;
}
-const std::vector<NCASectionHeader> sections = ReadSectionHeaders();
-is_update = std::ranges::any_of(sections, [](const NCASectionHeader& nca_header) {
-return nca_header.raw.header.crypto_type == NCASectionCryptoType::BKTR;
-});
-if (!ReadSections(sections, bktr_base_ivfc_offset)) {
-return;
+if (header_reader.GetFsType() == NcaFsHeader::FsType::RomFs) {
+files.push_back(filesystems[i]);
+romfs = files.back();
+}
+if (header_reader.GetFsType() == NcaFsHeader::FsType::PartitionFs) {
+auto npfs = std::make_shared<PartitionFilesystem>(filesystems[i]);
if (npfs->GetStatus() == Loader::ResultStatus::Success) {
dirs.push_back(npfs);
if (IsDirectoryExeFS(npfs)) {
exefs = dirs.back();
} else if (IsDirectoryLogoPartition(npfs)) {
logo = dirs.back();
} else {
continue;
}
}
}
if (header_reader.GetEncryptionType() == NcaFsHeader::EncryptionType::AesCtrEx) {
is_update = true;
}
}
-status = Loader::ResultStatus::Success;
+if (is_update && base_nca == nullptr) {
status = Loader::ResultStatus::ErrorMissingBKTRBaseRomFS;
} else {
status = Loader::ResultStatus::Success;
}
}
NCA::~NCA() = default;
bool NCA::CheckSupportedNCA(const NCAHeader& nca_header) {
if (nca_header.magic == Common::MakeMagic('N', 'C', 'A', '2')) {
status = Loader::ResultStatus::ErrorNCA2;
return false;
}
if (nca_header.magic == Common::MakeMagic('N', 'C', 'A', '0')) {
status = Loader::ResultStatus::ErrorNCA0;
return false;
}
return true;
}
bool NCA::HandlePotentialHeaderDecryption() {
if (IsValidNCA(header)) {
return true;
}
if (!CheckSupportedNCA(header)) {
return false;
}
NCAHeader dec_header{};
Core::Crypto::AESCipher<Core::Crypto::Key256> cipher(
keys.GetKey(Core::Crypto::S256KeyType::Header), Core::Crypto::Mode::XTS);
cipher.XTSTranscode(&header, sizeof(NCAHeader), &dec_header, 0, 0x200,
Core::Crypto::Op::Decrypt);
if (IsValidNCA(dec_header)) {
header = dec_header;
encrypted = true;
} else {
if (!CheckSupportedNCA(dec_header)) {
return false;
}
if (keys.HasKey(Core::Crypto::S256KeyType::Header)) {
status = Loader::ResultStatus::ErrorIncorrectHeaderKey;
} else {
status = Loader::ResultStatus::ErrorMissingHeaderKey;
}
return false;
}
return true;
}
std::vector<NCASectionHeader> NCA::ReadSectionHeaders() const {
const std::ptrdiff_t number_sections =
std::ranges::count_if(header.section_tables, [](const NCASectionTableEntry& entry) {
return entry.media_offset > 0;
});
std::vector<NCASectionHeader> sections(number_sections);
const auto length_sections = SECTION_HEADER_SIZE * number_sections;
if (encrypted) {
auto raw = file->ReadBytes(length_sections, SECTION_HEADER_OFFSET);
Core::Crypto::AESCipher<Core::Crypto::Key256> cipher(
keys.GetKey(Core::Crypto::S256KeyType::Header), Core::Crypto::Mode::XTS);
cipher.XTSTranscode(raw.data(), length_sections, sections.data(), 2, SECTION_HEADER_SIZE,
Core::Crypto::Op::Decrypt);
} else {
file->ReadBytes(sections.data(), length_sections, SECTION_HEADER_OFFSET);
}
return sections;
}
bool NCA::ReadSections(const std::vector<NCASectionHeader>& sections, u64 bktr_base_ivfc_offset) {
for (std::size_t i = 0; i < sections.size(); ++i) {
const auto& section = sections[i];
if (section.raw.sparse_info.bucket.table_offset != 0 &&
section.raw.sparse_info.bucket.table_size != 0) {
LOG_ERROR(Loader, "Sparse NCAs are not supported.");
status = Loader::ResultStatus::ErrorSparseNCA;
return false;
}
if (section.raw.compression_info.bucket.table_offset != 0 &&
section.raw.compression_info.bucket.table_size != 0) {
LOG_ERROR(Loader, "Compressed NCAs are not supported.");
status = Loader::ResultStatus::ErrorCompressedNCA;
return false;
}
if (section.raw.header.filesystem_type == NCASectionFilesystemType::ROMFS) {
if (!ReadRomFSSection(section, header.section_tables[i], bktr_base_ivfc_offset)) {
return false;
}
} else if (section.raw.header.filesystem_type == NCASectionFilesystemType::PFS0) {
if (!ReadPFS0Section(section, header.section_tables[i])) {
return false;
}
}
}
return true;
}
bool NCA::ReadRomFSSection(const NCASectionHeader& section, const NCASectionTableEntry& entry,
u64 bktr_base_ivfc_offset) {
const std::size_t base_offset = entry.media_offset * MEDIA_OFFSET_MULTIPLIER;
ivfc_offset = section.romfs.ivfc.levels[IVFC_MAX_LEVEL - 1].offset;
const std::size_t romfs_offset = base_offset + ivfc_offset;
const std::size_t romfs_size = section.romfs.ivfc.levels[IVFC_MAX_LEVEL - 1].size;
auto raw = std::make_shared<OffsetVfsFile>(file, romfs_size, romfs_offset);
auto dec = Decrypt(section, raw, romfs_offset);
if (dec == nullptr) {
if (status != Loader::ResultStatus::Success)
return false;
if (has_rights_id)
status = Loader::ResultStatus::ErrorIncorrectTitlekeyOrTitlekek;
else
status = Loader::ResultStatus::ErrorIncorrectKeyAreaKey;
return false;
}
if (section.raw.header.crypto_type == NCASectionCryptoType::BKTR) {
if (section.bktr.relocation.magic != Common::MakeMagic('B', 'K', 'T', 'R') ||
section.bktr.subsection.magic != Common::MakeMagic('B', 'K', 'T', 'R')) {
status = Loader::ResultStatus::ErrorBadBKTRHeader;
return false;
}
if (section.bktr.relocation.offset + section.bktr.relocation.size !=
section.bktr.subsection.offset) {
status = Loader::ResultStatus::ErrorBKTRSubsectionNotAfterRelocation;
return false;
}
const u64 size = MEDIA_OFFSET_MULTIPLIER * (entry.media_end_offset - entry.media_offset);
if (section.bktr.subsection.offset + section.bktr.subsection.size != size) {
status = Loader::ResultStatus::ErrorBKTRSubsectionNotAtEnd;
return false;
}
const u64 offset = section.romfs.ivfc.levels[IVFC_MAX_LEVEL - 1].offset;
RelocationBlock relocation_block{};
if (dec->ReadObject(&relocation_block, section.bktr.relocation.offset - offset) !=
sizeof(RelocationBlock)) {
status = Loader::ResultStatus::ErrorBadRelocationBlock;
return false;
}
SubsectionBlock subsection_block{};
if (dec->ReadObject(&subsection_block, section.bktr.subsection.offset - offset) !=
sizeof(RelocationBlock)) {
status = Loader::ResultStatus::ErrorBadSubsectionBlock;
return false;
}
std::vector<RelocationBucketRaw> relocation_buckets_raw(
(section.bktr.relocation.size - sizeof(RelocationBlock)) / sizeof(RelocationBucketRaw));
if (dec->ReadBytes(relocation_buckets_raw.data(),
section.bktr.relocation.size - sizeof(RelocationBlock),
section.bktr.relocation.offset + sizeof(RelocationBlock) - offset) !=
section.bktr.relocation.size - sizeof(RelocationBlock)) {
status = Loader::ResultStatus::ErrorBadRelocationBuckets;
return false;
}
std::vector<SubsectionBucketRaw> subsection_buckets_raw(
(section.bktr.subsection.size - sizeof(SubsectionBlock)) / sizeof(SubsectionBucketRaw));
if (dec->ReadBytes(subsection_buckets_raw.data(),
section.bktr.subsection.size - sizeof(SubsectionBlock),
section.bktr.subsection.offset + sizeof(SubsectionBlock) - offset) !=
section.bktr.subsection.size - sizeof(SubsectionBlock)) {
status = Loader::ResultStatus::ErrorBadSubsectionBuckets;
return false;
}
std::vector<RelocationBucket> relocation_buckets(relocation_buckets_raw.size());
std::ranges::transform(relocation_buckets_raw, relocation_buckets.begin(),
&ConvertRelocationBucketRaw);
std::vector<SubsectionBucket> subsection_buckets(subsection_buckets_raw.size());
std::ranges::transform(subsection_buckets_raw, subsection_buckets.begin(),
&ConvertSubsectionBucketRaw);
u32 ctr_low;
std::memcpy(&ctr_low, section.raw.section_ctr.data(), sizeof(ctr_low));
subsection_buckets.back().entries.push_back({section.bktr.relocation.offset, {0}, ctr_low});
subsection_buckets.back().entries.push_back({size, {0}, 0});
std::optional<Core::Crypto::Key128> key;
if (encrypted) {
if (has_rights_id) {
status = Loader::ResultStatus::Success;
key = GetTitlekey();
if (!key) {
status = Loader::ResultStatus::ErrorMissingTitlekey;
return false;
}
} else {
key = GetKeyAreaKey(NCASectionCryptoType::BKTR);
if (!key) {
status = Loader::ResultStatus::ErrorMissingKeyAreaKey;
return false;
}
}
}
if (bktr_base_romfs == nullptr) {
status = Loader::ResultStatus::ErrorMissingBKTRBaseRomFS;
return false;
}
auto bktr = std::make_shared<BKTR>(
bktr_base_romfs, std::make_shared<OffsetVfsFile>(file, romfs_size, base_offset),
relocation_block, relocation_buckets, subsection_block, subsection_buckets, encrypted,
encrypted ? *key : Core::Crypto::Key128{}, base_offset, bktr_base_ivfc_offset,
section.raw.section_ctr);
// BKTR applies to entire IVFC, so make an offset version to level 6
files.push_back(std::make_shared<OffsetVfsFile>(
bktr, romfs_size, section.romfs.ivfc.levels[IVFC_MAX_LEVEL - 1].offset));
} else {
files.push_back(std::move(dec));
}
romfs = files.back();
return true;
}
bool NCA::ReadPFS0Section(const NCASectionHeader& section, const NCASectionTableEntry& entry) {
const u64 offset = (static_cast<u64>(entry.media_offset) * MEDIA_OFFSET_MULTIPLIER) +
section.pfs0.pfs0_header_offset;
const u64 size = MEDIA_OFFSET_MULTIPLIER * (entry.media_end_offset - entry.media_offset);
auto dec = Decrypt(section, std::make_shared<OffsetVfsFile>(file, size, offset), offset);
if (dec != nullptr) {
auto npfs = std::make_shared<PartitionFilesystem>(std::move(dec));
if (npfs->GetStatus() == Loader::ResultStatus::Success) {
dirs.push_back(std::move(npfs));
if (IsDirectoryExeFS(dirs.back()))
exefs = dirs.back();
else if (IsDirectoryLogoPartition(dirs.back()))
logo = dirs.back();
} else {
if (has_rights_id)
status = Loader::ResultStatus::ErrorIncorrectTitlekeyOrTitlekek;
else
status = Loader::ResultStatus::ErrorIncorrectKeyAreaKey;
return false;
}
} else {
if (status != Loader::ResultStatus::Success)
return false;
if (has_rights_id)
status = Loader::ResultStatus::ErrorIncorrectTitlekeyOrTitlekek;
else
status = Loader::ResultStatus::ErrorIncorrectKeyAreaKey;
return false;
}
return true;
}
u8 NCA::GetCryptoRevision() const {
u8 master_key_id = header.crypto_type;
if (header.crypto_type_2 > master_key_id)
master_key_id = header.crypto_type_2;
if (master_key_id > 0)
--master_key_id;
return master_key_id;
}
std::optional<Core::Crypto::Key128> NCA::GetKeyAreaKey(NCASectionCryptoType type) const {
const auto master_key_id = GetCryptoRevision();
if (!keys.HasKey(Core::Crypto::S128KeyType::KeyArea, master_key_id, header.key_index)) {
return std::nullopt;
}
std::vector<u8> key_area(header.key_area.begin(), header.key_area.end());
Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(
keys.GetKey(Core::Crypto::S128KeyType::KeyArea, master_key_id, header.key_index),
Core::Crypto::Mode::ECB);
cipher.Transcode(key_area.data(), key_area.size(), key_area.data(), Core::Crypto::Op::Decrypt);
Core::Crypto::Key128 out{};
if (type == NCASectionCryptoType::XTS) {
std::copy(key_area.begin(), key_area.begin() + 0x10, out.begin());
} else if (type == NCASectionCryptoType::CTR || type == NCASectionCryptoType::BKTR) {
std::copy(key_area.begin() + 0x20, key_area.begin() + 0x30, out.begin());
} else {
LOG_CRITICAL(Crypto, "Called GetKeyAreaKey on invalid NCASectionCryptoType type={:02X}",
type);
}
u128 out_128{};
std::memcpy(out_128.data(), out.data(), sizeof(u128));
LOG_TRACE(Crypto, "called with crypto_rev={:02X}, kak_index={:02X}, key={:016X}{:016X}",
master_key_id, header.key_index, out_128[1], out_128[0]);
return out;
}
std::optional<Core::Crypto::Key128> NCA::GetTitlekey() {
const auto master_key_id = GetCryptoRevision();
u128 rights_id{};
memcpy(rights_id.data(), header.rights_id.data(), 16);
if (rights_id == u128{}) {
status = Loader::ResultStatus::ErrorInvalidRightsID;
return std::nullopt;
}
auto titlekey = keys.GetKey(Core::Crypto::S128KeyType::Titlekey, rights_id[1], rights_id[0]);
if (titlekey == Core::Crypto::Key128{}) {
status = Loader::ResultStatus::ErrorMissingTitlekey;
return std::nullopt;
}
if (!keys.HasKey(Core::Crypto::S128KeyType::Titlekek, master_key_id)) {
status = Loader::ResultStatus::ErrorMissingTitlekek;
return std::nullopt;
}
Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(
keys.GetKey(Core::Crypto::S128KeyType::Titlekek, master_key_id), Core::Crypto::Mode::ECB);
cipher.Transcode(titlekey.data(), titlekey.size(), titlekey.data(), Core::Crypto::Op::Decrypt);
return titlekey;
}
VirtualFile NCA::Decrypt(const NCASectionHeader& s_header, VirtualFile in, u64 starting_offset) {
if (!encrypted)
return in;
switch (s_header.raw.header.crypto_type) {
case NCASectionCryptoType::NONE:
LOG_TRACE(Crypto, "called with mode=NONE");
return in;
case NCASectionCryptoType::CTR:
// During normal BKTR decryption, this entire function is skipped. This is for the metadata,
// which uses the same CTR as usual.
case NCASectionCryptoType::BKTR:
LOG_TRACE(Crypto, "called with mode=CTR, starting_offset={:016X}", starting_offset);
{
std::optional<Core::Crypto::Key128> key;
if (has_rights_id) {
status = Loader::ResultStatus::Success;
key = GetTitlekey();
if (!key) {
if (status == Loader::ResultStatus::Success)
status = Loader::ResultStatus::ErrorMissingTitlekey;
return nullptr;
}
} else {
key = GetKeyAreaKey(NCASectionCryptoType::CTR);
if (!key) {
status = Loader::ResultStatus::ErrorMissingKeyAreaKey;
return nullptr;
}
}
auto out = std::make_shared<Core::Crypto::CTREncryptionLayer>(std::move(in), *key,
starting_offset);
Core::Crypto::CTREncryptionLayer::IVData iv{};
for (std::size_t i = 0; i < 8; ++i) {
iv[i] = s_header.raw.section_ctr[8 - i - 1];
}
out->SetIV(iv);
return std::static_pointer_cast<VfsFile>(out);
}
case NCASectionCryptoType::XTS:
// TODO(DarkLordZach): Find a test case for XTS-encrypted NCAs
default:
LOG_ERROR(Crypto, "called with unhandled crypto type={:02X}",
s_header.raw.header.crypto_type);
return nullptr;
}
}
Loader::ResultStatus NCA::GetStatus() const {
return status;
}
@@ -579,21 +143,24 @@ VirtualDir NCA::GetParentDirectory() const {
}
NCAContentType NCA::GetType() const {
-return header.content_type;
+return static_cast<NCAContentType>(reader->GetContentType());
}
u64 NCA::GetTitleId() const {
-if (is_update || status == Loader::ResultStatus::ErrorMissingBKTRBaseRomFS)
-return header.title_id | 0x800;
-return header.title_id;
+if (is_update) {
+return reader->GetProgramId() | 0x800;
+}
+return reader->GetProgramId();
}
-std::array<u8, 16> NCA::GetRightsId() const {
-return header.rights_id;
+RightsId NCA::GetRightsId() const {
+RightsId result;
+reader->GetRightsId(result.data(), result.size());
+return result;
}
u32 NCA::GetSDKVersion() const {
-return header.sdk_version;
+return reader->GetSdkAddonVersion();
}
bool NCA::IsUpdate() const {
@@ -612,10 +179,6 @@ VirtualFile NCA::GetBaseFile() const {
return file;
}
-u64 NCA::GetBaseIVFCOffset() const {
-return ivfc_offset;
-}
VirtualDir NCA::GetLogoPartition() const {
return logo;
}

src/core/file_sys/content_archive.h

@@ -21,7 +21,7 @@ enum class ResultStatus : u16;
namespace FileSys {
-union NCASectionHeader;
+class NcaReader;
/// Describes the type of content within an NCA archive.
enum class NCAContentType : u8 {
@@ -45,41 +45,7 @@ enum class NCAContentType : u8 {
PublicData = 5,
};
-enum class NCASectionCryptoType : u8 {
+using RightsId = std::array<u8, 0x10>;
NONE = 1,
XTS = 2,
CTR = 3,
BKTR = 4,
};
struct NCASectionTableEntry {
u32_le media_offset;
u32_le media_end_offset;
INSERT_PADDING_BYTES(0x8);
};
static_assert(sizeof(NCASectionTableEntry) == 0x10, "NCASectionTableEntry has incorrect size.");
struct NCAHeader {
std::array<u8, 0x100> rsa_signature_1;
std::array<u8, 0x100> rsa_signature_2;
u32_le magic;
u8 is_system;
NCAContentType content_type;
u8 crypto_type;
u8 key_index;
u64_le size;
u64_le title_id;
INSERT_PADDING_BYTES(0x4);
u32_le sdk_version;
u8 crypto_type_2;
INSERT_PADDING_BYTES(15);
std::array<u8, 0x10> rights_id;
std::array<NCASectionTableEntry, 0x4> section_tables;
std::array<std::array<u8, 0x20>, 0x4> hash_tables;
std::array<u8, 0x40> key_area;
INSERT_PADDING_BYTES(0xC0);
};
static_assert(sizeof(NCAHeader) == 0x400, "NCAHeader has incorrect size.");
inline bool IsDirectoryExeFS(const VirtualDir& pfs) {
// According to switchbrew, an exefs must only contain these two files:
@@ -97,8 +63,7 @@ inline bool IsDirectoryLogoPartition(const VirtualDir& pfs) {
// After construction, use GetStatus to determine if the file is valid and ready to be used.
class NCA : public ReadOnlyVfsDirectory {
public:
-explicit NCA(VirtualFile file, VirtualFile bktr_base_romfs = nullptr,
-u64 bktr_base_ivfc_offset = 0);
+explicit NCA(VirtualFile file, const NCA* base_nca = nullptr);
~NCA() override;
Loader::ResultStatus GetStatus() const;
@@ -110,7 +75,7 @@
NCAContentType GetType() const;
u64 GetTitleId() const;
-std::array<u8, 0x10> GetRightsId() const;
+RightsId GetRightsId() const;
u32 GetSDKVersion() const;
bool IsUpdate() const;
@@ -119,26 +84,9 @@ public:
VirtualFile GetBaseFile() const;
// Returns the base ivfc offset used in BKTR patching.
u64 GetBaseIVFCOffset() const;
VirtualDir GetLogoPartition() const;
private:
bool CheckSupportedNCA(const NCAHeader& header);
bool HandlePotentialHeaderDecryption();
std::vector<NCASectionHeader> ReadSectionHeaders() const;
bool ReadSections(const std::vector<NCASectionHeader>& sections, u64 bktr_base_ivfc_offset);
bool ReadRomFSSection(const NCASectionHeader& section, const NCASectionTableEntry& entry,
u64 bktr_base_ivfc_offset);
bool ReadPFS0Section(const NCASectionHeader& section, const NCASectionTableEntry& entry);
u8 GetCryptoRevision() const;
std::optional<Core::Crypto::Key128> GetKeyAreaKey(NCASectionCryptoType type) const;
std::optional<Core::Crypto::Key128> GetTitlekey();
VirtualFile Decrypt(const NCASectionHeader& header, VirtualFile in, u64 starting_offset);
std::vector<VirtualDir> dirs;
std::vector<VirtualFile> files;
@@ -146,11 +94,6 @@ private:
VirtualDir exefs = nullptr;
VirtualDir logo = nullptr;
VirtualFile file;
VirtualFile bktr_base_romfs;
u64 ivfc_offset = 0;
NCAHeader header{};
bool has_rights_id{};
Loader::ResultStatus status{};
@@ -158,6 +101,7 @@ private:
bool is_update = false;
Core::Crypto::KeyManager& keys;
+std::shared_ptr<NcaReader> reader;
};
} // namespace FileSys
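
A sketch of how the reworked constructor is meant to be driven for patching, based on the base_nca parameter above and the ErrorMissingBKTRBaseRomFS handling in content_archive.cpp (GetRomFS() is assumed from the existing NCA accessors; it is not part of this diff):

FileSys::NCA base{base_file};
FileSys::NCA update{update_file, &base}; // AesCtrEx sections mark this NCA as an update
if (update.GetStatus() == Loader::ResultStatus::Success) {
    FileSys::VirtualFile patched_romfs = update.GetRomFS(); // RomFS with the patch applied
}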

src/core/file_sys/errors.h

@@ -17,4 +17,74 @@ constexpr Result ERROR_INVALID_ARGUMENT{ErrorModule::FS, 6001};
constexpr Result ERROR_INVALID_OFFSET{ErrorModule::FS, 6061};
constexpr Result ERROR_INVALID_SIZE{ErrorModule::FS, 6062};
constexpr Result ResultUnsupportedSdkVersion{ErrorModule::FS, 50};
constexpr Result ResultPartitionNotFound{ErrorModule::FS, 1001};
constexpr Result ResultUnsupportedVersion{ErrorModule::FS, 3002};
constexpr Result ResultOutOfRange{ErrorModule::FS, 3005};
constexpr Result ResultAllocationMemoryFailedInFileSystemBuddyHeapA{ErrorModule::FS, 3294};
constexpr Result ResultAllocationMemoryFailedInNcaFileSystemDriverI{ErrorModule::FS, 3341};
constexpr Result ResultAllocationMemoryFailedInNcaReaderA{ErrorModule::FS, 3363};
constexpr Result ResultAllocationMemoryFailedInAesCtrCounterExtendedStorageA{ErrorModule::FS, 3399};
constexpr Result ResultAllocationMemoryFailedInIntegrityRomFsStorageA{ErrorModule::FS, 3412};
constexpr Result ResultAllocationMemoryFailedMakeUnique{ErrorModule::FS, 3422};
constexpr Result ResultAllocationMemoryFailedAllocateShared{ErrorModule::FS, 3423};
constexpr Result ResultInvalidAesCtrCounterExtendedEntryOffset{ErrorModule::FS, 4012};
constexpr Result ResultIndirectStorageCorrupted{ErrorModule::FS, 4021};
constexpr Result ResultInvalidIndirectEntryOffset{ErrorModule::FS, 4022};
constexpr Result ResultInvalidIndirectEntryStorageIndex{ErrorModule::FS, 4023};
constexpr Result ResultInvalidIndirectStorageSize{ErrorModule::FS, 4024};
constexpr Result ResultInvalidBucketTreeSignature{ErrorModule::FS, 4032};
constexpr Result ResultInvalidBucketTreeEntryCount{ErrorModule::FS, 4033};
constexpr Result ResultInvalidBucketTreeNodeEntryCount{ErrorModule::FS, 4034};
constexpr Result ResultInvalidBucketTreeNodeOffset{ErrorModule::FS, 4035};
constexpr Result ResultInvalidBucketTreeEntryOffset{ErrorModule::FS, 4036};
constexpr Result ResultInvalidBucketTreeEntrySetOffset{ErrorModule::FS, 4037};
constexpr Result ResultInvalidBucketTreeNodeIndex{ErrorModule::FS, 4038};
constexpr Result ResultInvalidBucketTreeVirtualOffset{ErrorModule::FS, 4039};
constexpr Result ResultRomNcaInvalidPatchMetaDataHashType{ErrorModule::FS, 4084};
constexpr Result ResultRomNcaInvalidIntegrityLayerInfoOffset{ErrorModule::FS, 4085};
constexpr Result ResultRomNcaInvalidPatchMetaDataHashDataSize{ErrorModule::FS, 4086};
constexpr Result ResultRomNcaInvalidPatchMetaDataHashDataOffset{ErrorModule::FS, 4087};
constexpr Result ResultRomNcaInvalidPatchMetaDataHashDataHash{ErrorModule::FS, 4088};
constexpr Result ResultRomNcaInvalidSparseMetaDataHashType{ErrorModule::FS, 4089};
constexpr Result ResultRomNcaInvalidSparseMetaDataHashDataSize{ErrorModule::FS, 4090};
constexpr Result ResultRomNcaInvalidSparseMetaDataHashDataOffset{ErrorModule::FS, 4091};
constexpr Result ResultRomNcaInvalidSparseMetaDataHashDataHash{ErrorModule::FS, 4092};
constexpr Result ResultNcaBaseStorageOutOfRangeB{ErrorModule::FS, 4509};
constexpr Result ResultNcaBaseStorageOutOfRangeC{ErrorModule::FS, 4510};
constexpr Result ResultNcaBaseStorageOutOfRangeD{ErrorModule::FS, 4511};
constexpr Result ResultInvalidNcaSignature{ErrorModule::FS, 4517};
constexpr Result ResultNcaFsHeaderHashVerificationFailed{ErrorModule::FS, 4520};
constexpr Result ResultInvalidNcaKeyIndex{ErrorModule::FS, 4521};
constexpr Result ResultInvalidNcaFsHeaderHashType{ErrorModule::FS, 4522};
constexpr Result ResultInvalidNcaFsHeaderEncryptionType{ErrorModule::FS, 4523};
constexpr Result ResultInvalidNcaPatchInfoIndirectSize{ErrorModule::FS, 4524};
constexpr Result ResultInvalidNcaPatchInfoAesCtrExSize{ErrorModule::FS, 4525};
constexpr Result ResultInvalidNcaPatchInfoAesCtrExOffset{ErrorModule::FS, 4526};
constexpr Result ResultInvalidNcaHeader{ErrorModule::FS, 4528};
constexpr Result ResultInvalidNcaFsHeader{ErrorModule::FS, 4529};
constexpr Result ResultNcaBaseStorageOutOfRangeE{ErrorModule::FS, 4530};
constexpr Result ResultInvalidHierarchicalSha256BlockSize{ErrorModule::FS, 4532};
constexpr Result ResultInvalidHierarchicalSha256LayerCount{ErrorModule::FS, 4533};
constexpr Result ResultHierarchicalSha256BaseStorageTooLarge{ErrorModule::FS, 4534};
constexpr Result ResultHierarchicalSha256HashVerificationFailed{ErrorModule::FS, 4535};
constexpr Result ResultInvalidNcaHierarchicalIntegrityVerificationLayerCount{ErrorModule::FS, 4541};
constexpr Result ResultInvalidNcaIndirectStorageOutOfRange{ErrorModule::FS, 4542};
constexpr Result ResultInvalidNcaHeader1SignatureKeyGeneration{ErrorModule::FS, 4543};
constexpr Result ResultInvalidCompressedStorageSize{ErrorModule::FS, 4547};
constexpr Result ResultInvalidNcaMetaDataHashDataSize{ErrorModule::FS, 4548};
constexpr Result ResultInvalidNcaMetaDataHashDataHash{ErrorModule::FS, 4549};
constexpr Result ResultUnexpectedInCompressedStorageA{ErrorModule::FS, 5324};
constexpr Result ResultUnexpectedInCompressedStorageB{ErrorModule::FS, 5325};
constexpr Result ResultUnexpectedInCompressedStorageC{ErrorModule::FS, 5326};
constexpr Result ResultUnexpectedInCompressedStorageD{ErrorModule::FS, 5327};
constexpr Result ResultInvalidArgument{ErrorModule::FS, 6001};
constexpr Result ResultInvalidOffset{ErrorModule::FS, 6061};
constexpr Result ResultInvalidSize{ErrorModule::FS, 6062};
constexpr Result ResultNullptrArgument{ErrorModule::FS, 6063};
constexpr Result ResultUnsupportedSetSizeForIndirectStorage{ErrorModule::FS, 6325};
constexpr Result ResultUnsupportedWriteForCompressedStorage{ErrorModule::FS, 6387};
constexpr Result ResultUnsupportedOperateRangeForCompressedStorage{ErrorModule::FS, 6388};
constexpr Result ResultBufferAllocationFailed{ErrorModule::FS, 6705};
} // namespace FileSys

src/core/file_sys/fssystem/fs_i_storage.h

@@ -0,0 +1,58 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/overflow.h"
#include "core/file_sys/errors.h"
#include "core/file_sys/vfs.h"
namespace FileSys {
class IStorage : public VfsFile {
public:
virtual std::string GetName() const override {
return {};
}
virtual VirtualDir GetContainingDirectory() const override {
return {};
}
virtual bool IsWritable() const override {
return true;
}
virtual bool IsReadable() const override {
return true;
}
virtual bool Resize(size_t size) override {
return false;
}
virtual bool Rename(std::string_view name) override {
return false;
}
static inline Result CheckAccessRange(s64 offset, s64 size, s64 total_size) {
R_UNLESS(offset >= 0, ResultInvalidOffset);
R_UNLESS(size >= 0, ResultInvalidSize);
R_UNLESS(Common::WrappingAdd(offset, size) >= offset, ResultOutOfRange);
R_UNLESS(offset + size <= total_size, ResultOutOfRange);
R_SUCCEED();
}
};
class IReadOnlyStorage : public IStorage {
public:
virtual bool IsWritable() const override {
return false;
}
virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
return 0;
}
};
} // namespace FileSys
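
CheckAccessRange centralizes the bounds checks every storage needs before touching its backing file: non-negative offset and size, no overflow in offset + size, and the range fitting inside total_size. A minimal sketch of a caller (a hypothetical helper, using the R_* result macros already used in this file):

Result ReadChecked(const FileSys::IStorage& storage, u8* out, s64 size, s64 offset) {
    R_TRY(FileSys::IStorage::CheckAccessRange(offset, size, storage.GetSize()));
    storage.Read(out, static_cast<size_t>(size), static_cast<size_t>(offset));
    R_SUCCEED();
}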

src/core/file_sys/fssystem/fs_types.h

@@ -0,0 +1,46 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/common_funcs.h"
namespace FileSys {
struct Int64 {
u32 low;
u32 high;
constexpr void Set(s64 v) {
this->low = static_cast<u32>((v & static_cast<u64>(0x00000000FFFFFFFFULL)) >> 0);
this->high = static_cast<u32>((v & static_cast<u64>(0xFFFFFFFF00000000ULL)) >> 32);
}
constexpr s64 Get() const {
return (static_cast<s64>(this->high) << 32) | (static_cast<s64>(this->low));
}
constexpr Int64& operator=(s64 v) {
this->Set(v);
return *this;
}
constexpr operator s64() const {
return this->Get();
}
};
struct HashSalt {
static constexpr size_t Size = 32;
std::array<u8, Size> value;
};
static_assert(std::is_trivial_v<HashSalt>);
static_assert(sizeof(HashSalt) == HashSalt::Size);
constexpr inline size_t IntegrityMinLayerCount = 2;
constexpr inline size_t IntegrityMaxLayerCount = 7;
constexpr inline size_t IntegrityLayerCountSave = 5;
constexpr inline size_t IntegrityLayerCountSaveDataMeta = 4;
} // namespace FileSys
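
Int64 keeps a 64-bit value as two u32 halves so it can sit at 4-byte alignment inside on-disk headers, while converting transparently to and from s64. A small illustration (MakeInt64 is a hypothetical helper for compile-time checking, not part of the commit):

constexpr FileSys::Int64 MakeInt64(s64 v) {
    FileSys::Int64 r{};
    r.Set(v);
    return r;
}
static_assert(MakeInt64(0x1'2345'6789).low == 0x2345'6789);
static_assert(MakeInt64(0x1'2345'6789).high == 0x1);
static_assert(MakeInt64(0x1'2345'6789) == 0x1'2345'6789); // via operator s64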

src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.cpp

@@ -0,0 +1,251 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h"
#include "core/file_sys/fssystem/fssystem_aes_ctr_storage.h"
#include "core/file_sys/fssystem/fssystem_nca_header.h"
#include "core/file_sys/vfs_offset.h"
namespace FileSys {
namespace {
class SoftwareDecryptor final : public AesCtrCounterExtendedStorage::IDecryptor {
public:
virtual void Decrypt(
u8* buf, size_t buf_size, const std::array<u8, AesCtrCounterExtendedStorage::KeySize>& key,
const std::array<u8, AesCtrCounterExtendedStorage::IvSize>& iv) override final;
};
} // namespace
Result AesCtrCounterExtendedStorage::CreateSoftwareDecryptor(std::unique_ptr<IDecryptor>* out) {
std::unique_ptr<IDecryptor> decryptor = std::make_unique<SoftwareDecryptor>();
R_UNLESS(decryptor != nullptr, ResultAllocationMemoryFailedInAesCtrCounterExtendedStorageA);
*out = std::move(decryptor);
R_SUCCEED();
}
Result AesCtrCounterExtendedStorage::Initialize(const void* key, size_t key_size, u32 secure_value,
VirtualFile data_storage,
VirtualFile table_storage) {
// Read and verify the bucket tree header.
BucketTree::Header header;
table_storage->ReadObject(std::addressof(header), 0);
R_TRY(header.Verify());
// Determine extents.
const auto node_storage_size = QueryNodeStorageSize(header.entry_count);
const auto entry_storage_size = QueryEntryStorageSize(header.entry_count);
const auto node_storage_offset = QueryHeaderStorageSize();
const auto entry_storage_offset = node_storage_offset + node_storage_size;
// Create a software decryptor.
std::unique_ptr<IDecryptor> sw_decryptor;
R_TRY(CreateSoftwareDecryptor(std::addressof(sw_decryptor)));
// Initialize.
R_RETURN(this->Initialize(
key, key_size, secure_value, 0, data_storage,
std::make_shared<OffsetVfsFile>(table_storage, node_storage_size, node_storage_offset),
std::make_shared<OffsetVfsFile>(table_storage, entry_storage_size, entry_storage_offset),
header.entry_count, std::move(sw_decryptor)));
}
Result AesCtrCounterExtendedStorage::Initialize(const void* key, size_t key_size, u32 secure_value,
s64 counter_offset, VirtualFile data_storage,
VirtualFile node_storage, VirtualFile entry_storage,
s32 entry_count,
std::unique_ptr<IDecryptor>&& decryptor) {
// Validate preconditions.
ASSERT(key != nullptr);
ASSERT(key_size == KeySize);
ASSERT(counter_offset >= 0);
ASSERT(decryptor != nullptr);
// Initialize the bucket tree table.
if (entry_count > 0) {
R_TRY(
m_table.Initialize(node_storage, entry_storage, NodeSize, sizeof(Entry), entry_count));
} else {
m_table.Initialize(NodeSize, 0);
}
// Set members.
m_data_storage = data_storage;
std::memcpy(m_key.data(), key, key_size);
m_secure_value = secure_value;
m_counter_offset = counter_offset;
m_decryptor = std::move(decryptor);
R_SUCCEED();
}
void AesCtrCounterExtendedStorage::Finalize() {
if (this->IsInitialized()) {
m_table.Finalize();
m_data_storage = VirtualFile();
}
}
Result AesCtrCounterExtendedStorage::GetEntryList(Entry* out_entries, s32* out_entry_count,
s32 entry_count, s64 offset, s64 size) {
// Validate pre-conditions.
ASSERT(offset >= 0);
ASSERT(size >= 0);
ASSERT(this->IsInitialized());
// Clear the out count.
R_UNLESS(out_entry_count != nullptr, ResultNullptrArgument);
*out_entry_count = 0;
// Succeed if there's no range.
R_SUCCEED_IF(size == 0);
// If we have an output array, we need it to be non-null.
R_UNLESS(out_entries != nullptr || entry_count == 0, ResultNullptrArgument);
// Check that our range is valid.
BucketTree::Offsets table_offsets;
R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
// Find the offset in our tree.
BucketTree::Visitor visitor;
R_TRY(m_table.Find(std::addressof(visitor), offset));
{
const auto entry_offset = visitor.Get<Entry>()->GetOffset();
R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
ResultInvalidAesCtrCounterExtendedEntryOffset);
}
// Prepare to loop over entries.
const auto end_offset = offset + static_cast<s64>(size);
s32 count = 0;
auto cur_entry = *visitor.Get<Entry>();
while (cur_entry.GetOffset() < end_offset) {
// Try to write the entry to the out list.
if (entry_count != 0) {
if (count >= entry_count) {
break;
}
std::memcpy(out_entries + count, std::addressof(cur_entry), sizeof(Entry));
}
count++;
// Advance.
if (visitor.CanMoveNext()) {
R_TRY(visitor.MoveNext());
cur_entry = *visitor.Get<Entry>();
} else {
break;
}
}
// Write the output count.
*out_entry_count = count;
R_SUCCEED();
}
size_t AesCtrCounterExtendedStorage::Read(u8* buffer, size_t size, size_t offset) const {
// Validate preconditions.
ASSERT(this->IsInitialized());
// Allow zero size.
if (size == 0) {
return size;
}
// Validate arguments.
ASSERT(buffer != nullptr);
ASSERT(Common::IsAligned(offset, BlockSize));
ASSERT(Common::IsAligned(size, BlockSize));
BucketTree::Offsets table_offsets;
ASSERT(R_SUCCEEDED(m_table.GetOffsets(std::addressof(table_offsets))));
ASSERT(table_offsets.IsInclude(offset, size));
// Read the data.
m_data_storage->Read(buffer, size, offset);
// Find the offset in our tree.
BucketTree::Visitor visitor;
ASSERT(R_SUCCEEDED(m_table.Find(std::addressof(visitor), offset)));
{
const auto entry_offset = visitor.Get<Entry>()->GetOffset();
ASSERT(Common::IsAligned(entry_offset, BlockSize));
ASSERT(0 <= entry_offset && table_offsets.IsInclude(entry_offset));
}
// Prepare to read in chunks.
u8* cur_data = static_cast<u8*>(buffer);
auto cur_offset = offset;
const auto end_offset = offset + static_cast<s64>(size);
while (cur_offset < end_offset) {
// Get the current entry.
const auto cur_entry = *visitor.Get<Entry>();
// Get and validate the entry's offset.
const auto cur_entry_offset = cur_entry.GetOffset();
ASSERT(static_cast<size_t>(cur_entry_offset) <= cur_offset);
// Get and validate the next entry offset.
s64 next_entry_offset;
if (visitor.CanMoveNext()) {
ASSERT(R_SUCCEEDED(visitor.MoveNext()));
next_entry_offset = visitor.Get<Entry>()->GetOffset();
ASSERT(table_offsets.IsInclude(next_entry_offset));
} else {
next_entry_offset = table_offsets.end_offset;
}
ASSERT(Common::IsAligned(next_entry_offset, BlockSize));
ASSERT(cur_offset < static_cast<size_t>(next_entry_offset));
// Get the offset of the entry in the data we read.
const auto data_offset = cur_offset - cur_entry_offset;
const auto data_size = (next_entry_offset - cur_entry_offset) - data_offset;
ASSERT(data_size > 0);
// Determine how much is left.
const auto remaining_size = end_offset - cur_offset;
const auto cur_size = static_cast<size_t>(std::min(remaining_size, data_size));
ASSERT(cur_size <= size);
// If necessary, perform decryption.
if (cur_entry.encryption_value == Entry::Encryption::Encrypted) {
// Make the CTR for the data we're decrypting.
const auto counter_offset = m_counter_offset + cur_entry_offset + data_offset;
NcaAesCtrUpperIv upper_iv = {
.part = {.generation = static_cast<u32>(cur_entry.generation),
.secure_value = m_secure_value}};
std::array<u8, IvSize> iv;
AesCtrStorage::MakeIv(iv.data(), IvSize, upper_iv.value, counter_offset);
// Decrypt.
m_decryptor->Decrypt(cur_data, cur_size, m_key, iv);
}
// Advance.
cur_data += cur_size;
cur_offset += cur_size;
}
return size;
}
void SoftwareDecryptor::Decrypt(u8* buf, size_t buf_size,
const std::array<u8, AesCtrCounterExtendedStorage::KeySize>& key,
const std::array<u8, AesCtrCounterExtendedStorage::IvSize>& iv) {
Core::Crypto::AESCipher<Core::Crypto::Key128, AesCtrCounterExtendedStorage::KeySize> cipher(
key, Core::Crypto::Mode::CTR);
cipher.SetIV(iv);
cipher.Transcode(buf, buf_size, buf, Core::Crypto::Op::Decrypt);
}
} // namespace FileSys

src/core/file_sys/fssystem/fssystem_aes_ctr_counter_extended_storage.h

@@ -0,0 +1,114 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <optional>
#include "common/literals.h"
#include "core/file_sys/fssystem/fs_i_storage.h"
#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
namespace FileSys {
using namespace Common::Literals;
class AesCtrCounterExtendedStorage : public IReadOnlyStorage {
YUZU_NON_COPYABLE(AesCtrCounterExtendedStorage);
YUZU_NON_MOVEABLE(AesCtrCounterExtendedStorage);
public:
static constexpr size_t BlockSize = 0x10;
static constexpr size_t KeySize = 0x10;
static constexpr size_t IvSize = 0x10;
static constexpr size_t NodeSize = 16_KiB;
class IDecryptor {
public:
virtual ~IDecryptor() {}
virtual void Decrypt(u8* buf, size_t buf_size, const std::array<u8, KeySize>& key,
const std::array<u8, IvSize>& iv) = 0;
};
struct Entry {
enum class Encryption : u8 {
Encrypted = 0,
NotEncrypted = 1,
};
std::array<u8, sizeof(s64)> offset;
Encryption encryption_value;
std::array<u8, 3> reserved;
s32 generation;
void SetOffset(s64 value) {
std::memcpy(this->offset.data(), std::addressof(value), sizeof(s64));
}
s64 GetOffset() const {
s64 value;
std::memcpy(std::addressof(value), this->offset.data(), sizeof(s64));
return value;
}
};
static_assert(sizeof(Entry) == 0x10);
static_assert(alignof(Entry) == 4);
static_assert(std::is_trivial_v<Entry>);
public:
static constexpr s64 QueryHeaderStorageSize() {
return BucketTree::QueryHeaderStorageSize();
}
static constexpr s64 QueryNodeStorageSize(s32 entry_count) {
return BucketTree::QueryNodeStorageSize(NodeSize, sizeof(Entry), entry_count);
}
static constexpr s64 QueryEntryStorageSize(s32 entry_count) {
return BucketTree::QueryEntryStorageSize(NodeSize, sizeof(Entry), entry_count);
}
static Result CreateSoftwareDecryptor(std::unique_ptr<IDecryptor>* out);
public:
AesCtrCounterExtendedStorage()
: m_table(), m_data_storage(), m_secure_value(), m_counter_offset(), m_decryptor() {}
virtual ~AesCtrCounterExtendedStorage() {
this->Finalize();
}
Result Initialize(const void* key, size_t key_size, u32 secure_value, s64 counter_offset,
VirtualFile data_storage, VirtualFile node_storage, VirtualFile entry_storage,
s32 entry_count, std::unique_ptr<IDecryptor>&& decryptor);
void Finalize();
bool IsInitialized() const {
return m_table.IsInitialized();
}
virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
virtual size_t GetSize() const override {
BucketTree::Offsets offsets;
ASSERT(R_SUCCEEDED(m_table.GetOffsets(std::addressof(offsets))));
return offsets.end_offset;
}
Result GetEntryList(Entry* out_entries, s32* out_entry_count, s32 entry_count, s64 offset,
s64 size);
private:
Result Initialize(const void* key, size_t key_size, u32 secure_value, VirtualFile data_storage,
VirtualFile table_storage);
private:
mutable BucketTree m_table;
VirtualFile m_data_storage;
std::array<u8, KeySize> m_key;
u32 m_secure_value;
s64 m_counter_offset;
std::unique_ptr<IDecryptor> m_decryptor;
};
} // namespace FileSys

src/core/file_sys/fssystem/fssystem_aes_ctr_storage.cpp

@@ -0,0 +1,129 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/alignment.h"
#include "common/swap.h"
#include "core/file_sys/fssystem/fssystem_aes_ctr_storage.h"
#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
#include "core/file_sys/fssystem/fssystem_utility.h"
namespace FileSys {
void AesCtrStorage::MakeIv(void* dst, size_t dst_size, u64 upper, s64 offset) {
ASSERT(dst != nullptr);
ASSERT(dst_size == IvSize);
ASSERT(offset >= 0);
const uintptr_t out_addr = reinterpret_cast<uintptr_t>(dst);
*reinterpret_cast<u64_be*>(out_addr + 0) = upper;
*reinterpret_cast<s64_be*>(out_addr + sizeof(u64)) = static_cast<s64>(offset / BlockSize);
}
AesCtrStorage::AesCtrStorage(VirtualFile base, const void* key, size_t key_size, const void* iv,
size_t iv_size)
: m_base_storage(std::move(base)) {
ASSERT(m_base_storage != nullptr);
ASSERT(key != nullptr);
ASSERT(iv != nullptr);
ASSERT(key_size == KeySize);
ASSERT(iv_size == IvSize);
std::memcpy(m_key.data(), key, KeySize);
std::memcpy(m_iv.data(), iv, IvSize);
m_cipher.emplace(m_key, Core::Crypto::Mode::CTR);
}
size_t AesCtrStorage::Read(u8* buffer, size_t size, size_t offset) const {
// Allow zero-size reads.
if (size == 0) {
return size;
}
// Ensure buffer is valid.
ASSERT(buffer != nullptr);
// We can only read at block aligned offsets.
ASSERT(Common::IsAligned(offset, BlockSize));
ASSERT(Common::IsAligned(size, BlockSize));
// Read the data.
m_base_storage->Read(buffer, size, offset);
// Setup the counter.
std::array<u8, IvSize> ctr;
std::memcpy(ctr.data(), m_iv.data(), IvSize);
AddCounter(ctr.data(), IvSize, offset / BlockSize);
// Decrypt.
m_cipher->SetIV(ctr);
m_cipher->Transcode(buffer, size, buffer, Core::Crypto::Op::Decrypt);
return size;
}
size_t AesCtrStorage::Write(const u8* buffer, size_t size, size_t offset) {
// Allow zero-size writes.
if (size == 0) {
return size;
}
// Ensure buffer is valid.
ASSERT(buffer != nullptr);
// We can only write at block aligned offsets.
ASSERT(Common::IsAligned(offset, BlockSize));
ASSERT(Common::IsAligned(size, BlockSize));
// Get a pooled buffer.
PooledBuffer pooled_buffer;
const bool use_work_buffer = true;
if (use_work_buffer) {
pooled_buffer.Allocate(size, BlockSize);
}
// Setup the counter.
std::array<u8, IvSize> ctr;
std::memcpy(ctr.data(), m_iv.data(), IvSize);
AddCounter(ctr.data(), IvSize, offset / BlockSize);
// Loop until all data is written.
size_t remaining = size;
s64 cur_offset = 0;
while (remaining > 0) {
// Determine data we're writing and where.
const size_t write_size =
use_work_buffer ? std::min(pooled_buffer.GetSize(), remaining) : remaining;
void* write_buf;
if (use_work_buffer) {
write_buf = pooled_buffer.GetBuffer();
} else {
write_buf = const_cast<u8*>(buffer);
}
// Encrypt the data.
m_cipher->SetIV(ctr);
m_cipher->Transcode(buffer, write_size, reinterpret_cast<u8*>(write_buf),
Core::Crypto::Op::Encrypt);
// Write the encrypted data.
m_base_storage->Write(reinterpret_cast<u8*>(write_buf), write_size, offset + cur_offset);
// Advance.
cur_offset += write_size;
remaining -= write_size;
if (remaining > 0) {
AddCounter(ctr.data(), IvSize, write_size / BlockSize);
}
}
return size;
}
size_t AesCtrStorage::GetSize() const {
return m_base_storage->GetSize();
}
} // namespace FileSys
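
MakeIv writes the 16-byte AES-CTR IV as the big-endian upper counter followed by the big-endian block index. A hypothetical illustration of the resulting layout:

#include <array>
#include "core/file_sys/fssystem/fssystem_aes_ctr_storage.h"

void IvLayoutExample() {
    std::array<u8, FileSys::AesCtrStorage::IvSize> iv{};
    FileSys::AesCtrStorage::MakeIv(iv.data(), iv.size(), 0x0102030405060708ULL, 0x3000);
    // iv now holds 01 02 03 04 05 06 07 08 | 00 00 00 00 00 00 03 00,
    // i.e. the upper value, then the block index 0x3000 / BlockSize == 0x300.
}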

src/core/file_sys/fssystem/fssystem_aes_ctr_storage.h

@@ -0,0 +1,43 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <optional>
#include "core/crypto/aes_util.h"
#include "core/crypto/key_manager.h"
#include "core/file_sys/errors.h"
#include "core/file_sys/fssystem/fs_i_storage.h"
#include "core/file_sys/vfs.h"
namespace FileSys {
class AesCtrStorage : public IStorage {
YUZU_NON_COPYABLE(AesCtrStorage);
YUZU_NON_MOVEABLE(AesCtrStorage);
public:
static constexpr size_t BlockSize = 0x10;
static constexpr size_t KeySize = 0x10;
static constexpr size_t IvSize = 0x10;
public:
static void MakeIv(void* dst, size_t dst_size, u64 upper, s64 offset);
public:
AesCtrStorage(VirtualFile base, const void* key, size_t key_size, const void* iv,
size_t iv_size);
virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
virtual size_t Write(const u8* buffer, size_t size, size_t offset) override;
virtual size_t GetSize() const override;
private:
VirtualFile m_base_storage;
std::array<u8, KeySize> m_key;
std::array<u8, IvSize> m_iv;
mutable std::optional<Core::Crypto::AESCipher<Core::Crypto::Key128>> m_cipher;
};
} // namespace FileSys

src/core/file_sys/fssystem/fssystem_aes_xts_storage.cpp

@@ -0,0 +1,112 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/alignment.h"
#include "common/swap.h"
#include "core/file_sys/errors.h"
#include "core/file_sys/fssystem/fssystem_aes_xts_storage.h"
#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
#include "core/file_sys/fssystem/fssystem_utility.h"
namespace FileSys {
void AesXtsStorage::MakeAesXtsIv(void* dst, size_t dst_size, s64 offset, size_t block_size) {
ASSERT(dst != nullptr);
ASSERT(dst_size == IvSize);
ASSERT(offset >= 0);
const uintptr_t out_addr = reinterpret_cast<uintptr_t>(dst);
*reinterpret_cast<s64_be*>(out_addr + sizeof(s64)) = offset / block_size;
}
AesXtsStorage::AesXtsStorage(VirtualFile base, const void* key1, const void* key2, size_t key_size,
const void* iv, size_t iv_size, size_t block_size)
: m_base_storage(std::move(base)), m_block_size(block_size), m_mutex() {
ASSERT(m_base_storage != nullptr);
ASSERT(key1 != nullptr);
ASSERT(key2 != nullptr);
ASSERT(iv != nullptr);
ASSERT(key_size == KeySize);
ASSERT(iv_size == IvSize);
ASSERT(Common::IsAligned(m_block_size, AesBlockSize));
std::memcpy(m_key.data() + 0, key1, KeySize);
std::memcpy(m_key.data() + 0x10, key2, KeySize);
std::memcpy(m_iv.data(), iv, IvSize);
m_cipher.emplace(m_key, Core::Crypto::Mode::XTS);
}
size_t AesXtsStorage::Read(u8* buffer, size_t size, size_t offset) const {
// Allow zero-size reads.
if (size == 0) {
return size;
}
// Ensure buffer is valid.
ASSERT(buffer != nullptr);
// We can only read at block aligned offsets.
ASSERT(Common::IsAligned(offset, AesBlockSize));
ASSERT(Common::IsAligned(size, AesBlockSize));
// Read the data.
m_base_storage->Read(buffer, size, offset);
// Setup the counter.
std::array<u8, IvSize> ctr;
std::memcpy(ctr.data(), m_iv.data(), IvSize);
AddCounter(ctr.data(), IvSize, offset / m_block_size);
// Handle any unaligned data before the start.
size_t processed_size = 0;
if ((offset % m_block_size) != 0) {
// Determine the size of the pre-data read.
const size_t skip_size =
static_cast<size_t>(offset - Common::AlignDown(offset, m_block_size));
const size_t data_size = std::min(size, m_block_size - skip_size);
// Decrypt into a pooled buffer.
{
PooledBuffer tmp_buf(m_block_size, m_block_size);
ASSERT(tmp_buf.GetSize() >= m_block_size);
std::memset(tmp_buf.GetBuffer(), 0, skip_size);
std::memcpy(tmp_buf.GetBuffer() + skip_size, buffer, data_size);
m_cipher->SetIV(ctr);
m_cipher->Transcode(tmp_buf.GetBuffer(), m_block_size, tmp_buf.GetBuffer(),
Core::Crypto::Op::Decrypt);
std::memcpy(buffer, tmp_buf.GetBuffer() + skip_size, data_size);
}
AddCounter(ctr.data(), IvSize, 1);
processed_size += data_size;
ASSERT(processed_size == std::min(size, m_block_size - skip_size));
}
// Decrypt aligned chunks.
char* cur = reinterpret_cast<char*>(buffer) + processed_size;
size_t remaining = size - processed_size;
while (remaining > 0) {
const size_t cur_size = std::min(m_block_size, remaining);
m_cipher->SetIV(ctr);
m_cipher->Transcode(cur, cur_size, cur, Core::Crypto::Op::Decrypt);
remaining -= cur_size;
cur += cur_size;
AddCounter(ctr.data(), IvSize, 1);
}
return size;
}
size_t AesXtsStorage::GetSize() const {
return m_base_storage->GetSize();
}
} // namespace FileSys
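MakeAesXtsIv above stores the sector number (offset / block_size) big-endian into the second half of the 16-byte IV and leaves the first half to the caller. A self-contained sketch of that layout, with an illustrative helper name and a zero-filled lower half assumed for clarity:
// Sketch: build a 16-byte IV whose bytes [8, 16) carry the sector number
// (offset / block_size) in big-endian order, as MakeAesXtsIv does above.
#include <array>
#include <cstddef>
#include <cstdint>
std::array<std::uint8_t, 16> MakeSectorIv(std::int64_t offset, std::size_t block_size) {
    std::array<std::uint8_t, 16> iv{};  // lower half left zeroed for illustration
    std::uint64_t sector = static_cast<std::uint64_t>(offset) / block_size;
    for (int i = 15; i >= 8; --i) {
        iv[i] = static_cast<std::uint8_t>(sector & 0xFF);
        sector >>= 8;
    }
    return iv;
}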

View File

@ -0,0 +1,42 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <optional>
#include "core/crypto/aes_util.h"
#include "core/crypto/key_manager.h"
#include "core/file_sys/fssystem/fs_i_storage.h"
namespace FileSys {
class AesXtsStorage : public IReadOnlyStorage {
YUZU_NON_COPYABLE(AesXtsStorage);
YUZU_NON_MOVEABLE(AesXtsStorage);
public:
static constexpr size_t AesBlockSize = 0x10;
static constexpr size_t KeySize = 0x20;
static constexpr size_t IvSize = 0x10;
public:
static void MakeAesXtsIv(void* dst, size_t dst_size, s64 offset, size_t block_size);
public:
AesXtsStorage(VirtualFile base, const void* key1, const void* key2, size_t key_size,
const void* iv, size_t iv_size, size_t block_size);
virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
virtual size_t GetSize() const override;
private:
VirtualFile m_base_storage;
std::array<u8, KeySize> m_key;
std::array<u8, IvSize> m_iv;
const size_t m_block_size;
std::mutex m_mutex;
mutable std::optional<Core::Crypto::AESCipher<Core::Crypto::Key256>> m_cipher;
};
} // namespace FileSys

View File

@ -0,0 +1,146 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/alignment.h"
#include "core/file_sys/errors.h"
#include "core/file_sys/fssystem/fs_i_storage.h"
#include "core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h"
#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
namespace FileSys {
template <size_t DataAlign_, size_t BufferAlign_>
class AlignmentMatchingStorage : public IStorage {
YUZU_NON_COPYABLE(AlignmentMatchingStorage);
YUZU_NON_MOVEABLE(AlignmentMatchingStorage);
public:
static constexpr size_t DataAlign = DataAlign_;
static constexpr size_t BufferAlign = BufferAlign_;
static constexpr size_t DataAlignMax = 0x200;
static_assert(DataAlign <= DataAlignMax);
static_assert(Common::IsPowerOfTwo(DataAlign));
static_assert(Common::IsPowerOfTwo(BufferAlign));
private:
VirtualFile m_base_storage;
s64 m_base_storage_size;
public:
explicit AlignmentMatchingStorage(VirtualFile bs) : m_base_storage(std::move(bs)) {}
virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
// Allocate a work buffer on stack.
alignas(DataAlignMax) std::array<char, DataAlign> work_buf;
// Succeed if zero size.
if (size == 0) {
return size;
}
// Validate arguments.
ASSERT(buffer != nullptr);
s64 bs_size = this->GetSize();
ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
return AlignmentMatchingStorageImpl::Read(m_base_storage, work_buf.data(), work_buf.size(),
DataAlign, BufferAlign, offset, buffer, size);
}
virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
// Allocate a work buffer on stack.
alignas(DataAlignMax) std::array<char, DataAlign> work_buf;
// Succeed if zero size.
if (size == 0) {
return size;
}
// Validate arguments.
ASSERT(buffer != nullptr);
s64 bs_size = this->GetSize();
ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
return AlignmentMatchingStorageImpl::Write(m_base_storage, work_buf.data(), work_buf.size(),
DataAlign, BufferAlign, offset, buffer, size);
}
virtual size_t GetSize() const override {
return m_base_storage->GetSize();
}
};
template <size_t BufferAlign_>
class AlignmentMatchingStoragePooledBuffer : public IStorage {
YUZU_NON_COPYABLE(AlignmentMatchingStoragePooledBuffer);
YUZU_NON_MOVEABLE(AlignmentMatchingStoragePooledBuffer);
public:
static constexpr size_t BufferAlign = BufferAlign_;
static_assert(Common::IsPowerOfTwo(BufferAlign));
private:
VirtualFile m_base_storage;
s64 m_base_storage_size;
size_t m_data_align;
public:
explicit AlignmentMatchingStoragePooledBuffer(VirtualFile bs, size_t da)
: m_base_storage(std::move(bs)), m_data_align(da) {
ASSERT(Common::IsPowerOfTwo(da));
}
virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
// Succeed if zero size.
if (size == 0) {
return size;
}
// Validate arguments.
ASSERT(buffer != nullptr);
s64 bs_size = this->GetSize();
ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
// Allocate a pooled buffer.
PooledBuffer pooled_buffer;
pooled_buffer.AllocateParticularlyLarge(m_data_align, m_data_align);
return AlignmentMatchingStorageImpl::Read(m_base_storage, pooled_buffer.GetBuffer(),
pooled_buffer.GetSize(), m_data_align,
BufferAlign, offset, buffer, size);
}
virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
// Succeed if zero size.
if (size == 0) {
return size;
}
// Validate arguments.
ASSERT(buffer != nullptr);
s64 bs_size = this->GetSize();
ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(offset, size, bs_size)));
// Allocate a pooled buffer.
PooledBuffer pooled_buffer;
pooled_buffer.AllocateParticularlyLarge(m_data_align, m_data_align);
return AlignmentMatchingStorageImpl::Write(m_base_storage, pooled_buffer.GetBuffer(),
pooled_buffer.GetSize(), m_data_align,
BufferAlign, offset, buffer, size);
}
virtual size_t GetSize() const override {
return m_base_storage->GetSize();
}
};
} // namespace FileSys
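The two storages above both reduce an arbitrary access to an unaligned head, an aligned core, and an unaligned tail, servicing the core directly and routing the edges through a work buffer. A simplified, self-contained sketch of just the splitting arithmetic (the real code additionally accounts for the caller buffer's alignment):
// Sketch: split an access [offset, offset + size) into head/core/tail pieces
// for a given data alignment. Illustrative only.
#include <algorithm>
#include <cstdint>
struct AccessSplit {
    std::uint64_t head_size;  // bytes before the first aligned boundary
    std::uint64_t core_size;  // whole aligned blocks in the middle
    std::uint64_t tail_size;  // bytes after the last aligned boundary
};
constexpr AccessSplit SplitAccess(std::uint64_t offset, std::uint64_t size,
                                  std::uint64_t align) {
    const std::uint64_t end = offset + size;
    const std::uint64_t aligned_begin = std::min((offset + align - 1) / align * align, end);
    const std::uint64_t aligned_end = std::max(end / align * align, aligned_begin);
    return {aligned_begin - offset, aligned_end - aligned_begin, end - aligned_end};
}
// Example: SplitAccess(0x1F0, 0x230, 0x200) yields head 0x10, core 0x200, tail 0x20.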

View File

@ -0,0 +1,204 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/alignment.h"
#include "core/file_sys/fssystem/fssystem_alignment_matching_storage_impl.h"
namespace FileSys {
namespace {
template <typename T>
constexpr size_t GetRoundDownDifference(T x, size_t align) {
return static_cast<size_t>(x - Common::AlignDown(x, align));
}
template <typename T>
constexpr size_t GetRoundUpDifference(T x, size_t align) {
return static_cast<size_t>(Common::AlignUp(x, align) - x);
}
template <typename T>
size_t GetRoundUpDifference(T* x, size_t align) {
return GetRoundUpDifference(reinterpret_cast<uintptr_t>(x), align);
}
} // namespace
size_t AlignmentMatchingStorageImpl::Read(VirtualFile base_storage, char* work_buf,
size_t work_buf_size, size_t data_alignment,
size_t buffer_alignment, s64 offset, u8* buffer,
size_t size) {
// Check preconditions.
ASSERT(work_buf_size >= data_alignment);
// Succeed if zero size.
if (size == 0) {
return size;
}
// Validate arguments.
ASSERT(buffer != nullptr);
// Determine extents.
u8* aligned_core_buffer;
s64 core_offset;
size_t core_size;
size_t buffer_gap;
size_t offset_gap;
s64 covered_offset;
const size_t offset_round_up_difference = GetRoundUpDifference(offset, data_alignment);
if (Common::IsAligned(reinterpret_cast<uintptr_t>(buffer) + offset_round_up_difference,
buffer_alignment)) {
aligned_core_buffer = buffer + offset_round_up_difference;
core_offset = Common::AlignUp(offset, data_alignment);
core_size = (size < offset_round_up_difference)
? 0
: Common::AlignDown(size - offset_round_up_difference, data_alignment);
buffer_gap = 0;
offset_gap = 0;
covered_offset = core_size > 0 ? core_offset : offset;
} else {
const size_t buffer_round_up_difference = GetRoundUpDifference(buffer, buffer_alignment);
aligned_core_buffer = buffer + buffer_round_up_difference;
core_offset = Common::AlignDown(offset, data_alignment);
core_size = (size < buffer_round_up_difference)
? 0
: Common::AlignDown(size - buffer_round_up_difference, data_alignment);
buffer_gap = buffer_round_up_difference;
offset_gap = GetRoundDownDifference(offset, data_alignment);
covered_offset = offset;
}
// Read the core portion.
if (core_size > 0) {
base_storage->Read(aligned_core_buffer, core_size, core_offset);
if (offset_gap != 0 || buffer_gap != 0) {
std::memmove(aligned_core_buffer - buffer_gap, aligned_core_buffer + offset_gap,
core_size - offset_gap);
core_size -= offset_gap;
}
}
// Handle the head portion.
if (offset < covered_offset) {
const s64 head_offset = Common::AlignDown(offset, data_alignment);
const size_t head_size = static_cast<size_t>(covered_offset - offset);
ASSERT(GetRoundDownDifference(offset, data_alignment) + head_size <= work_buf_size);
base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, head_offset);
std::memcpy(buffer, work_buf + GetRoundDownDifference(offset, data_alignment), head_size);
}
// Handle the tail portion.
s64 tail_offset = covered_offset + core_size;
size_t remaining_tail_size = static_cast<size_t>((offset + size) - tail_offset);
while (remaining_tail_size > 0) {
const auto aligned_tail_offset = Common::AlignDown(tail_offset, data_alignment);
const auto cur_size =
std::min(static_cast<size_t>(aligned_tail_offset + data_alignment - tail_offset),
remaining_tail_size);
base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, aligned_tail_offset);
ASSERT((tail_offset - offset) + cur_size <= size);
ASSERT((tail_offset - aligned_tail_offset) + cur_size <= data_alignment);
std::memcpy(reinterpret_cast<char*>(buffer) + (tail_offset - offset),
work_buf + (tail_offset - aligned_tail_offset), cur_size);
remaining_tail_size -= cur_size;
tail_offset += cur_size;
}
return size;
}
size_t AlignmentMatchingStorageImpl::Write(VirtualFile base_storage, char* work_buf,
size_t work_buf_size, size_t data_alignment,
size_t buffer_alignment, s64 offset, const u8* buffer,
size_t size) {
// Check preconditions.
ASSERT(work_buf_size >= data_alignment);
// Succeed if zero size.
if (size == 0) {
return size;
}
// Validate arguments.
ASSERT(buffer != nullptr);
// Determine extents.
const u8* aligned_core_buffer;
s64 core_offset;
size_t core_size;
s64 covered_offset;
const size_t offset_round_up_difference = GetRoundUpDifference(offset, data_alignment);
if (Common::IsAligned(reinterpret_cast<uintptr_t>(buffer) + offset_round_up_difference,
buffer_alignment)) {
aligned_core_buffer = buffer + offset_round_up_difference;
core_offset = Common::AlignUp(offset, data_alignment);
core_size = (size < offset_round_up_difference)
? 0
: Common::AlignDown(size - offset_round_up_difference, data_alignment);
covered_offset = core_size > 0 ? core_offset : offset;
} else {
aligned_core_buffer = nullptr;
core_offset = Common::AlignDown(offset, data_alignment);
core_size = 0;
covered_offset = offset;
}
// Write the core portion.
if (core_size > 0) {
base_storage->Write(aligned_core_buffer, core_size, core_offset);
}
// Handle the head portion.
if (offset < covered_offset) {
const s64 head_offset = Common::AlignDown(offset, data_alignment);
const size_t head_size = static_cast<size_t>(covered_offset - offset);
ASSERT((offset - head_offset) + head_size <= data_alignment);
base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, head_offset);
std::memcpy(work_buf + (offset - head_offset), buffer, head_size);
base_storage->Write(reinterpret_cast<u8*>(work_buf), data_alignment, head_offset);
}
// Handle the tail portion.
s64 tail_offset = covered_offset + core_size;
size_t remaining_tail_size = static_cast<size_t>((offset + size) - tail_offset);
while (remaining_tail_size > 0) {
ASSERT(static_cast<size_t>(tail_offset - offset) < size);
const auto aligned_tail_offset = Common::AlignDown(tail_offset, data_alignment);
const auto cur_size =
std::min(static_cast<size_t>(aligned_tail_offset + data_alignment - tail_offset),
remaining_tail_size);
base_storage->Read(reinterpret_cast<u8*>(work_buf), data_alignment, aligned_tail_offset);
std::memcpy(work_buf + GetRoundDownDifference(tail_offset, data_alignment),
buffer + (tail_offset - offset), cur_size);
base_storage->Write(reinterpret_cast<u8*>(work_buf), data_alignment, aligned_tail_offset);
remaining_tail_size -= cur_size;
tail_offset += cur_size;
}
return size;
}
} // namespace FileSys
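The head and tail handling in Write above is a read-modify-write: the whole aligned block is read into the work buffer, the covered bytes are patched, and the block is written back. A self-contained sketch of that pattern over a toy in-memory device (Device and its members are illustrative stand-ins, not yuzu types):
// Sketch: read-modify-write of one partially covered aligned block.
// The caller must ensure (offset - AlignDown(offset)) + size <= align and that
// the block lies within the device.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>
struct Device {
    std::vector<std::uint8_t> data;
    void Read(std::uint8_t* dst, std::size_t size, std::size_t offset) const {
        std::memcpy(dst, data.data() + offset, size);
    }
    void Write(const std::uint8_t* src, std::size_t size, std::size_t offset) {
        std::memcpy(data.data() + offset, src, size);
    }
};
void WriteUnalignedHead(Device& dev, std::size_t offset, const std::uint8_t* src,
                        std::size_t size, std::size_t align) {
    const std::size_t block_offset = offset / align * align;        // AlignDown(offset, align)
    std::vector<std::uint8_t> work(align);
    dev.Read(work.data(), align, block_offset);                     // fetch the whole block
    std::memcpy(work.data() + (offset - block_offset), src, size);  // patch the covered bytes
    dev.Write(work.data(), align, block_offset);                    // store the whole block
}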

View File

@ -0,0 +1,21 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "core/file_sys/errors.h"
#include "core/file_sys/fssystem/fs_i_storage.h"
namespace FileSys {
class AlignmentMatchingStorageImpl {
public:
static size_t Read(VirtualFile base_storage, char* work_buf, size_t work_buf_size,
size_t data_alignment, size_t buffer_alignment, s64 offset, u8* buffer,
size_t size);
static size_t Write(VirtualFile base_storage, char* work_buf, size_t work_buf_size,
size_t data_alignment, size_t buffer_alignment, s64 offset,
const u8* buffer, size_t size);
};
} // namespace FileSys

View File

@ -0,0 +1,598 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/file_sys/errors.h"
#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
#include "core/file_sys/fssystem/fssystem_bucket_tree_utils.h"
#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
namespace FileSys {
namespace {
using Node = impl::BucketTreeNode<const s64*>;
static_assert(sizeof(Node) == sizeof(BucketTree::NodeHeader));
static_assert(std::is_trivial_v<Node>);
constexpr inline s32 NodeHeaderSize = sizeof(BucketTree::NodeHeader);
class StorageNode {
private:
class Offset {
public:
using difference_type = s64;
private:
s64 m_offset;
s32 m_stride;
public:
constexpr Offset(s64 offset, s32 stride) : m_offset(offset), m_stride(stride) {}
constexpr Offset& operator++() {
m_offset += m_stride;
return *this;
}
constexpr Offset operator++(int) {
Offset ret(*this);
m_offset += m_stride;
return ret;
}
constexpr Offset& operator--() {
m_offset -= m_stride;
return *this;
}
constexpr Offset operator--(int) {
Offset ret(*this);
m_offset -= m_stride;
return ret;
}
constexpr difference_type operator-(const Offset& rhs) const {
return (m_offset - rhs.m_offset) / m_stride;
}
constexpr Offset operator+(difference_type ofs) const {
return Offset(m_offset + ofs * m_stride, m_stride);
}
constexpr Offset operator-(difference_type ofs) const {
return Offset(m_offset - ofs * m_stride, m_stride);
}
constexpr Offset& operator+=(difference_type ofs) {
m_offset += ofs * m_stride;
return *this;
}
constexpr Offset& operator-=(difference_type ofs) {
m_offset -= ofs * m_stride;
return *this;
}
constexpr bool operator==(const Offset& rhs) const {
return m_offset == rhs.m_offset;
}
constexpr bool operator!=(const Offset& rhs) const {
return m_offset != rhs.m_offset;
}
constexpr s64 Get() const {
return m_offset;
}
};
private:
const Offset m_start;
const s32 m_count;
s32 m_index;
public:
StorageNode(size_t size, s32 count)
: m_start(NodeHeaderSize, static_cast<s32>(size)), m_count(count), m_index(-1) {}
StorageNode(s64 ofs, size_t size, s32 count)
: m_start(NodeHeaderSize + ofs, static_cast<s32>(size)), m_count(count), m_index(-1) {}
s32 GetIndex() const {
return m_index;
}
void Find(const char* buffer, s64 virtual_address) {
s32 end = m_count;
auto pos = m_start;
while (end > 0) {
auto half = end / 2;
auto mid = pos + half;
s64 offset = 0;
std::memcpy(std::addressof(offset), buffer + mid.Get(), sizeof(s64));
if (offset <= virtual_address) {
pos = mid + 1;
end -= half + 1;
} else {
end = half;
}
}
m_index = static_cast<s32>(pos - m_start) - 1;
}
Result Find(VirtualFile storage, s64 virtual_address) {
s32 end = m_count;
auto pos = m_start;
while (end > 0) {
auto half = end / 2;
auto mid = pos + half;
s64 offset = 0;
storage->ReadObject(std::addressof(offset), mid.Get());
if (offset <= virtual_address) {
pos = mid + 1;
end -= half + 1;
} else {
end = half;
}
}
m_index = static_cast<s32>(pos - m_start) - 1;
R_SUCCEED();
}
};
} // namespace
void BucketTree::Header::Format(s32 entry_count_) {
ASSERT(entry_count_ >= 0);
this->magic = Magic;
this->version = Version;
this->entry_count = entry_count_;
this->reserved = 0;
}
Result BucketTree::Header::Verify() const {
R_UNLESS(this->magic == Magic, ResultInvalidBucketTreeSignature);
R_UNLESS(this->entry_count >= 0, ResultInvalidBucketTreeEntryCount);
R_UNLESS(this->version <= Version, ResultUnsupportedVersion);
R_SUCCEED();
}
Result BucketTree::NodeHeader::Verify(s32 node_index, size_t node_size, size_t entry_size) const {
R_UNLESS(this->index == node_index, ResultInvalidBucketTreeNodeIndex);
R_UNLESS(entry_size != 0 && node_size >= entry_size + NodeHeaderSize, ResultInvalidSize);
const size_t max_entry_count = (node_size - NodeHeaderSize) / entry_size;
R_UNLESS(this->count > 0 && static_cast<size_t>(this->count) <= max_entry_count,
ResultInvalidBucketTreeNodeEntryCount);
R_UNLESS(this->offset >= 0, ResultInvalidBucketTreeNodeOffset);
R_SUCCEED();
}
Result BucketTree::Initialize(VirtualFile node_storage, VirtualFile entry_storage, size_t node_size,
size_t entry_size, s32 entry_count) {
// Validate preconditions.
ASSERT(entry_size >= sizeof(s64));
ASSERT(node_size >= entry_size + sizeof(NodeHeader));
ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
ASSERT(Common::IsPowerOfTwo(node_size));
ASSERT(!this->IsInitialized());
// Ensure valid entry count.
R_UNLESS(entry_count > 0, ResultInvalidArgument);
// Allocate node.
R_UNLESS(m_node_l1.Allocate(node_size), ResultBufferAllocationFailed);
ON_RESULT_FAILURE {
m_node_l1.Free(node_size);
};
// Read node.
node_storage->Read(reinterpret_cast<u8*>(m_node_l1.Get()), node_size);
// Verify node.
R_TRY(m_node_l1->Verify(0, node_size, sizeof(s64)));
// Validate offsets.
const auto offset_count = GetOffsetCount(node_size);
const auto entry_set_count = GetEntrySetCount(node_size, entry_size, entry_count);
const auto* const node = m_node_l1.Get<Node>();
s64 start_offset;
if (offset_count < entry_set_count && node->GetCount() < offset_count) {
start_offset = *node->GetEnd();
} else {
start_offset = *node->GetBegin();
}
const auto end_offset = node->GetEndOffset();
R_UNLESS(0 <= start_offset && start_offset <= node->GetBeginOffset(),
ResultInvalidBucketTreeEntryOffset);
R_UNLESS(start_offset < end_offset, ResultInvalidBucketTreeEntryOffset);
// Set member variables.
m_node_storage = node_storage;
m_entry_storage = entry_storage;
m_node_size = node_size;
m_entry_size = entry_size;
m_entry_count = entry_count;
m_offset_count = offset_count;
m_entry_set_count = entry_set_count;
m_offset_cache.offsets.start_offset = start_offset;
m_offset_cache.offsets.end_offset = end_offset;
m_offset_cache.is_initialized = true;
// We succeeded.
R_SUCCEED();
}
void BucketTree::Initialize(size_t node_size, s64 end_offset) {
ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
ASSERT(Common::IsPowerOfTwo(node_size));
ASSERT(end_offset > 0);
ASSERT(!this->IsInitialized());
m_node_size = node_size;
m_offset_cache.offsets.start_offset = 0;
m_offset_cache.offsets.end_offset = end_offset;
m_offset_cache.is_initialized = true;
}
void BucketTree::Finalize() {
if (this->IsInitialized()) {
m_node_storage = VirtualFile();
m_entry_storage = VirtualFile();
m_node_l1.Free(m_node_size);
m_node_size = 0;
m_entry_size = 0;
m_entry_count = 0;
m_offset_count = 0;
m_entry_set_count = 0;
m_offset_cache.offsets.start_offset = 0;
m_offset_cache.offsets.end_offset = 0;
m_offset_cache.is_initialized = false;
}
}
Result BucketTree::Find(Visitor* visitor, s64 virtual_address) {
ASSERT(visitor != nullptr);
ASSERT(this->IsInitialized());
R_UNLESS(virtual_address >= 0, ResultInvalidOffset);
R_UNLESS(!this->IsEmpty(), ResultOutOfRange);
BucketTree::Offsets offsets;
R_TRY(this->GetOffsets(std::addressof(offsets)));
R_TRY(visitor->Initialize(this, offsets));
R_RETURN(visitor->Find(virtual_address));
}
Result BucketTree::InvalidateCache() {
// Reset our offsets.
m_offset_cache.is_initialized = false;
R_SUCCEED();
}
Result BucketTree::EnsureOffsetCache() {
// If we already have an offset cache, we're good.
R_SUCCEED_IF(m_offset_cache.is_initialized);
// Acquire exclusive right to edit the offset cache.
std::scoped_lock lk(m_offset_cache.mutex);
// Check again, to be sure.
R_SUCCEED_IF(m_offset_cache.is_initialized);
// Read/verify L1.
m_node_storage->Read(reinterpret_cast<u8*>(m_node_l1.Get()), m_node_size);
R_TRY(m_node_l1->Verify(0, m_node_size, sizeof(s64)));
// Get the node.
auto* const node = m_node_l1.Get<Node>();
s64 start_offset;
if (m_offset_count < m_entry_set_count && node->GetCount() < m_offset_count) {
start_offset = *node->GetEnd();
} else {
start_offset = *node->GetBegin();
}
const auto end_offset = node->GetEndOffset();
R_UNLESS(0 <= start_offset && start_offset <= node->GetBeginOffset(),
ResultInvalidBucketTreeEntryOffset);
R_UNLESS(start_offset < end_offset, ResultInvalidBucketTreeEntryOffset);
m_offset_cache.offsets.start_offset = start_offset;
m_offset_cache.offsets.end_offset = end_offset;
m_offset_cache.is_initialized = true;
R_SUCCEED();
}
Result BucketTree::Visitor::Initialize(const BucketTree* tree, const BucketTree::Offsets& offsets) {
ASSERT(tree != nullptr);
ASSERT(m_tree == nullptr || m_tree == tree);
if (m_entry == nullptr) {
m_entry = ::operator new(tree->m_entry_size);
R_UNLESS(m_entry != nullptr, ResultBufferAllocationFailed);
m_tree = tree;
m_offsets = offsets;
}
R_SUCCEED();
}
Result BucketTree::Visitor::MoveNext() {
R_UNLESS(this->IsValid(), ResultOutOfRange);
// Invalidate our index, and read the header for the next index.
auto entry_index = m_entry_index + 1;
if (entry_index == m_entry_set.info.count) {
const auto entry_set_index = m_entry_set.info.index + 1;
R_UNLESS(entry_set_index < m_entry_set_count, ResultOutOfRange);
m_entry_index = -1;
const auto end = m_entry_set.info.end;
const auto entry_set_size = m_tree->m_node_size;
const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
m_tree->m_entry_storage->ReadObject(std::addressof(m_entry_set), entry_set_offset);
R_TRY(m_entry_set.header.Verify(entry_set_index, entry_set_size, m_tree->m_entry_size));
R_UNLESS(m_entry_set.info.start == end && m_entry_set.info.start < m_entry_set.info.end,
ResultInvalidBucketTreeEntrySetOffset);
entry_index = 0;
} else {
m_entry_index = -1;
}
// Read the new entry.
const auto entry_size = m_tree->m_entry_size;
const auto entry_offset = impl::GetBucketTreeEntryOffset(
m_entry_set.info.index, m_tree->m_node_size, entry_size, entry_index);
m_tree->m_entry_storage->Read(reinterpret_cast<u8*>(m_entry), entry_size, entry_offset);
// Note that we changed index.
m_entry_index = entry_index;
R_SUCCEED();
}
Result BucketTree::Visitor::MovePrevious() {
R_UNLESS(this->IsValid(), ResultOutOfRange);
// Invalidate our index, and read the header for the previous index.
auto entry_index = m_entry_index;
if (entry_index == 0) {
R_UNLESS(m_entry_set.info.index > 0, ResultOutOfRange);
m_entry_index = -1;
const auto start = m_entry_set.info.start;
const auto entry_set_size = m_tree->m_node_size;
const auto entry_set_index = m_entry_set.info.index - 1;
const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
m_tree->m_entry_storage->ReadObject(std::addressof(m_entry_set), entry_set_offset);
R_TRY(m_entry_set.header.Verify(entry_set_index, entry_set_size, m_tree->m_entry_size));
R_UNLESS(m_entry_set.info.end == start && m_entry_set.info.start < m_entry_set.info.end,
ResultInvalidBucketTreeEntrySetOffset);
entry_index = m_entry_set.info.count;
} else {
m_entry_index = -1;
}
--entry_index;
// Read the new entry.
const auto entry_size = m_tree->m_entry_size;
const auto entry_offset = impl::GetBucketTreeEntryOffset(
m_entry_set.info.index, m_tree->m_node_size, entry_size, entry_index);
m_tree->m_entry_storage->Read(reinterpret_cast<u8*>(m_entry), entry_size, entry_offset);
// Note that we changed index.
m_entry_index = entry_index;
R_SUCCEED();
}
Result BucketTree::Visitor::Find(s64 virtual_address) {
ASSERT(m_tree != nullptr);
// Get the node.
const auto* const node = m_tree->m_node_l1.Get<Node>();
R_UNLESS(virtual_address < node->GetEndOffset(), ResultOutOfRange);
// Get the entry set index.
s32 entry_set_index = -1;
if (m_tree->IsExistOffsetL2OnL1() && virtual_address < node->GetBeginOffset()) {
const auto start = node->GetEnd();
const auto end = node->GetBegin() + m_tree->m_offset_count;
auto pos = std::upper_bound(start, end, virtual_address);
R_UNLESS(start < pos, ResultOutOfRange);
--pos;
entry_set_index = static_cast<s32>(pos - start);
} else {
const auto start = node->GetBegin();
const auto end = node->GetEnd();
auto pos = std::upper_bound(start, end, virtual_address);
R_UNLESS(start < pos, ResultOutOfRange);
--pos;
if (m_tree->IsExistL2()) {
const auto node_index = static_cast<s32>(pos - start);
R_UNLESS(0 <= node_index && node_index < m_tree->m_offset_count,
ResultInvalidBucketTreeNodeOffset);
R_TRY(this->FindEntrySet(std::addressof(entry_set_index), virtual_address, node_index));
} else {
entry_set_index = static_cast<s32>(pos - start);
}
}
// Validate the entry set index.
R_UNLESS(0 <= entry_set_index && entry_set_index < m_tree->m_entry_set_count,
ResultInvalidBucketTreeNodeOffset);
// Find the entry.
R_TRY(this->FindEntry(virtual_address, entry_set_index));
// Set count.
m_entry_set_count = m_tree->m_entry_set_count;
R_SUCCEED();
}
Result BucketTree::Visitor::FindEntrySet(s32* out_index, s64 virtual_address, s32 node_index) {
const auto node_size = m_tree->m_node_size;
PooledBuffer pool(node_size, 1);
if (node_size <= pool.GetSize()) {
R_RETURN(
this->FindEntrySetWithBuffer(out_index, virtual_address, node_index, pool.GetBuffer()));
} else {
pool.Deallocate();
R_RETURN(this->FindEntrySetWithoutBuffer(out_index, virtual_address, node_index));
}
}
Result BucketTree::Visitor::FindEntrySetWithBuffer(s32* out_index, s64 virtual_address,
s32 node_index, char* buffer) {
// Calculate node extents.
const auto node_size = m_tree->m_node_size;
const auto node_offset = (node_index + 1) * static_cast<s64>(node_size);
VirtualFile storage = m_tree->m_node_storage;
// Read the node.
storage->Read(reinterpret_cast<u8*>(buffer), node_size, node_offset);
// Validate the header.
NodeHeader header;
std::memcpy(std::addressof(header), buffer, NodeHeaderSize);
R_TRY(header.Verify(node_index, node_size, sizeof(s64)));
// Create the node, and find.
StorageNode node(sizeof(s64), header.count);
node.Find(buffer, virtual_address);
R_UNLESS(node.GetIndex() >= 0, ResultInvalidBucketTreeVirtualOffset);
// Return the index.
*out_index = static_cast<s32>(m_tree->GetEntrySetIndex(header.index, node.GetIndex()));
R_SUCCEED();
}
Result BucketTree::Visitor::FindEntrySetWithoutBuffer(s32* out_index, s64 virtual_address,
s32 node_index) {
// Calculate node extents.
const auto node_size = m_tree->m_node_size;
const auto node_offset = (node_index + 1) * static_cast<s64>(node_size);
VirtualFile storage = m_tree->m_node_storage;
// Read and validate the header.
NodeHeader header;
storage->ReadObject(std::addressof(header), node_offset);
R_TRY(header.Verify(node_index, node_size, sizeof(s64)));
// Create the node, and find.
StorageNode node(node_offset, sizeof(s64), header.count);
R_TRY(node.Find(storage, virtual_address));
R_UNLESS(node.GetIndex() >= 0, ResultOutOfRange);
// Return the index.
*out_index = static_cast<s32>(m_tree->GetEntrySetIndex(header.index, node.GetIndex()));
R_SUCCEED();
}
Result BucketTree::Visitor::FindEntry(s64 virtual_address, s32 entry_set_index) {
const auto entry_set_size = m_tree->m_node_size;
PooledBuffer pool(entry_set_size, 1);
if (entry_set_size <= pool.GetSize()) {
R_RETURN(this->FindEntryWithBuffer(virtual_address, entry_set_index, pool.GetBuffer()));
} else {
pool.Deallocate();
R_RETURN(this->FindEntryWithoutBuffer(virtual_address, entry_set_index));
}
}
Result BucketTree::Visitor::FindEntryWithBuffer(s64 virtual_address, s32 entry_set_index,
char* buffer) {
// Calculate entry set extents.
const auto entry_size = m_tree->m_entry_size;
const auto entry_set_size = m_tree->m_node_size;
const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
VirtualFile storage = m_tree->m_entry_storage;
// Read the entry set.
storage->Read(reinterpret_cast<u8*>(buffer), entry_set_size, entry_set_offset);
// Validate the entry_set.
EntrySetHeader entry_set;
std::memcpy(std::addressof(entry_set), buffer, sizeof(EntrySetHeader));
R_TRY(entry_set.header.Verify(entry_set_index, entry_set_size, entry_size));
// Create the node, and find.
StorageNode node(entry_size, entry_set.info.count);
node.Find(buffer, virtual_address);
R_UNLESS(node.GetIndex() >= 0, ResultOutOfRange);
// Copy the data into entry.
const auto entry_index = node.GetIndex();
const auto entry_offset = impl::GetBucketTreeEntryOffset(0, entry_size, entry_index);
std::memcpy(m_entry, buffer + entry_offset, entry_size);
// Set our entry set/index.
m_entry_set = entry_set;
m_entry_index = entry_index;
R_SUCCEED();
}
Result BucketTree::Visitor::FindEntryWithoutBuffer(s64 virtual_address, s32 entry_set_index) {
// Calculate entry set extents.
const auto entry_size = m_tree->m_entry_size;
const auto entry_set_size = m_tree->m_node_size;
const auto entry_set_offset = entry_set_index * static_cast<s64>(entry_set_size);
VirtualFile storage = m_tree->m_entry_storage;
// Read and validate the entry_set.
EntrySetHeader entry_set;
storage->ReadObject(std::addressof(entry_set), entry_set_offset);
R_TRY(entry_set.header.Verify(entry_set_index, entry_set_size, entry_size));
// Create the node, and find.
StorageNode node(entry_set_offset, entry_size, entry_set.info.count);
R_TRY(node.Find(storage, virtual_address));
R_UNLESS(node.GetIndex() >= 0, ResultOutOfRange);
// Copy the data into entry.
const auto entry_index = node.GetIndex();
const auto entry_offset =
impl::GetBucketTreeEntryOffset(entry_set_offset, entry_size, entry_index);
storage->Read(reinterpret_cast<u8*>(m_entry), entry_size, entry_offset);
// Set our entry set/index.
m_entry_set = entry_set;
m_entry_index = entry_index;
R_SUCCEED();
}
} // namespace FileSys
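StorageNode::Find above is a binary search over fixed-stride entries whose first eight bytes are a virtual offset, ending on the last entry whose offset does not exceed the search address. A standalone sketch of the same search over a raw buffer (names and the flat entry layout are assumptions for illustration):
// Sketch: return the index of the last entry with offset <= virtual_address,
// or -1 if there is none. `entries` points at the first fixed-stride entry.
#include <cstddef>
#include <cstdint>
#include <cstring>
int FindEntryIndex(const char* entries, int count, std::size_t stride,
                   std::int64_t virtual_address) {
    int lo = 0;
    int remaining = count;
    while (remaining > 0) {
        const int half = remaining / 2;
        const int mid = lo + half;
        std::int64_t offset = 0;
        std::memcpy(&offset, entries + static_cast<std::size_t>(mid) * stride, sizeof(offset));
        if (offset <= virtual_address) {
            lo = mid + 1;           // keep searching to the right of mid
            remaining -= half + 1;
        } else {
            remaining = half;       // discard mid and everything after it
        }
    }
    return lo - 1;  // last index whose offset was <= virtual_address
}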

View File

@ -0,0 +1,416 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <mutex>
#include "common/alignment.h"
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/literals.h"
#include "core/file_sys/vfs.h"
#include "core/hle/result.h"
namespace FileSys {
using namespace Common::Literals;
class BucketTree {
YUZU_NON_COPYABLE(BucketTree);
YUZU_NON_MOVEABLE(BucketTree);
public:
static constexpr u32 Magic = Common::MakeMagic('B', 'K', 'T', 'R');
static constexpr u32 Version = 1;
static constexpr size_t NodeSizeMin = 1_KiB;
static constexpr size_t NodeSizeMax = 512_KiB;
public:
class Visitor;
struct Header {
u32 magic;
u32 version;
s32 entry_count;
s32 reserved;
void Format(s32 entry_count);
Result Verify() const;
};
static_assert(std::is_trivial_v<Header>);
static_assert(sizeof(Header) == 0x10);
struct NodeHeader {
s32 index;
s32 count;
s64 offset;
Result Verify(s32 node_index, size_t node_size, size_t entry_size) const;
};
static_assert(std::is_trivial_v<NodeHeader>);
static_assert(sizeof(NodeHeader) == 0x10);
struct Offsets {
s64 start_offset;
s64 end_offset;
constexpr bool IsInclude(s64 offset) const {
return this->start_offset <= offset && offset < this->end_offset;
}
constexpr bool IsInclude(s64 offset, s64 size) const {
return size > 0 && this->start_offset <= offset && size <= (this->end_offset - offset);
}
};
static_assert(std::is_trivial_v<Offsets>);
static_assert(sizeof(Offsets) == 0x10);
struct OffsetCache {
Offsets offsets;
std::mutex mutex;
bool is_initialized;
OffsetCache() : offsets{-1, -1}, mutex(), is_initialized(false) {}
};
class ContinuousReadingInfo {
public:
constexpr ContinuousReadingInfo() : m_read_size(), m_skip_count(), m_done() {}
constexpr void Reset() {
m_read_size = 0;
m_skip_count = 0;
m_done = false;
}
constexpr void SetSkipCount(s32 count) {
ASSERT(count >= 0);
m_skip_count = count;
}
constexpr s32 GetSkipCount() const {
return m_skip_count;
}
constexpr bool CheckNeedScan() {
return (--m_skip_count) <= 0;
}
constexpr void Done() {
m_read_size = 0;
m_done = true;
}
constexpr bool IsDone() const {
return m_done;
}
constexpr void SetReadSize(size_t size) {
m_read_size = size;
}
constexpr size_t GetReadSize() const {
return m_read_size;
}
constexpr bool CanDo() const {
return m_read_size > 0;
}
private:
size_t m_read_size;
s32 m_skip_count;
bool m_done;
};
private:
class NodeBuffer {
YUZU_NON_COPYABLE(NodeBuffer);
public:
NodeBuffer() : m_header() {}
~NodeBuffer() {
ASSERT(m_header == nullptr);
}
NodeBuffer(NodeBuffer&& rhs) : m_header(rhs.m_header) {
rhs.m_header = nullptr;
}
NodeBuffer& operator=(NodeBuffer&& rhs) {
if (this != std::addressof(rhs)) {
ASSERT(m_header == nullptr);
m_header = rhs.m_header;
rhs.m_header = nullptr;
}
return *this;
}
bool Allocate(size_t node_size) {
ASSERT(m_header == nullptr);
m_header = ::operator new(node_size, std::align_val_t{sizeof(s64)});
// ASSERT(Common::IsAligned(m_header, sizeof(s64)));
return m_header != nullptr;
}
void Free(size_t node_size) {
if (m_header) {
::operator delete(m_header, std::align_val_t{sizeof(s64)});
m_header = nullptr;
}
}
void FillZero(size_t node_size) const {
if (m_header) {
std::memset(m_header, 0, node_size);
}
}
NodeHeader* Get() const {
return reinterpret_cast<NodeHeader*>(m_header);
}
NodeHeader* operator->() const {
return this->Get();
}
template <typename T>
T* Get() const {
static_assert(std::is_trivial_v<T>);
static_assert(sizeof(T) == sizeof(NodeHeader));
return reinterpret_cast<T*>(m_header);
}
private:
void* m_header;
};
private:
static constexpr s32 GetEntryCount(size_t node_size, size_t entry_size) {
return static_cast<s32>((node_size - sizeof(NodeHeader)) / entry_size);
}
static constexpr s32 GetOffsetCount(size_t node_size) {
return static_cast<s32>((node_size - sizeof(NodeHeader)) / sizeof(s64));
}
static constexpr s32 GetEntrySetCount(size_t node_size, size_t entry_size, s32 entry_count) {
const s32 entry_count_per_node = GetEntryCount(node_size, entry_size);
return Common::DivideUp(entry_count, entry_count_per_node);
}
static constexpr s32 GetNodeL2Count(size_t node_size, size_t entry_size, s32 entry_count) {
const s32 offset_count_per_node = GetOffsetCount(node_size);
const s32 entry_set_count = GetEntrySetCount(node_size, entry_size, entry_count);
if (entry_set_count <= offset_count_per_node) {
return 0;
}
const s32 node_l2_count = Common::DivideUp(entry_set_count, offset_count_per_node);
ASSERT(node_l2_count <= offset_count_per_node);
return Common::DivideUp(entry_set_count - (offset_count_per_node - (node_l2_count - 1)),
offset_count_per_node);
}
public:
BucketTree()
: m_node_storage(), m_entry_storage(), m_node_l1(), m_node_size(), m_entry_size(),
m_entry_count(), m_offset_count(), m_entry_set_count(), m_offset_cache() {}
~BucketTree() {
this->Finalize();
}
Result Initialize(VirtualFile node_storage, VirtualFile entry_storage, size_t node_size,
size_t entry_size, s32 entry_count);
void Initialize(size_t node_size, s64 end_offset);
void Finalize();
bool IsInitialized() const {
return m_node_size > 0;
}
bool IsEmpty() const {
return m_entry_size == 0;
}
Result Find(Visitor* visitor, s64 virtual_address);
Result InvalidateCache();
s32 GetEntryCount() const {
return m_entry_count;
}
Result GetOffsets(Offsets* out) {
// Ensure we have an offset cache.
R_TRY(this->EnsureOffsetCache());
// Set the output.
*out = m_offset_cache.offsets;
R_SUCCEED();
}
public:
static constexpr s64 QueryHeaderStorageSize() {
return sizeof(Header);
}
static constexpr s64 QueryNodeStorageSize(size_t node_size, size_t entry_size,
s32 entry_count) {
ASSERT(entry_size >= sizeof(s64));
ASSERT(node_size >= entry_size + sizeof(NodeHeader));
ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
ASSERT(Common::IsPowerOfTwo(node_size));
ASSERT(entry_count >= 0);
if (entry_count <= 0) {
return 0;
}
return (1 + GetNodeL2Count(node_size, entry_size, entry_count)) *
static_cast<s64>(node_size);
}
static constexpr s64 QueryEntryStorageSize(size_t node_size, size_t entry_size,
s32 entry_count) {
ASSERT(entry_size >= sizeof(s64));
ASSERT(node_size >= entry_size + sizeof(NodeHeader));
ASSERT(NodeSizeMin <= node_size && node_size <= NodeSizeMax);
ASSERT(Common::IsPowerOfTwo(node_size));
ASSERT(entry_count >= 0);
if (entry_count <= 0) {
return 0;
}
return GetEntrySetCount(node_size, entry_size, entry_count) * static_cast<s64>(node_size);
}
private:
template <typename EntryType>
struct ContinuousReadingParam {
s64 offset;
size_t size;
NodeHeader entry_set;
s32 entry_index;
Offsets offsets;
EntryType entry;
};
private:
template <typename EntryType>
Result ScanContinuousReading(ContinuousReadingInfo* out_info,
const ContinuousReadingParam<EntryType>& param) const;
bool IsExistL2() const {
return m_offset_count < m_entry_set_count;
}
bool IsExistOffsetL2OnL1() const {
return this->IsExistL2() && m_node_l1->count < m_offset_count;
}
s64 GetEntrySetIndex(s32 node_index, s32 offset_index) const {
return (m_offset_count - m_node_l1->count) + (m_offset_count * node_index) + offset_index;
}
Result EnsureOffsetCache();
private:
mutable VirtualFile m_node_storage;
mutable VirtualFile m_entry_storage;
NodeBuffer m_node_l1;
size_t m_node_size;
size_t m_entry_size;
s32 m_entry_count;
s32 m_offset_count;
s32 m_entry_set_count;
OffsetCache m_offset_cache;
};
class BucketTree::Visitor {
YUZU_NON_COPYABLE(Visitor);
YUZU_NON_MOVEABLE(Visitor);
public:
constexpr Visitor()
: m_tree(), m_entry(), m_entry_index(-1), m_entry_set_count(), m_entry_set{} {}
~Visitor() {
if (m_entry != nullptr) {
::operator delete(m_entry, m_tree->m_entry_size);
m_tree = nullptr;
m_entry = nullptr;
}
}
bool IsValid() const {
return m_entry_index >= 0;
}
bool CanMoveNext() const {
return this->IsValid() && (m_entry_index + 1 < m_entry_set.info.count ||
m_entry_set.info.index + 1 < m_entry_set_count);
}
bool CanMovePrevious() const {
return this->IsValid() && (m_entry_index > 0 || m_entry_set.info.index > 0);
}
Result MoveNext();
Result MovePrevious();
template <typename EntryType>
Result ScanContinuousReading(ContinuousReadingInfo* out_info, s64 offset, size_t size) const;
const void* Get() const {
ASSERT(this->IsValid());
return m_entry;
}
template <typename T>
const T* Get() const {
ASSERT(this->IsValid());
return reinterpret_cast<const T*>(m_entry);
}
const BucketTree* GetTree() const {
return m_tree;
}
private:
Result Initialize(const BucketTree* tree, const BucketTree::Offsets& offsets);
Result Find(s64 virtual_address);
Result FindEntrySet(s32* out_index, s64 virtual_address, s32 node_index);
Result FindEntrySetWithBuffer(s32* out_index, s64 virtual_address, s32 node_index,
char* buffer);
Result FindEntrySetWithoutBuffer(s32* out_index, s64 virtual_address, s32 node_index);
Result FindEntry(s64 virtual_address, s32 entry_set_index);
Result FindEntryWithBuffer(s64 virtual_address, s32 entry_set_index, char* buffer);
Result FindEntryWithoutBuffer(s64 virtual_address, s32 entry_set_index);
private:
friend class BucketTree;
union EntrySetHeader {
NodeHeader header;
struct Info {
s32 index;
s32 count;
s64 end;
s64 start;
} info;
static_assert(std::is_trivial_v<Info>);
};
static_assert(std::is_trivial_v<EntrySetHeader>);
const BucketTree* m_tree;
BucketTree::Offsets m_offsets;
void* m_entry;
s32 m_entry_index;
s32 m_entry_set_count;
EntrySetHeader m_entry_set;
};
} // namespace FileSys
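QueryNodeStorageSize and QueryEntryStorageSize above are pure arithmetic over node size, entry size, and entry count. A small worked sketch with illustrative numbers (16 KiB nodes, 24-byte entries, 10000 entries; these are example values, not ones taken from a real NCA):
// Sketch: the storage-size arithmetic evaluated for example parameters.
#include <cstddef>
#include <cstdint>
constexpr std::size_t kNodeHeaderSize = 16;        // sizeof(NodeHeader)
constexpr std::size_t kNodeSize = 16 * 1024;
constexpr std::size_t kEntrySize = 24;             // e.g. a 0x18-byte entry
constexpr std::int32_t kEntryCount = 10000;
constexpr std::int32_t EntriesPerNode = (kNodeSize - kNodeHeaderSize) / kEntrySize;            // 682
constexpr std::int32_t OffsetsPerNode = (kNodeSize - kNodeHeaderSize) / sizeof(std::int64_t);  // 2046
constexpr std::int32_t EntrySetCount = (kEntryCount + EntriesPerNode - 1) / EntriesPerNode;    // 15
// 15 entry sets fit within one L1 node's 2046 offsets, so no L2 nodes are needed:
static_assert(EntrySetCount <= OffsetsPerNode);
constexpr std::int64_t NodeStorageSize = 1 * static_cast<std::int64_t>(kNodeSize);                // 16 KiB
constexpr std::int64_t EntryStorageSize = EntrySetCount * static_cast<std::int64_t>(kNodeSize);   // 240 KiB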

View File

@ -0,0 +1,170 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "core/file_sys/errors.h"
#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
#include "core/file_sys/fssystem/fssystem_bucket_tree_utils.h"
#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
namespace FileSys {
template <typename EntryType>
Result BucketTree::ScanContinuousReading(ContinuousReadingInfo* out_info,
const ContinuousReadingParam<EntryType>& param) const {
static_assert(std::is_trivial_v<ContinuousReadingParam<EntryType>>);
// Validate our preconditions.
ASSERT(this->IsInitialized());
ASSERT(out_info != nullptr);
ASSERT(m_entry_size == sizeof(EntryType));
// Reset the output.
out_info->Reset();
// If there's nothing to read, we're done.
R_SUCCEED_IF(param.size == 0);
// If we're reading a fragment, we're done.
R_SUCCEED_IF(param.entry.IsFragment());
// Validate the first entry.
auto entry = param.entry;
auto cur_offset = param.offset;
R_UNLESS(entry.GetVirtualOffset() <= cur_offset, ResultOutOfRange);
// Create a pooled buffer for our scan.
PooledBuffer pool(m_node_size, 1);
char* buffer = nullptr;
s64 entry_storage_size = m_entry_storage->GetSize();
// Read the node.
if (m_node_size <= pool.GetSize()) {
buffer = pool.GetBuffer();
const auto ofs = param.entry_set.index * static_cast<s64>(m_node_size);
R_UNLESS(m_node_size + ofs <= static_cast<size_t>(entry_storage_size),
ResultInvalidBucketTreeNodeEntryCount);
m_entry_storage->Read(reinterpret_cast<u8*>(buffer), m_node_size, ofs);
}
// Calculate extents.
const auto end_offset = cur_offset + static_cast<s64>(param.size);
s64 phys_offset = entry.GetPhysicalOffset();
// Start merge tracking.
s64 merge_size = 0;
s64 readable_size = 0;
bool merged = false;
// Iterate.
auto entry_index = param.entry_index;
for (const auto entry_count = param.entry_set.count; entry_index < entry_count; ++entry_index) {
// If we're past the end, we're done.
if (end_offset <= cur_offset) {
break;
}
// Validate the entry offset.
const auto entry_offset = entry.GetVirtualOffset();
R_UNLESS(entry_offset <= cur_offset, ResultInvalidIndirectEntryOffset);
// Get the next entry.
EntryType next_entry = {};
s64 next_entry_offset;
if (entry_index + 1 < entry_count) {
if (buffer != nullptr) {
const auto ofs = impl::GetBucketTreeEntryOffset(0, m_entry_size, entry_index + 1);
std::memcpy(std::addressof(next_entry), buffer + ofs, m_entry_size);
} else {
const auto ofs = impl::GetBucketTreeEntryOffset(param.entry_set.index, m_node_size,
m_entry_size, entry_index + 1);
m_entry_storage->ReadObject(std::addressof(next_entry), ofs);
}
next_entry_offset = next_entry.GetVirtualOffset();
R_UNLESS(param.offsets.IsInclude(next_entry_offset), ResultInvalidIndirectEntryOffset);
} else {
next_entry_offset = param.entry_set.offset;
}
// Validate the next entry offset.
R_UNLESS(cur_offset < next_entry_offset, ResultInvalidIndirectEntryOffset);
// Determine how much data there is.
const auto data_size = next_entry_offset - cur_offset;
ASSERT(data_size > 0);
// Determine how much data we should read.
const auto remaining_size = end_offset - cur_offset;
const size_t read_size = static_cast<size_t>(std::min(data_size, remaining_size));
ASSERT(read_size <= param.size);
// Update our merge tracking.
if (entry.IsFragment()) {
// If we can't merge, stop looping.
if (EntryType::FragmentSizeMax <= read_size || remaining_size <= data_size) {
break;
}
// Otherwise, add the current size to the merge size.
merge_size += read_size;
} else {
// If we can't merge, stop looping.
if (phys_offset != entry.GetPhysicalOffset()) {
break;
}
// Add the size to the readable amount.
readable_size += merge_size + read_size;
ASSERT(readable_size <= static_cast<s64>(param.size));
// Update whether we've merged.
merged |= merge_size > 0;
merge_size = 0;
}
// Advance.
cur_offset += read_size;
ASSERT(cur_offset <= end_offset);
phys_offset += next_entry_offset - entry_offset;
entry = next_entry;
}
// If we merged, set our readable size.
if (merged) {
out_info->SetReadSize(static_cast<size_t>(readable_size));
}
out_info->SetSkipCount(entry_index - param.entry_index);
R_SUCCEED();
}
template <typename EntryType>
Result BucketTree::Visitor::ScanContinuousReading(ContinuousReadingInfo* out_info, s64 offset,
size_t size) const {
static_assert(std::is_trivial_v<EntryType>);
ASSERT(this->IsValid());
// Create our parameters.
ContinuousReadingParam<EntryType> param = {
.offset = offset,
.size = size,
.entry_set = m_entry_set.header,
.entry_index = m_entry_index,
.offsets{},
.entry{},
};
std::memcpy(std::addressof(param.offsets), std::addressof(m_offsets),
sizeof(BucketTree::Offsets));
std::memcpy(std::addressof(param.entry), m_entry, sizeof(EntryType));
// Scan.
R_RETURN(m_tree->ScanContinuousReading<EntryType>(out_info, param));
}
} // namespace FileSys
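ScanContinuousReading above decides how many consecutive entries can be serviced by a single physical read, merging small fragments into the surrounding run. A deliberately reduced sketch of the underlying idea, coalescing physically contiguous extents (Extent and the contiguity rule here are simplifications of the real entry and fragment handling):
// Sketch: merge adjacent extents whose physical ranges are back-to-back, so
// several small reads can be issued as one larger read.
#include <cstdint>
#include <vector>
struct Extent {
    std::int64_t phys_offset;
    std::int64_t size;
};
std::vector<Extent> CoalesceContiguous(const std::vector<Extent>& extents) {
    std::vector<Extent> merged;
    for (const Extent& e : extents) {
        if (!merged.empty() &&
            merged.back().phys_offset + merged.back().size == e.phys_offset) {
            merged.back().size += e.size;  // extends the previous physical run
        } else {
            merged.push_back(e);           // starts a new run
        }
    }
    return merged;
}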

View File

@ -0,0 +1,110 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
namespace FileSys::impl {
class SafeValue {
public:
static s64 GetInt64(const void* ptr) {
s64 value;
std::memcpy(std::addressof(value), ptr, sizeof(s64));
return value;
}
static s64 GetInt64(const s64* ptr) {
return GetInt64(static_cast<const void*>(ptr));
}
static s64 GetInt64(const s64& v) {
return GetInt64(std::addressof(v));
}
static void SetInt64(void* dst, const void* src) {
std::memcpy(dst, src, sizeof(s64));
}
static void SetInt64(void* dst, const s64* src) {
return SetInt64(dst, static_cast<const void*>(src));
}
static void SetInt64(void* dst, const s64& v) {
return SetInt64(dst, std::addressof(v));
}
};
template <typename IteratorType>
struct BucketTreeNode {
using Header = BucketTree::NodeHeader;
Header header;
s32 GetCount() const {
return this->header.count;
}
void* GetArray() {
return std::addressof(this->header) + 1;
}
template <typename T>
T* GetArray() {
return reinterpret_cast<T*>(this->GetArray());
}
const void* GetArray() const {
return std::addressof(this->header) + 1;
}
template <typename T>
const T* GetArray() const {
return reinterpret_cast<const T*>(this->GetArray());
}
s64 GetBeginOffset() const {
return *this->GetArray<s64>();
}
s64 GetEndOffset() const {
return this->header.offset;
}
IteratorType GetBegin() {
return IteratorType(this->GetArray<s64>());
}
IteratorType GetEnd() {
return IteratorType(this->GetArray<s64>()) + this->header.count;
}
IteratorType GetBegin() const {
return IteratorType(this->GetArray<s64>());
}
IteratorType GetEnd() const {
return IteratorType(this->GetArray<s64>()) + this->header.count;
}
IteratorType GetBegin(size_t entry_size) {
return IteratorType(this->GetArray(), entry_size);
}
IteratorType GetEnd(size_t entry_size) {
return IteratorType(this->GetArray(), entry_size) + this->header.count;
}
IteratorType GetBegin(size_t entry_size) const {
return IteratorType(this->GetArray(), entry_size);
}
IteratorType GetEnd(size_t entry_size) const {
return IteratorType(this->GetArray(), entry_size) + this->header.count;
}
};
constexpr inline s64 GetBucketTreeEntryOffset(s64 entry_set_offset, size_t entry_size,
s32 entry_index) {
return entry_set_offset + sizeof(BucketTree::NodeHeader) +
entry_index * static_cast<s64>(entry_size);
}
constexpr inline s64 GetBucketTreeEntryOffset(s32 entry_set_index, size_t node_size,
size_t entry_size, s32 entry_index) {
return GetBucketTreeEntryOffset(entry_set_index * static_cast<s64>(node_size), entry_size,
entry_index);
}
} // namespace FileSys::impl
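GetBucketTreeEntryOffset above locates an entry at entry_set_index * node_size + sizeof(NodeHeader) + entry_index * entry_size. A tiny constexpr sketch that evaluates the same arithmetic for illustrative parameters (the 16-byte header size and the sample values are assumptions for the example):
// Sketch: entry-offset arithmetic; with a 16 KiB node, 24-byte entries, and a
// 16-byte header, entry 3 of entry set 2 lives at 2 * 16384 + 16 + 3 * 24 = 32856.
#include <cstddef>
#include <cstdint>
constexpr std::int64_t EntryOffset(std::int32_t entry_set_index, std::size_t node_size,
                                   std::size_t entry_size, std::int32_t entry_index,
                                   std::size_t node_header_size = 16) {
    return entry_set_index * static_cast<std::int64_t>(node_size) +
           static_cast<std::int64_t>(node_header_size) +
           entry_index * static_cast<std::int64_t>(entry_size);
}
static_assert(EntryOffset(2, 16 * 1024, 24, 3) == 32856);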

View File

@ -0,0 +1,963 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/literals.h"
#include "core/file_sys/errors.h"
#include "core/file_sys/fssystem/fs_i_storage.h"
#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
#include "core/file_sys/fssystem/fssystem_compression_common.h"
#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
#include "core/file_sys/vfs.h"
namespace FileSys {
using namespace Common::Literals;
class CompressedStorage : public IReadOnlyStorage {
YUZU_NON_COPYABLE(CompressedStorage);
YUZU_NON_MOVEABLE(CompressedStorage);
public:
static constexpr size_t NodeSize = 16_KiB;
struct Entry {
s64 virt_offset;
s64 phys_offset;
CompressionType compression_type;
s32 phys_size;
s64 GetPhysicalSize() const {
return this->phys_size;
}
};
static_assert(std::is_trivial_v<Entry>);
static_assert(sizeof(Entry) == 0x18);
public:
static constexpr s64 QueryNodeStorageSize(s32 entry_count) {
return BucketTree::QueryNodeStorageSize(NodeSize, sizeof(Entry), entry_count);
}
static constexpr s64 QueryEntryStorageSize(s32 entry_count) {
return BucketTree::QueryEntryStorageSize(NodeSize, sizeof(Entry), entry_count);
}
private:
class CompressedStorageCore {
YUZU_NON_COPYABLE(CompressedStorageCore);
YUZU_NON_MOVEABLE(CompressedStorageCore);
public:
CompressedStorageCore() : m_table(), m_data_storage() {}
~CompressedStorageCore() {
this->Finalize();
}
public:
Result Initialize(VirtualFile data_storage, VirtualFile node_storage,
VirtualFile entry_storage, s32 bktr_entry_count, size_t block_size_max,
size_t continuous_reading_size_max,
GetDecompressorFunction get_decompressor) {
// Check pre-conditions.
ASSERT(0 < block_size_max);
ASSERT(block_size_max <= continuous_reading_size_max);
ASSERT(get_decompressor != nullptr);
// Initialize our entry table.
R_TRY(m_table.Initialize(node_storage, entry_storage, NodeSize, sizeof(Entry),
bktr_entry_count));
// Set our other fields.
m_block_size_max = block_size_max;
m_continuous_reading_size_max = continuous_reading_size_max;
m_data_storage = data_storage;
m_get_decompressor_function = get_decompressor;
R_SUCCEED();
}
void Finalize() {
if (this->IsInitialized()) {
m_table.Finalize();
m_data_storage = VirtualFile();
}
}
VirtualFile GetDataStorage() {
return m_data_storage;
}
Result GetDataStorageSize(s64* out) {
// Check pre-conditions.
ASSERT(out != nullptr);
// Get size.
*out = m_data_storage->GetSize();
R_SUCCEED();
}
BucketTree& GetEntryTable() {
return m_table;
}
Result GetEntryList(Entry* out_entries, s32* out_read_count, s32 max_entry_count,
s64 offset, s64 size) {
// Check pre-conditions.
ASSERT(offset >= 0);
ASSERT(size >= 0);
ASSERT(this->IsInitialized());
// Check that we can output the count.
R_UNLESS(out_read_count != nullptr, ResultNullptrArgument);
// Check that we have anything to read at all.
R_SUCCEED_IF(size == 0);
// Check that either we have a buffer, or this is to determine how many we need.
if (max_entry_count != 0) {
R_UNLESS(out_entries != nullptr, ResultNullptrArgument);
}
// Get the table offsets.
BucketTree::Offsets table_offsets;
R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
// Validate arguments.
R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
// Find the offset in our tree.
BucketTree::Visitor visitor;
R_TRY(m_table.Find(std::addressof(visitor), offset));
{
const auto entry_offset = visitor.Get<Entry>()->virt_offset;
R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
ResultUnexpectedInCompressedStorageA);
}
// Get the entries.
const auto end_offset = offset + size;
s32 read_count = 0;
while (visitor.Get<Entry>()->virt_offset < end_offset) {
// If we should be setting the output, do so.
if (max_entry_count != 0) {
// Ensure we only read as many entries as we can.
if (read_count >= max_entry_count) {
break;
}
// Set the current output entry.
out_entries[read_count] = *visitor.Get<Entry>();
}
// Increase the read count.
++read_count;
// If we're at the end, we're done.
if (!visitor.CanMoveNext()) {
break;
}
// Move to the next entry.
R_TRY(visitor.MoveNext());
}
// Set the output read count.
*out_read_count = read_count;
R_SUCCEED();
}
Result GetSize(s64* out) {
// Check pre-conditions.
ASSERT(out != nullptr);
// Get our table offsets.
BucketTree::Offsets offsets;
R_TRY(m_table.GetOffsets(std::addressof(offsets)));
// Set the output.
*out = offsets.end_offset;
R_SUCCEED();
}
Result OperatePerEntry(s64 offset, s64 size, auto f) {
// Check pre-conditions.
ASSERT(offset >= 0);
ASSERT(size >= 0);
ASSERT(this->IsInitialized());
// Succeed if there's nothing to operate on.
R_SUCCEED_IF(size == 0);
// Get the table offsets.
BucketTree::Offsets table_offsets;
R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
// Validate arguments.
R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
// Find the offset in our tree.
BucketTree::Visitor visitor;
R_TRY(m_table.Find(std::addressof(visitor), offset));
{
const auto entry_offset = visitor.Get<Entry>()->virt_offset;
R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
ResultUnexpectedInCompressedStorageA);
}
// Prepare to operate in chunks.
auto cur_offset = offset;
const auto end_offset = offset + static_cast<s64>(size);
while (cur_offset < end_offset) {
// Get the current entry.
const auto cur_entry = *visitor.Get<Entry>();
// Get and validate the entry's offset.
const auto cur_entry_offset = cur_entry.virt_offset;
R_UNLESS(cur_entry_offset <= cur_offset, ResultUnexpectedInCompressedStorageA);
// Get and validate the next entry offset.
s64 next_entry_offset;
if (visitor.CanMoveNext()) {
R_TRY(visitor.MoveNext());
next_entry_offset = visitor.Get<Entry>()->virt_offset;
R_UNLESS(table_offsets.IsInclude(next_entry_offset),
ResultUnexpectedInCompressedStorageA);
} else {
next_entry_offset = table_offsets.end_offset;
}
R_UNLESS(cur_offset < next_entry_offset, ResultUnexpectedInCompressedStorageA);
// Get the offset of the entry in the data we read.
const auto data_offset = cur_offset - cur_entry_offset;
const auto data_size = (next_entry_offset - cur_entry_offset);
ASSERT(data_size > 0);
// Determine how much is left.
const auto remaining_size = end_offset - cur_offset;
const auto cur_size = std::min<s64>(remaining_size, data_size - data_offset);
ASSERT(cur_size <= size);
// Get the data storage size.
s64 storage_size = m_data_storage->GetSize();
// Check that our read remains naively physically in bounds.
R_UNLESS(0 <= cur_entry.phys_offset && cur_entry.phys_offset <= storage_size,
ResultUnexpectedInCompressedStorageC);
// If we have any compression, verify that we remain physically in bounds.
if (cur_entry.compression_type != CompressionType::None) {
R_UNLESS(cur_entry.phys_offset + cur_entry.GetPhysicalSize() <= storage_size,
ResultUnexpectedInCompressedStorageC);
}
// Check that block alignment requirements are met.
if (CompressionTypeUtility::IsBlockAlignmentRequired(cur_entry.compression_type)) {
R_UNLESS(Common::IsAligned(cur_entry.phys_offset, CompressionBlockAlignment),
ResultUnexpectedInCompressedStorageA);
}
// Invoke the operator.
bool is_continuous = true;
R_TRY(
f(std::addressof(is_continuous), cur_entry, data_size, data_offset, cur_size));
// If not continuous, we're done.
if (!is_continuous) {
break;
}
// Advance.
cur_offset += cur_size;
}
R_SUCCEED();
}
public:
using ReadImplFunction = std::function<Result(void*, size_t)>;
using ReadFunction = std::function<Result(size_t, const ReadImplFunction&)>;
public:
Result Read(s64 offset, s64 size, const ReadFunction& read_func) {
// Check pre-conditions.
ASSERT(offset >= 0);
ASSERT(this->IsInitialized());
// Succeed immediately, if we have nothing to read.
R_SUCCEED_IF(size == 0);
// Declare read lambda.
constexpr int EntriesCountMax = 0x80;
struct Entries {
CompressionType compression_type;
u32 gap_from_prev;
u32 physical_size;
u32 virtual_size;
};
std::array<Entries, EntriesCountMax> entries;
s32 entry_count = 0;
Entry prev_entry = {
.virt_offset = -1,
.phys_offset{},
.compression_type{},
.phys_size{},
};
bool will_allocate_pooled_buffer = false;
s64 required_access_physical_offset = 0;
s64 required_access_physical_size = 0;
auto PerformRequiredRead = [&]() -> Result {
// If there are no entries, we have nothing to do.
R_SUCCEED_IF(entry_count == 0);
// Get the remaining size in a convenient form.
const size_t total_required_size =
static_cast<size_t>(required_access_physical_size);
// Perform the read based on whether we need to allocate a buffer.
if (will_allocate_pooled_buffer) {
// Allocate a pooled buffer.
PooledBuffer pooled_buffer;
if (pooled_buffer.GetAllocatableSizeMax() >= total_required_size) {
pooled_buffer.Allocate(total_required_size, m_block_size_max);
} else {
pooled_buffer.AllocateParticularlyLarge(
std::min<size_t>(
total_required_size,
PooledBuffer::GetAllocatableParticularlyLargeSizeMax()),
m_block_size_max);
}
// Read each of the entries.
for (s32 entry_idx = 0; entry_idx < entry_count; ++entry_idx) {
// Determine the current read size.
bool will_use_pooled_buffer = false;
const size_t cur_read_size = [&]() -> size_t {
if (const size_t target_entry_size =
static_cast<size_t>(entries[entry_idx].physical_size) +
static_cast<size_t>(entries[entry_idx].gap_from_prev);
target_entry_size <= pooled_buffer.GetSize()) {
// We'll be using the pooled buffer.
will_use_pooled_buffer = true;
// Determine how much we can read.
const size_t max_size = std::min<size_t>(
required_access_physical_size, pooled_buffer.GetSize());
size_t read_size = 0;
for (auto n = entry_idx; n < entry_count; ++n) {
const size_t cur_entry_size =
static_cast<size_t>(entries[n].physical_size) +
static_cast<size_t>(entries[n].gap_from_prev);
if (read_size + cur_entry_size > max_size) {
break;
}
read_size += cur_entry_size;
}
return read_size;
} else {
// If we don't fit, we must be uncompressed.
ASSERT(entries[entry_idx].compression_type ==
CompressionType::None);
// We can perform the whole of an uncompressed read directly.
return entries[entry_idx].virtual_size;
}
}();
// Perform the read based on whether or not we'll use the pooled buffer.
if (will_use_pooled_buffer) {
// Read the compressed data into the pooled buffer.
auto* const buffer = pooled_buffer.GetBuffer();
m_data_storage->Read(reinterpret_cast<u8*>(buffer), cur_read_size,
required_access_physical_offset);
// Decompress the data.
size_t buffer_offset;
for (buffer_offset = 0;
entry_idx < entry_count &&
((static_cast<size_t>(entries[entry_idx].physical_size) +
static_cast<size_t>(entries[entry_idx].gap_from_prev)) == 0 ||
buffer_offset < cur_read_size);
buffer_offset += entries[entry_idx++].physical_size) {
// Advance by the relevant gap.
buffer_offset += entries[entry_idx].gap_from_prev;
const auto compression_type = entries[entry_idx].compression_type;
switch (compression_type) {
case CompressionType::None: {
// Check that we can remain within bounds.
ASSERT(buffer_offset + entries[entry_idx].virtual_size <=
cur_read_size);
// Perform no decompression.
R_TRY(read_func(
entries[entry_idx].virtual_size,
[&](void* dst, size_t dst_size) -> Result {
// Check that the size is valid.
ASSERT(dst_size == entries[entry_idx].virtual_size);
// We have no compression, so just copy the data
// out.
std::memcpy(dst, buffer + buffer_offset,
entries[entry_idx].virtual_size);
R_SUCCEED();
}));
break;
}
case CompressionType::Zeros: {
// Check that we can remain within bounds.
ASSERT(buffer_offset <= cur_read_size);
// Zero the memory.
R_TRY(read_func(
entries[entry_idx].virtual_size,
[&](void* dst, size_t dst_size) -> Result {
// Check that the size is valid.
ASSERT(dst_size == entries[entry_idx].virtual_size);
// The data is zeroes, so zero the buffer.
std::memset(dst, 0, entries[entry_idx].virtual_size);
R_SUCCEED();
}));
break;
}
default: {
// Check that we can remain within bounds.
ASSERT(buffer_offset + entries[entry_idx].physical_size <=
cur_read_size);
// Get the decompressor.
const auto decompressor =
this->GetDecompressor(compression_type);
R_UNLESS(decompressor != nullptr,
ResultUnexpectedInCompressedStorageB);
// Decompress the data.
R_TRY(read_func(entries[entry_idx].virtual_size,
[&](void* dst, size_t dst_size) -> Result {
// Check that the size is valid.
ASSERT(dst_size ==
entries[entry_idx].virtual_size);
// Perform the decompression.
R_RETURN(decompressor(
dst, entries[entry_idx].virtual_size,
buffer + buffer_offset,
entries[entry_idx].physical_size));
}));
break;
}
}
}
// Check that we processed the correct amount of data.
ASSERT(buffer_offset == cur_read_size);
} else {
// Account for the gap from the previous entry.
required_access_physical_offset += entries[entry_idx].gap_from_prev;
required_access_physical_size -= entries[entry_idx].gap_from_prev;
// We don't need the buffer (as the data is uncompressed), so just
// execute the read.
R_TRY(
read_func(cur_read_size, [&](void* dst, size_t dst_size) -> Result {
// Check that the size is valid.
ASSERT(dst_size == cur_read_size);
// Perform the read.
m_data_storage->Read(reinterpret_cast<u8*>(dst), cur_read_size,
required_access_physical_offset);
R_SUCCEED();
}));
}
// Advance on.
required_access_physical_offset += cur_read_size;
required_access_physical_size -= cur_read_size;
}
// Verify that we have nothing remaining to read.
ASSERT(required_access_physical_size == 0);
R_SUCCEED();
} else {
// We don't need a buffer, so just execute the read.
R_TRY(read_func(total_required_size, [&](void* dst, size_t dst_size) -> Result {
// Check that the size is valid.
ASSERT(dst_size == total_required_size);
// Perform the read.
m_data_storage->Read(reinterpret_cast<u8*>(dst), total_required_size,
required_access_physical_offset);
R_SUCCEED();
}));
}
R_SUCCEED();
};
R_TRY(this->OperatePerEntry(
offset, size,
[&](bool* out_continuous, const Entry& entry, s64 virtual_data_size,
s64 data_offset, s64 read_size) -> Result {
// Determine the physical extents.
s64 physical_offset, physical_size;
if (CompressionTypeUtility::IsRandomAccessible(entry.compression_type)) {
physical_offset = entry.phys_offset + data_offset;
physical_size = read_size;
} else {
physical_offset = entry.phys_offset;
physical_size = entry.GetPhysicalSize();
}
// If we have a pending data storage operation, perform it if we have to.
const s64 required_access_physical_end =
required_access_physical_offset + required_access_physical_size;
if (required_access_physical_size > 0) {
const bool required_by_gap =
!(required_access_physical_end <= physical_offset &&
physical_offset <= Common::AlignUp(required_access_physical_end,
CompressionBlockAlignment));
const bool required_by_continuous_size =
((physical_size + physical_offset) - required_access_physical_end) +
required_access_physical_size >
static_cast<s64>(m_continuous_reading_size_max);
const bool required_by_entry_count = entry_count == EntriesCountMax;
if (required_by_gap || required_by_continuous_size ||
required_by_entry_count) {
// Check that our planned access is sane.
ASSERT(!will_allocate_pooled_buffer ||
required_access_physical_size <=
static_cast<s64>(m_continuous_reading_size_max));
// Perform the required read.
const Result rc = PerformRequiredRead();
if (R_FAILED(rc)) {
R_THROW(rc);
}
// Reset our requirements.
prev_entry.virt_offset = -1;
required_access_physical_size = 0;
entry_count = 0;
will_allocate_pooled_buffer = false;
}
}
// Sanity check that we're within bounds on entries.
ASSERT(entry_count < EntriesCountMax);
// Determine if a buffer allocation is needed.
if (entry.compression_type != CompressionType::None ||
(prev_entry.virt_offset >= 0 &&
entry.virt_offset - prev_entry.virt_offset !=
entry.phys_offset - prev_entry.phys_offset)) {
will_allocate_pooled_buffer = true;
}
// If we need to access the data storage, update our required access parameters.
if (CompressionTypeUtility::IsDataStorageAccessRequired(
entry.compression_type)) {
// If the data is compressed, ensure the access is sane.
if (entry.compression_type != CompressionType::None) {
R_UNLESS(data_offset == 0, ResultInvalidOffset);
R_UNLESS(virtual_data_size == read_size, ResultInvalidSize);
R_UNLESS(entry.GetPhysicalSize() <= static_cast<s64>(m_block_size_max),
ResultUnexpectedInCompressedStorageD);
}
// Update the required access parameters.
s64 gap_from_prev;
if (required_access_physical_size > 0) {
gap_from_prev = physical_offset - required_access_physical_end;
} else {
gap_from_prev = 0;
required_access_physical_offset = physical_offset;
}
required_access_physical_size += physical_size + gap_from_prev;
// Create an entry to access the data storage.
entries[entry_count++] = {
.compression_type = entry.compression_type,
.gap_from_prev = static_cast<u32>(gap_from_prev),
.physical_size = static_cast<u32>(physical_size),
.virtual_size = static_cast<u32>(read_size),
};
} else {
// Verify that we're allowed to be operating on the non-data-storage-access
// type.
R_UNLESS(entry.compression_type == CompressionType::Zeros,
ResultUnexpectedInCompressedStorageB);
// If we have entries, create a fake entry for the zero region.
if (entry_count != 0) {
// We need to have a physical size.
R_UNLESS(entry.GetPhysicalSize() != 0,
ResultUnexpectedInCompressedStorageD);
// Create a fake entry.
entries[entry_count++] = {
.compression_type = CompressionType::Zeros,
.gap_from_prev = 0,
.physical_size = 0,
.virtual_size = static_cast<u32>(read_size),
};
} else {
// We have no entries, so we can just perform the read.
const Result rc =
read_func(static_cast<size_t>(read_size),
[&](void* dst, size_t dst_size) -> Result {
// Check the space we should zero is correct.
ASSERT(dst_size == static_cast<size_t>(read_size));
// Zero the memory.
std::memset(dst, 0, read_size);
R_SUCCEED();
});
if (R_FAILED(rc)) {
R_THROW(rc);
}
}
}
// Set the previous entry.
prev_entry = entry;
// We're continuous.
*out_continuous = true;
R_SUCCEED();
}));
// If we still have a pending access, perform it.
if (required_access_physical_size != 0) {
R_TRY(PerformRequiredRead());
}
R_SUCCEED();
}
private:
DecompressorFunction GetDecompressor(CompressionType type) const {
// Check that we can get a decompressor for the type.
if (CompressionTypeUtility::IsUnknownType(type)) {
return nullptr;
}
// Get the decompressor.
return m_get_decompressor_function(type);
}
bool IsInitialized() const {
return m_table.IsInitialized();
}
private:
size_t m_block_size_max;
size_t m_continuous_reading_size_max;
BucketTree m_table;
VirtualFile m_data_storage;
GetDecompressorFunction m_get_decompressor_function;
};
class CacheManager {
YUZU_NON_COPYABLE(CacheManager);
YUZU_NON_MOVEABLE(CacheManager);
private:
struct AccessRange {
s64 virtual_offset;
s64 virtual_size;
u32 physical_size;
bool is_block_alignment_required;
s64 GetEndVirtualOffset() const {
return this->virtual_offset + this->virtual_size;
}
};
static_assert(std::is_trivial_v<AccessRange>);
public:
CacheManager() = default;
public:
Result Initialize(s64 storage_size, size_t cache_size_0, size_t cache_size_1,
size_t max_cache_entries) {
// Set our fields.
m_storage_size = storage_size;
R_SUCCEED();
}
Result Read(CompressedStorageCore& core, s64 offset, void* buffer, size_t size) {
// If we have nothing to read, succeed.
R_SUCCEED_IF(size == 0);
// Check that we have a buffer to read into.
R_UNLESS(buffer != nullptr, ResultNullptrArgument);
// Check that the read is in bounds.
R_UNLESS(offset <= m_storage_size, ResultInvalidOffset);
// Determine how much we can read.
const size_t read_size = std::min<size_t>(size, m_storage_size - offset);
// Create head/tail ranges.
AccessRange head_range = {};
AccessRange tail_range = {};
bool is_tail_set = false;
// Operate to determine the head range.
R_TRY(core.OperatePerEntry(
offset, 1,
[&](bool* out_continuous, const Entry& entry, s64 virtual_data_size,
s64 data_offset, s64 data_read_size) -> Result {
// Set the head range.
head_range = {
.virtual_offset = entry.virt_offset,
.virtual_size = virtual_data_size,
.physical_size = static_cast<u32>(entry.phys_size),
.is_block_alignment_required =
CompressionTypeUtility::IsBlockAlignmentRequired(
entry.compression_type),
};
// If required, set the tail range.
if (static_cast<s64>(offset + read_size) <=
entry.virt_offset + virtual_data_size) {
tail_range = {
.virtual_offset = entry.virt_offset,
.virtual_size = virtual_data_size,
.physical_size = static_cast<u32>(entry.phys_size),
.is_block_alignment_required =
CompressionTypeUtility::IsBlockAlignmentRequired(
entry.compression_type),
};
is_tail_set = true;
}
// We only want to determine the head range, so we're not continuous.
*out_continuous = false;
R_SUCCEED();
}));
// If necessary, determine the tail range.
if (!is_tail_set) {
R_TRY(core.OperatePerEntry(
offset + read_size - 1, 1,
[&](bool* out_continuous, const Entry& entry, s64 virtual_data_size,
s64 data_offset, s64 data_read_size) -> Result {
// Set the tail range.
tail_range = {
.virtual_offset = entry.virt_offset,
.virtual_size = virtual_data_size,
.physical_size = static_cast<u32>(entry.phys_size),
.is_block_alignment_required =
CompressionTypeUtility::IsBlockAlignmentRequired(
entry.compression_type),
};
// We only want to determine the tail range, so we're not continuous.
*out_continuous = false;
R_SUCCEED();
}));
}
// Begin performing the accesses.
s64 cur_offset = offset;
size_t cur_size = read_size;
char* cur_dst = static_cast<char*>(buffer);
// Determine our alignment.
const bool head_unaligned = head_range.is_block_alignment_required &&
(cur_offset != head_range.virtual_offset ||
static_cast<s64>(cur_size) < head_range.virtual_size);
const bool tail_unaligned = [&]() -> bool {
if (tail_range.is_block_alignment_required) {
if (static_cast<s64>(cur_size + cur_offset) ==
tail_range.GetEndVirtualOffset()) {
return false;
} else if (!head_unaligned) {
return true;
} else {
return head_range.GetEndVirtualOffset() <
static_cast<s64>(cur_size + cur_offset);
}
} else {
return false;
}
}();
// Determine start/end offsets.
const s64 start_offset =
head_range.is_block_alignment_required ? head_range.virtual_offset : cur_offset;
const s64 end_offset = tail_range.is_block_alignment_required
? tail_range.GetEndVirtualOffset()
: cur_offset + cur_size;
// Perform the read.
bool is_burst_reading = false;
R_TRY(core.Read(
start_offset, end_offset - start_offset,
[&](size_t size_buffer_required,
const CompressedStorageCore::ReadImplFunction& read_impl) -> Result {
// Determine whether we're burst reading.
const AccessRange* unaligned_range = nullptr;
if (!is_burst_reading) {
// Check whether we're using head, tail, or none as unaligned.
if (head_unaligned && head_range.virtual_offset <= cur_offset &&
cur_offset < head_range.GetEndVirtualOffset()) {
unaligned_range = std::addressof(head_range);
} else if (tail_unaligned && tail_range.virtual_offset <= cur_offset &&
cur_offset < tail_range.GetEndVirtualOffset()) {
unaligned_range = std::addressof(tail_range);
} else {
is_burst_reading = true;
}
}
ASSERT((is_burst_reading ^ (unaligned_range != nullptr)));
// Perform reading by burst, or not.
if (is_burst_reading) {
// Check that the access is valid for burst reading.
ASSERT(size_buffer_required <= cur_size);
// Perform the read.
Result rc = read_impl(cur_dst, size_buffer_required);
if (R_FAILED(rc)) {
R_THROW(rc);
}
// Advance.
cur_dst += size_buffer_required;
cur_offset += size_buffer_required;
cur_size -= size_buffer_required;
// Determine whether we're going to continue burst reading.
const s64 offset_aligned =
tail_unaligned ? tail_range.virtual_offset : end_offset;
ASSERT(cur_offset <= offset_aligned);
if (offset_aligned <= cur_offset) {
is_burst_reading = false;
}
} else {
// We're not burst reading, so we have some unaligned range.
ASSERT(unaligned_range != nullptr);
// Check that the size is correct.
ASSERT(size_buffer_required ==
static_cast<size_t>(unaligned_range->virtual_size));
// Get a pooled buffer for our read.
PooledBuffer pooled_buffer;
pooled_buffer.Allocate(size_buffer_required, size_buffer_required);
// Perform read.
Result rc = read_impl(pooled_buffer.GetBuffer(), size_buffer_required);
if (R_FAILED(rc)) {
R_THROW(rc);
}
// Copy the data we read to the destination.
const size_t skip_size = cur_offset - unaligned_range->virtual_offset;
const size_t copy_size = std::min<size_t>(
cur_size, unaligned_range->GetEndVirtualOffset() - cur_offset);
std::memcpy(cur_dst, pooled_buffer.GetBuffer() + skip_size, copy_size);
// Advance.
cur_dst += copy_size;
cur_offset += copy_size;
cur_size -= copy_size;
}
R_SUCCEED();
}));
R_SUCCEED();
}
private:
s64 m_storage_size = 0;
};
public:
CompressedStorage() = default;
virtual ~CompressedStorage() {
this->Finalize();
}
Result Initialize(VirtualFile data_storage, VirtualFile node_storage, VirtualFile entry_storage,
s32 bktr_entry_count, size_t block_size_max,
size_t continuous_reading_size_max, GetDecompressorFunction get_decompressor,
size_t cache_size_0, size_t cache_size_1, s32 max_cache_entries) {
// Initialize our core.
R_TRY(m_core.Initialize(data_storage, node_storage, entry_storage, bktr_entry_count,
block_size_max, continuous_reading_size_max, get_decompressor));
// Get our core size.
s64 core_size = 0;
R_TRY(m_core.GetSize(std::addressof(core_size)));
// Initialize our cache manager.
R_TRY(m_cache_manager.Initialize(core_size, cache_size_0, cache_size_1, max_cache_entries));
R_SUCCEED();
}
void Finalize() {
m_core.Finalize();
}
VirtualFile GetDataStorage() {
return m_core.GetDataStorage();
}
Result GetDataStorageSize(s64* out) {
R_RETURN(m_core.GetDataStorageSize(out));
}
Result GetEntryList(Entry* out_entries, s32* out_read_count, s32 max_entry_count, s64 offset,
s64 size) {
R_RETURN(m_core.GetEntryList(out_entries, out_read_count, max_entry_count, offset, size));
}
BucketTree& GetEntryTable() {
return m_core.GetEntryTable();
}
public:
virtual size_t GetSize() const override {
s64 ret{};
m_core.GetSize(&ret);
return ret;
}
virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
if (R_SUCCEEDED(m_cache_manager.Read(m_core, offset, buffer, size))) {
return size;
} else {
return 0;
}
}
private:
mutable CompressedStorageCore m_core;
mutable CacheManager m_cache_manager;
};
} // namespace FileSys
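For reference, below is a minimal sketch of the callback contract that CompressedStorageCore::Read expects from its caller (CacheManager::Read above is the real, more involved consumer). It is illustrative only and not part of the change; it assumes it is placed inside namespace FileSys where CompressedStorageCore is visible, and the function name and the `out` parameter are placeholders.

Result ReadAllIntoFlatBuffer(CompressedStorageCore& core, s64 offset, s64 size, u8* out) {
    size_t written = 0;
    // The core announces how many bytes it will produce next; the callback hands it a
    // destination window of exactly that size, which the core fills with decompressed
    // (or zero-filled) data before moving on.
    R_RETURN(core.Read(
        offset, size,
        [&](size_t required, const CompressedStorageCore::ReadImplFunction& impl) -> Result {
            R_TRY(impl(out + written, required));
            written += required;
            R_SUCCEED();
        }));
}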

View File

@ -0,0 +1,43 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "core/hle/result.h"
namespace FileSys {
enum class CompressionType : u8 {
None = 0,
Zeros = 1,
Two = 2,
Lz4 = 3,
Unknown = 4,
};
using DecompressorFunction = Result (*)(void*, size_t, const void*, size_t);
using GetDecompressorFunction = DecompressorFunction (*)(CompressionType);
constexpr s64 CompressionBlockAlignment = 0x10;
namespace CompressionTypeUtility {
constexpr bool IsBlockAlignmentRequired(CompressionType type) {
return type != CompressionType::None && type != CompressionType::Zeros;
}
constexpr bool IsDataStorageAccessRequired(CompressionType type) {
return type != CompressionType::Zeros;
}
constexpr bool IsRandomAccessible(CompressionType type) {
return type == CompressionType::None;
}
constexpr bool IsUnknownType(CompressionType type) {
return type >= CompressionType::Unknown;
}
} // namespace CompressionTypeUtility
} // namespace FileSys
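The predicates above partition the compression types; the following illustrative block (not part of the diff) spells that partition out and would compile against this header:

// None is randomly accessible, Zeros needs no backing data at all, and the remaining
// real compression formats must be accessed in aligned physical blocks.
static_assert(FileSys::CompressionTypeUtility::IsRandomAccessible(FileSys::CompressionType::None));
static_assert(!FileSys::CompressionTypeUtility::IsBlockAlignmentRequired(FileSys::CompressionType::Zeros));
static_assert(!FileSys::CompressionTypeUtility::IsDataStorageAccessRequired(FileSys::CompressionType::Zeros));
static_assert(FileSys::CompressionTypeUtility::IsBlockAlignmentRequired(FileSys::CompressionType::Lz4));
static_assert(FileSys::CompressionTypeUtility::IsUnknownType(FileSys::CompressionType::Unknown));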

View File

@ -0,0 +1,36 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/lz4_compression.h"
#include "core/file_sys/fssystem/fssystem_compression_configuration.h"
namespace FileSys {
namespace {
Result DecompressLz4(void* dst, size_t dst_size, const void* src, size_t src_size) {
auto result = Common::Compression::DecompressDataLZ4(dst, dst_size, src, src_size);
R_UNLESS(static_cast<size_t>(result) == dst_size, ResultUnexpectedInCompressedStorageC);
R_SUCCEED();
}
constexpr DecompressorFunction GetNcaDecompressorFunction(CompressionType type) {
switch (type) {
case CompressionType::Lz4:
return DecompressLz4;
default:
return nullptr;
}
}
} // namespace
const NcaCompressionConfiguration& GetNcaCompressionConfiguration() {
static const NcaCompressionConfiguration configuration = {
.get_decompressor = GetNcaDecompressorFunction,
};
return configuration;
}
} // namespace FileSys
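A hypothetical caller of the configuration above might look like the following sketch. It is not part of the change; it is written as if it lived inside namespace FileSys next to this file, and the function name and buffer parameters are placeholders. The configuration maps CompressionType::Lz4 to DecompressLz4 and every other type to nullptr.

Result DecompressNcaBlock(void* dst, size_t dst_size, const void* src, size_t src_size) {
    const auto& cfg = GetNcaCompressionConfiguration();
    const DecompressorFunction decompress = cfg.get_decompressor(CompressionType::Lz4);
    // A null decompressor means the compression type is unsupported.
    R_UNLESS(decompress != nullptr, ResultUnexpectedInCompressedStorageC);
    R_RETURN(decompress(dst, dst_size, src, src_size));
}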

View File

@ -0,0 +1,12 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
namespace FileSys {
const NcaCompressionConfiguration& GetNcaCompressionConfiguration();
}

View File

@ -0,0 +1,65 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/crypto/aes_util.h"
#include "core/crypto/key_manager.h"
#include "core/file_sys/fssystem/fssystem_crypto_configuration.h"
namespace FileSys {
namespace {
void GenerateKey(void* dst_key, size_t dst_key_size, const void* src_key, size_t src_key_size,
s32 key_type) {
if (key_type == static_cast<s32>(KeyType::ZeroKey)) {
std::memset(dst_key, 0, dst_key_size);
return;
}
if (key_type == static_cast<s32>(KeyType::InvalidKey) ||
key_type < static_cast<s32>(KeyType::ZeroKey) ||
key_type >= static_cast<s32>(KeyType::NcaExternalKey)) {
std::memset(dst_key, 0xFF, dst_key_size);
return;
}
const auto& instance = Core::Crypto::KeyManager::Instance();
if (key_type == static_cast<s32>(KeyType::NcaHeaderKey1) ||
key_type == static_cast<s32>(KeyType::NcaHeaderKey2)) {
const s32 key_index = static_cast<s32>(KeyType::NcaHeaderKey2) == key_type;
const auto key = instance.GetKey(Core::Crypto::S256KeyType::Header);
std::memcpy(dst_key, key.data() + key_index * 0x10, std::min(dst_key_size, key.size() / 2));
return;
}
const s32 key_generation =
std::max(key_type / NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount, 1) - 1;
const s32 key_index = key_type % NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount;
Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(
instance.GetKey(Core::Crypto::S128KeyType::KeyArea, key_generation, key_index),
Core::Crypto::Mode::ECB);
cipher.Transcode(reinterpret_cast<const u8*>(src_key), src_key_size,
reinterpret_cast<u8*>(dst_key), Core::Crypto::Op::Decrypt);
}
} // namespace
const NcaCryptoConfiguration& GetCryptoConfiguration() {
static const NcaCryptoConfiguration configuration = {
.header_1_sign_key_moduli{},
.header_1_sign_key_public_exponent{},
.key_area_encryption_key_source{},
.header_encryption_key_source{},
.header_encrypted_encryption_keys{},
.generate_key = GenerateKey,
.verify_sign1{},
.is_plaintext_header_available{},
.is_available_sw_key{},
};
return configuration;
}
} // namespace FileSys
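For context, the key_type values consumed by GenerateKey above are produced by GetKeyTypeValue in fssystem_nca_file_system_driver.h (shown further down in this diff), which packs a key area encryption key as generation * KeyAreaEncryptionKeyIndexCount + index; GenerateKey then decomposes that value with / and % before deriving the key. The checks below are illustrative only and not part of the change:

static_assert(FileSys::GetKeyTypeValue(2, 5) ==
              FileSys::NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount * 5 + 2);
static_assert(FileSys::GetKeyTypeValue(
                  FileSys::NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexZeroKey, 0) ==
              static_cast<s32>(FileSys::KeyType::ZeroKey));
static_assert(FileSys::GetKeyTypeValue(3, 0) == static_cast<s32>(FileSys::KeyType::InvalidKey));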

View File

@ -0,0 +1,12 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
namespace FileSys {
const NcaCryptoConfiguration& GetCryptoConfiguration();
}

View File

@ -0,0 +1,127 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h"
#include "core/file_sys/vfs_offset.h"
namespace FileSys {
HierarchicalIntegrityVerificationStorage::HierarchicalIntegrityVerificationStorage()
: m_data_size(-1) {
for (size_t i = 0; i < MaxLayers - 1; i++) {
m_verify_storages[i] = std::make_shared<IntegrityVerificationStorage>();
}
}
Result HierarchicalIntegrityVerificationStorage::Initialize(
const HierarchicalIntegrityVerificationInformation& info,
HierarchicalStorageInformation storage, int max_data_cache_entries, int max_hash_cache_entries,
s8 buffer_level) {
// Validate preconditions.
ASSERT(IntegrityMinLayerCount <= info.max_layers && info.max_layers <= IntegrityMaxLayerCount);
// Set member variables.
m_max_layers = info.max_layers;
// Initialize the top level verification storage.
m_verify_storages[0]->Initialize(storage[HierarchicalStorageInformation::MasterStorage],
storage[HierarchicalStorageInformation::Layer1Storage],
static_cast<s64>(1) << info.info[0].block_order, HashSize,
false);
// Ensure we don't leak state if further initialization goes wrong.
ON_RESULT_FAILURE {
m_verify_storages[0]->Finalize();
m_data_size = -1;
};
// Initialize the top level buffer storage.
m_buffer_storages[0] = m_verify_storages[0];
R_UNLESS(m_buffer_storages[0] != nullptr, ResultAllocationMemoryFailedAllocateShared);
// Prepare to initialize the level storages.
s32 level = 0;
// Ensure we don't leak state if further initialization goes wrong.
ON_RESULT_FAILURE_2 {
m_verify_storages[level + 1]->Finalize();
for (; level > 0; --level) {
m_buffer_storages[level].reset();
m_verify_storages[level]->Finalize();
}
};
// Initialize the level storages.
for (; level < m_max_layers - 3; ++level) {
// Initialize the verification storage.
auto buffer_storage =
std::make_shared<OffsetVfsFile>(m_buffer_storages[level], info.info[level].size, 0);
m_verify_storages[level + 1]->Initialize(
std::move(buffer_storage), storage[level + 2],
static_cast<s64>(1) << info.info[level + 1].block_order,
static_cast<s64>(1) << info.info[level].block_order, false);
// Initialize the buffer storage.
m_buffer_storages[level + 1] = m_verify_storages[level + 1];
R_UNLESS(m_buffer_storages[level + 1] != nullptr,
ResultAllocationMemoryFailedAllocateShared);
}
// Initialize the final level storage.
{
// Initialize the verification storage.
auto buffer_storage =
std::make_shared<OffsetVfsFile>(m_buffer_storages[level], info.info[level].size, 0);
m_verify_storages[level + 1]->Initialize(
std::move(buffer_storage), storage[level + 2],
static_cast<s64>(1) << info.info[level + 1].block_order,
static_cast<s64>(1) << info.info[level].block_order, true);
// Initialize the buffer storage.
m_buffer_storages[level + 1] = m_verify_storages[level + 1];
R_UNLESS(m_buffer_storages[level + 1] != nullptr,
ResultAllocationMemoryFailedAllocateShared);
}
// Set the data size.
m_data_size = info.info[level + 1].size;
// We succeeded.
R_SUCCEED();
}
void HierarchicalIntegrityVerificationStorage::Finalize() {
if (m_data_size >= 0) {
m_data_size = 0;
for (s32 level = m_max_layers - 2; level >= 0; --level) {
m_buffer_storages[level].reset();
m_verify_storages[level]->Finalize();
}
m_data_size = -1;
}
}
size_t HierarchicalIntegrityVerificationStorage::Read(u8* buffer, size_t size,
size_t offset) const {
// Validate preconditions.
ASSERT(m_data_size >= 0);
// Succeed if zero-size.
if (size == 0) {
return size;
}
// Validate arguments.
ASSERT(buffer != nullptr);
// Read the data.
return m_buffer_storages[m_max_layers - 2]->Read(buffer, size, offset);
}
size_t HierarchicalIntegrityVerificationStorage::GetSize() const {
return m_data_size;
}
} // namespace FileSys

View File

@ -0,0 +1,164 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/alignment.h"
#include "core/file_sys/fssystem/fs_i_storage.h"
#include "core/file_sys/fssystem/fs_types.h"
#include "core/file_sys/fssystem/fssystem_alignment_matching_storage.h"
#include "core/file_sys/fssystem/fssystem_integrity_verification_storage.h"
#include "core/file_sys/vfs_offset.h"
namespace FileSys {
struct HierarchicalIntegrityVerificationLevelInformation {
Int64 offset;
Int64 size;
s32 block_order;
std::array<u8, 4> reserved;
};
static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationLevelInformation>);
static_assert(sizeof(HierarchicalIntegrityVerificationLevelInformation) == 0x18);
static_assert(alignof(HierarchicalIntegrityVerificationLevelInformation) == 0x4);
struct HierarchicalIntegrityVerificationInformation {
u32 max_layers;
std::array<HierarchicalIntegrityVerificationLevelInformation, IntegrityMaxLayerCount - 1> info;
HashSalt seed;
s64 GetLayeredHashSize() const {
return this->info[this->max_layers - 2].offset;
}
s64 GetDataOffset() const {
return this->info[this->max_layers - 2].offset;
}
s64 GetDataSize() const {
return this->info[this->max_layers - 2].size;
}
};
static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationInformation>);
struct HierarchicalIntegrityVerificationMetaInformation {
u32 magic;
u32 version;
u32 master_hash_size;
HierarchicalIntegrityVerificationInformation level_hash_info;
};
static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationMetaInformation>);
struct HierarchicalIntegrityVerificationSizeSet {
s64 control_size;
s64 master_hash_size;
std::array<s64, IntegrityMaxLayerCount - 2> layered_hash_sizes;
};
static_assert(std::is_trivial_v<HierarchicalIntegrityVerificationSizeSet>);
class HierarchicalIntegrityVerificationStorage : public IReadOnlyStorage {
YUZU_NON_COPYABLE(HierarchicalIntegrityVerificationStorage);
YUZU_NON_MOVEABLE(HierarchicalIntegrityVerificationStorage);
public:
using GenerateRandomFunction = void (*)(void* dst, size_t size);
class HierarchicalStorageInformation {
public:
enum {
MasterStorage = 0,
Layer1Storage = 1,
Layer2Storage = 2,
Layer3Storage = 3,
Layer4Storage = 4,
Layer5Storage = 5,
DataStorage = 6,
};
private:
std::array<VirtualFile, DataStorage + 1> m_storages;
public:
void SetMasterHashStorage(VirtualFile s) {
m_storages[MasterStorage] = s;
}
void SetLayer1HashStorage(VirtualFile s) {
m_storages[Layer1Storage] = s;
}
void SetLayer2HashStorage(VirtualFile s) {
m_storages[Layer2Storage] = s;
}
void SetLayer3HashStorage(VirtualFile s) {
m_storages[Layer3Storage] = s;
}
void SetLayer4HashStorage(VirtualFile s) {
m_storages[Layer4Storage] = s;
}
void SetLayer5HashStorage(VirtualFile s) {
m_storages[Layer5Storage] = s;
}
void SetDataStorage(VirtualFile s) {
m_storages[DataStorage] = s;
}
VirtualFile& operator[](s32 index) {
ASSERT(MasterStorage <= index && index <= DataStorage);
return m_storages[index];
}
};
public:
HierarchicalIntegrityVerificationStorage();
virtual ~HierarchicalIntegrityVerificationStorage() override {
this->Finalize();
}
Result Initialize(const HierarchicalIntegrityVerificationInformation& info,
HierarchicalStorageInformation storage, int max_data_cache_entries,
int max_hash_cache_entries, s8 buffer_level);
void Finalize();
virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
virtual size_t GetSize() const override;
bool IsInitialized() const {
return m_data_size >= 0;
}
s64 GetL1HashVerificationBlockSize() const {
return m_verify_storages[m_max_layers - 2]->GetBlockSize();
}
VirtualFile GetL1HashStorage() {
return std::make_shared<OffsetVfsFile>(
m_buffer_storages[m_max_layers - 3],
Common::DivideUp(m_data_size, this->GetL1HashVerificationBlockSize()), 0);
}
public:
static constexpr s8 GetDefaultDataCacheBufferLevel(u32 max_layers) {
return static_cast<s8>(16 + max_layers - 2);
}
protected:
static constexpr s64 HashSize = 256 / 8;
static constexpr size_t MaxLayers = IntegrityMaxLayerCount;
private:
static GenerateRandomFunction s_generate_random;
static void SetGenerateRandomFunction(GenerateRandomFunction func) {
s_generate_random = func;
}
private:
friend struct HierarchicalIntegrityVerificationMetaInformation;
private:
std::array<std::shared_ptr<IntegrityVerificationStorage>, MaxLayers - 1> m_verify_storages;
std::array<VirtualFile, MaxLayers - 1> m_buffer_storages;
s64 m_data_size;
s32 m_max_layers;
};
} // namespace FileSys
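As a small illustrative check (not part of the change), the default data cache buffer level above evaluates as follows for the seven-layer RomFS layout declared in fssystem_integrity_romfs_storage.h later in this diff:

// IntegrityLayerCountRomFs == 7, so the default level is 16 + 7 - 2 = 21.
static_assert(FileSys::HierarchicalIntegrityVerificationStorage::GetDefaultDataCacheBufferLevel(7) == 21);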

View File

@ -0,0 +1,80 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/alignment.h"
#include "common/scope_exit.h"
#include "core/file_sys/fssystem/fssystem_hierarchical_sha256_storage.h"
namespace FileSys {
namespace {
s32 Log2(s32 value) {
ASSERT(value > 0);
ASSERT(Common::IsPowerOfTwo(value));
s32 log = 0;
while ((value >>= 1) > 0) {
++log;
}
return log;
}
} // namespace
Result HierarchicalSha256Storage::Initialize(VirtualFile* base_storages, s32 layer_count,
size_t htbs, void* hash_buf, size_t hash_buf_size) {
// Validate preconditions.
ASSERT(layer_count == LayerCount);
ASSERT(Common::IsPowerOfTwo(htbs));
ASSERT(hash_buf != nullptr);
// Set size tracking members.
m_hash_target_block_size = static_cast<s32>(htbs);
m_log_size_ratio = Log2(m_hash_target_block_size / HashSize);
// Get the base storage size.
m_base_storage_size = base_storages[2]->GetSize();
{
auto size_guard = SCOPE_GUARD({ m_base_storage_size = 0; });
R_UNLESS(m_base_storage_size <= static_cast<s64>(HashSize)
<< m_log_size_ratio << m_log_size_ratio,
ResultHierarchicalSha256BaseStorageTooLarge);
size_guard.Cancel();
}
// Set hash buffer tracking members.
m_base_storage = base_storages[2];
m_hash_buffer = static_cast<char*>(hash_buf);
m_hash_buffer_size = hash_buf_size;
// Read the master hash.
std::array<u8, HashSize> master_hash{};
base_storages[0]->ReadObject(std::addressof(master_hash));
// Read and validate the data being hashed.
s64 hash_storage_size = base_storages[1]->GetSize();
ASSERT(Common::IsAligned(hash_storage_size, HashSize));
ASSERT(hash_storage_size <= m_hash_target_block_size);
ASSERT(hash_storage_size <= static_cast<s64>(m_hash_buffer_size));
base_storages[1]->Read(reinterpret_cast<u8*>(m_hash_buffer),
static_cast<size_t>(hash_storage_size), 0);
R_SUCCEED();
}
size_t HierarchicalSha256Storage::Read(u8* buffer, size_t size, size_t offset) const {
// Succeed if zero-size.
if (size == 0) {
return size;
}
// Validate that we have a buffer to read into.
ASSERT(buffer != nullptr);
// Read the data.
return m_base_storage->Read(buffer, size, offset);
}
} // namespace FileSys
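To make the size bound in Initialize concrete, here is a worked example (illustrative, not part of the change), assuming the 16 KiB hash-target block size used for integrity layers elsewhere in this series:

// log2(16384 / 32) = 9, so the two-shift bound above permits at most
// 32 << 9 << 9 bytes = 8 MiB of hashed base storage.
static_assert((static_cast<s64>(32) << 9 << 9) == 8 * 1024 * 1024);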

View File

@ -0,0 +1,44 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <mutex>
#include "core/file_sys/errors.h"
#include "core/file_sys/fssystem/fs_i_storage.h"
#include "core/file_sys/vfs.h"
namespace FileSys {
class HierarchicalSha256Storage : public IReadOnlyStorage {
YUZU_NON_COPYABLE(HierarchicalSha256Storage);
YUZU_NON_MOVEABLE(HierarchicalSha256Storage);
public:
static constexpr s32 LayerCount = 3;
static constexpr size_t HashSize = 256 / 8;
public:
HierarchicalSha256Storage() : m_mutex() {}
Result Initialize(VirtualFile* base_storages, s32 layer_count, size_t htbs, void* hash_buf,
size_t hash_buf_size);
virtual size_t GetSize() const override {
return m_base_storage->GetSize();
}
virtual size_t Read(u8* buffer, size_t length, size_t offset) const override;
private:
VirtualFile m_base_storage;
s64 m_base_storage_size;
char* m_hash_buffer;
size_t m_hash_buffer_size;
s32 m_hash_target_block_size;
s32 m_log_size_ratio;
std::mutex m_mutex;
};
} // namespace FileSys

View File

@ -0,0 +1,119 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/file_sys/errors.h"
#include "core/file_sys/fssystem/fssystem_indirect_storage.h"
namespace FileSys {
Result IndirectStorage::Initialize(VirtualFile table_storage) {
// Read and verify the bucket tree header.
BucketTree::Header header;
table_storage->ReadObject(std::addressof(header));
R_TRY(header.Verify());
// Determine extents.
const auto node_storage_size = QueryNodeStorageSize(header.entry_count);
const auto entry_storage_size = QueryEntryStorageSize(header.entry_count);
const auto node_storage_offset = QueryHeaderStorageSize();
const auto entry_storage_offset = node_storage_offset + node_storage_size;
// Initialize.
R_RETURN(this->Initialize(
std::make_shared<OffsetVfsFile>(table_storage, node_storage_size, node_storage_offset),
std::make_shared<OffsetVfsFile>(table_storage, entry_storage_size, entry_storage_offset),
header.entry_count));
}
void IndirectStorage::Finalize() {
if (this->IsInitialized()) {
m_table.Finalize();
for (auto i = 0; i < StorageCount; i++) {
m_data_storage[i] = VirtualFile();
}
}
}
Result IndirectStorage::GetEntryList(Entry* out_entries, s32* out_entry_count, s32 entry_count,
s64 offset, s64 size) {
// Validate pre-conditions.
ASSERT(offset >= 0);
ASSERT(size >= 0);
ASSERT(this->IsInitialized());
// Clear the out count.
R_UNLESS(out_entry_count != nullptr, ResultNullptrArgument);
*out_entry_count = 0;
// Succeed if there's no range.
R_SUCCEED_IF(size == 0);
// If we have an output array, we need it to be non-null.
R_UNLESS(out_entries != nullptr || entry_count == 0, ResultNullptrArgument);
// Check that our range is valid.
BucketTree::Offsets table_offsets;
R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
// Find the offset in our tree.
BucketTree::Visitor visitor;
R_TRY(m_table.Find(std::addressof(visitor), offset));
{
const auto entry_offset = visitor.Get<Entry>()->GetVirtualOffset();
R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
ResultInvalidIndirectEntryOffset);
}
// Prepare to loop over entries.
const auto end_offset = offset + static_cast<s64>(size);
s32 count = 0;
auto cur_entry = *visitor.Get<Entry>();
while (cur_entry.GetVirtualOffset() < end_offset) {
// Try to write the entry to the out list.
if (entry_count != 0) {
if (count >= entry_count) {
break;
}
std::memcpy(out_entries + count, std::addressof(cur_entry), sizeof(Entry));
}
count++;
// Advance.
if (visitor.CanMoveNext()) {
R_TRY(visitor.MoveNext());
cur_entry = *visitor.Get<Entry>();
} else {
break;
}
}
// Write the output count.
*out_entry_count = count;
R_SUCCEED();
}
size_t IndirectStorage::Read(u8* buffer, size_t size, size_t offset) const {
// Validate pre-conditions.
ASSERT(this->IsInitialized());
ASSERT(buffer != nullptr);
// Succeed if there's nothing to read.
if (size == 0) {
return 0;
}
const_cast<IndirectStorage*>(this)->OperatePerEntry<true, true>(
offset, size,
[=](VirtualFile storage, s64 data_offset, s64 cur_offset, s64 cur_size) -> Result {
storage->Read(reinterpret_cast<u8*>(buffer) + (cur_offset - offset),
static_cast<size_t>(cur_size), data_offset);
R_SUCCEED();
});
return size;
}
} // namespace FileSys
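A hypothetical caller of GetEntryList, sketched below (not part of the change), shows the contract: entries overlapping [offset, offset + size) are copied out up to the supplied capacity, and the copied count is written back. It assumes <array>, <memory> and this file's header are available; the function name and parameters are placeholders.

Result CountPatchEntries(IndirectStorage& storage, s64 offset, s64 size, s32* out_count) {
    std::array<IndirectStorage::Entry, 0x10> entries{};
    s32 read_count = 0;
    R_TRY(storage.GetEntryList(entries.data(), std::addressof(read_count),
                               static_cast<s32>(entries.size()), offset, size));
    // A non-zero storage index selects the secondary data storage; the header shown
    // later in this diff treats such entries as fragments during continuous reading.
    s32 patch_count = 0;
    for (s32 i = 0; i < read_count; i++) {
        if (entries[i].storage_index != 0) {
            ++patch_count;
        }
    }
    *out_count = patch_count;
    R_SUCCEED();
}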

View File

@ -0,0 +1,294 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "core/file_sys/errors.h"
#include "core/file_sys/fssystem/fs_i_storage.h"
#include "core/file_sys/fssystem/fssystem_bucket_tree.h"
#include "core/file_sys/fssystem/fssystem_bucket_tree_template_impl.h"
#include "core/file_sys/vfs.h"
#include "core/file_sys/vfs_offset.h"
namespace FileSys {
class IndirectStorage : public IReadOnlyStorage {
YUZU_NON_COPYABLE(IndirectStorage);
YUZU_NON_MOVEABLE(IndirectStorage);
public:
static constexpr s32 StorageCount = 2;
static constexpr size_t NodeSize = 16_KiB;
struct Entry {
std::array<u8, sizeof(s64)> virt_offset;
std::array<u8, sizeof(s64)> phys_offset;
s32 storage_index;
void SetVirtualOffset(const s64& ofs) {
std::memcpy(this->virt_offset.data(), std::addressof(ofs), sizeof(s64));
}
s64 GetVirtualOffset() const {
s64 offset;
std::memcpy(std::addressof(offset), this->virt_offset.data(), sizeof(s64));
return offset;
}
void SetPhysicalOffset(const s64& ofs) {
std::memcpy(this->phys_offset.data(), std::addressof(ofs), sizeof(s64));
}
s64 GetPhysicalOffset() const {
s64 offset;
std::memcpy(std::addressof(offset), this->phys_offset.data(), sizeof(s64));
return offset;
}
};
static_assert(std::is_trivial_v<Entry>);
static_assert(sizeof(Entry) == 0x14);
struct EntryData {
s64 virt_offset;
s64 phys_offset;
s32 storage_index;
void Set(const Entry& entry) {
this->virt_offset = entry.GetVirtualOffset();
this->phys_offset = entry.GetPhysicalOffset();
this->storage_index = entry.storage_index;
}
};
static_assert(std::is_trivial_v<EntryData>);
public:
IndirectStorage() : m_table(), m_data_storage() {}
virtual ~IndirectStorage() {
this->Finalize();
}
Result Initialize(VirtualFile table_storage);
void Finalize();
bool IsInitialized() const {
return m_table.IsInitialized();
}
Result Initialize(VirtualFile node_storage, VirtualFile entry_storage, s32 entry_count) {
R_RETURN(
m_table.Initialize(node_storage, entry_storage, NodeSize, sizeof(Entry), entry_count));
}
void SetStorage(s32 idx, VirtualFile storage) {
ASSERT(0 <= idx && idx < StorageCount);
m_data_storage[idx] = storage;
}
template <typename T>
void SetStorage(s32 idx, T storage, s64 offset, s64 size) {
ASSERT(0 <= idx && idx < StorageCount);
m_data_storage[idx] = std::make_shared<OffsetVfsFile>(storage, size, offset);
}
Result GetEntryList(Entry* out_entries, s32* out_entry_count, s32 entry_count, s64 offset,
s64 size);
virtual size_t GetSize() const override {
BucketTree::Offsets offsets{};
m_table.GetOffsets(std::addressof(offsets));
return offsets.end_offset;
}
virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
public:
static constexpr s64 QueryHeaderStorageSize() {
return BucketTree::QueryHeaderStorageSize();
}
static constexpr s64 QueryNodeStorageSize(s32 entry_count) {
return BucketTree::QueryNodeStorageSize(NodeSize, sizeof(Entry), entry_count);
}
static constexpr s64 QueryEntryStorageSize(s32 entry_count) {
return BucketTree::QueryEntryStorageSize(NodeSize, sizeof(Entry), entry_count);
}
protected:
BucketTree& GetEntryTable() {
return m_table;
}
VirtualFile& GetDataStorage(s32 index) {
ASSERT(0 <= index && index < StorageCount);
return m_data_storage[index];
}
template <bool ContinuousCheck, bool RangeCheck, typename F>
Result OperatePerEntry(s64 offset, s64 size, F func);
private:
struct ContinuousReadingEntry {
static constexpr size_t FragmentSizeMax = 4_KiB;
IndirectStorage::Entry entry;
s64 GetVirtualOffset() const {
return this->entry.GetVirtualOffset();
}
s64 GetPhysicalOffset() const {
return this->entry.GetPhysicalOffset();
}
bool IsFragment() const {
return this->entry.storage_index != 0;
}
};
static_assert(std::is_trivial_v<ContinuousReadingEntry>);
private:
mutable BucketTree m_table;
std::array<VirtualFile, StorageCount> m_data_storage;
};
template <bool ContinuousCheck, bool RangeCheck, typename F>
Result IndirectStorage::OperatePerEntry(s64 offset, s64 size, F func) {
// Validate preconditions.
ASSERT(offset >= 0);
ASSERT(size >= 0);
ASSERT(this->IsInitialized());
// Succeed if there's nothing to operate on.
R_SUCCEED_IF(size == 0);
// Get the table offsets.
BucketTree::Offsets table_offsets;
R_TRY(m_table.GetOffsets(std::addressof(table_offsets)));
// Validate arguments.
R_UNLESS(table_offsets.IsInclude(offset, size), ResultOutOfRange);
// Find the offset in our tree.
BucketTree::Visitor visitor;
R_TRY(m_table.Find(std::addressof(visitor), offset));
{
const auto entry_offset = visitor.Get<Entry>()->GetVirtualOffset();
R_UNLESS(0 <= entry_offset && table_offsets.IsInclude(entry_offset),
ResultInvalidIndirectEntryOffset);
}
// Prepare to operate in chunks.
auto cur_offset = offset;
const auto end_offset = offset + static_cast<s64>(size);
BucketTree::ContinuousReadingInfo cr_info;
while (cur_offset < end_offset) {
// Get the current entry.
const auto cur_entry = *visitor.Get<Entry>();
// Get and validate the entry's offset.
const auto cur_entry_offset = cur_entry.GetVirtualOffset();
R_UNLESS(cur_entry_offset <= cur_offset, ResultInvalidIndirectEntryOffset);
// Validate the storage index.
R_UNLESS(0 <= cur_entry.storage_index && cur_entry.storage_index < StorageCount,
ResultInvalidIndirectEntryStorageIndex);
// If we need to check the continuous info, do so.
if constexpr (ContinuousCheck) {
// Scan, if we need to.
if (cr_info.CheckNeedScan()) {
R_TRY(visitor.ScanContinuousReading<ContinuousReadingEntry>(
std::addressof(cr_info), cur_offset,
static_cast<size_t>(end_offset - cur_offset)));
}
// Process a base storage entry.
if (cr_info.CanDo()) {
// Ensure that we can process.
R_UNLESS(cur_entry.storage_index == 0, ResultInvalidIndirectEntryStorageIndex);
// Ensure that we remain within range.
const auto data_offset = cur_offset - cur_entry_offset;
const auto cur_entry_phys_offset = cur_entry.GetPhysicalOffset();
const auto cur_size = static_cast<s64>(cr_info.GetReadSize());
// If we should, verify the range.
if constexpr (RangeCheck) {
// Get the current data storage's size.
s64 cur_data_storage_size = m_data_storage[0]->GetSize();
R_UNLESS(0 <= cur_entry_phys_offset &&
cur_entry_phys_offset <= cur_data_storage_size,
ResultInvalidIndirectEntryOffset);
R_UNLESS(cur_entry_phys_offset + data_offset + cur_size <=
cur_data_storage_size,
ResultInvalidIndirectStorageSize);
}
// Operate.
R_TRY(func(m_data_storage[0], cur_entry_phys_offset + data_offset, cur_offset,
cur_size));
// Mark as done.
cr_info.Done();
}
}
// Get and validate the next entry offset.
s64 next_entry_offset;
if (visitor.CanMoveNext()) {
R_TRY(visitor.MoveNext());
next_entry_offset = visitor.Get<Entry>()->GetVirtualOffset();
R_UNLESS(table_offsets.IsInclude(next_entry_offset), ResultInvalidIndirectEntryOffset);
} else {
next_entry_offset = table_offsets.end_offset;
}
R_UNLESS(cur_offset < next_entry_offset, ResultInvalidIndirectEntryOffset);
// Get the offset of the entry in the data we read.
const auto data_offset = cur_offset - cur_entry_offset;
const auto data_size = (next_entry_offset - cur_entry_offset);
ASSERT(data_size > 0);
// Determine how much is left.
const auto remaining_size = end_offset - cur_offset;
const auto cur_size = std::min<s64>(remaining_size, data_size - data_offset);
ASSERT(cur_size <= size);
// Operate, if we need to.
bool needs_operate;
if constexpr (!ContinuousCheck) {
needs_operate = true;
} else {
needs_operate = !cr_info.IsDone() || cur_entry.storage_index != 0;
}
if (needs_operate) {
const auto cur_entry_phys_offset = cur_entry.GetPhysicalOffset();
if constexpr (RangeCheck) {
// Get the current data storage's size.
s64 cur_data_storage_size = m_data_storage[cur_entry.storage_index]->GetSize();
// Ensure that we remain within range.
R_UNLESS(0 <= cur_entry_phys_offset &&
cur_entry_phys_offset <= cur_data_storage_size,
ResultIndirectStorageCorrupted);
R_UNLESS(cur_entry_phys_offset + data_offset + cur_size <= cur_data_storage_size,
ResultIndirectStorageCorrupted);
}
R_TRY(func(m_data_storage[cur_entry.storage_index], cur_entry_phys_offset + data_offset,
cur_offset, cur_size));
}
cur_offset += cur_size;
}
R_SUCCEED();
}
} // namespace FileSys

View File

@ -0,0 +1,30 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/file_sys/fssystem/fssystem_integrity_romfs_storage.h"
namespace FileSys {
Result IntegrityRomFsStorage::Initialize(
HierarchicalIntegrityVerificationInformation level_hash_info, Hash master_hash,
HierarchicalIntegrityVerificationStorage::HierarchicalStorageInformation storage_info,
int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level) {
// Set master hash.
m_master_hash = master_hash;
m_master_hash_storage = std::make_shared<ArrayVfsFile<sizeof(Hash)>>(m_master_hash.value);
R_UNLESS(m_master_hash_storage != nullptr,
ResultAllocationMemoryFailedInIntegrityRomFsStorageA);
// Set the master hash storage.
storage_info[0] = m_master_hash_storage;
// Initialize our integrity storage.
R_RETURN(m_integrity_storage.Initialize(level_hash_info, storage_info, max_data_cache_entries,
max_hash_cache_entries, buffer_level));
}
void IntegrityRomFsStorage::Finalize() {
m_integrity_storage.Finalize();
}
} // namespace FileSys

View File

@ -0,0 +1,42 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "core/file_sys/fssystem/fssystem_hierarchical_integrity_verification_storage.h"
#include "core/file_sys/fssystem/fssystem_nca_header.h"
#include "core/file_sys/vfs_vector.h"
namespace FileSys {
constexpr inline size_t IntegrityLayerCountRomFs = 7;
constexpr inline size_t IntegrityHashLayerBlockSize = 16_KiB;
class IntegrityRomFsStorage : public IReadOnlyStorage {
public:
IntegrityRomFsStorage() {}
virtual ~IntegrityRomFsStorage() override {
this->Finalize();
}
Result Initialize(
HierarchicalIntegrityVerificationInformation level_hash_info, Hash master_hash,
HierarchicalIntegrityVerificationStorage::HierarchicalStorageInformation storage_info,
int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level);
void Finalize();
virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
return m_integrity_storage.Read(buffer, size, offset);
}
virtual size_t GetSize() const override {
return m_integrity_storage.GetSize();
}
private:
HierarchicalIntegrityVerificationStorage m_integrity_storage;
Hash m_master_hash;
std::shared_ptr<ArrayVfsFile<sizeof(Hash)>> m_master_hash_storage;
};
} // namespace FileSys

View File

@ -0,0 +1,91 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/alignment.h"
#include "core/file_sys/fssystem/fssystem_integrity_verification_storage.h"
namespace FileSys {
constexpr inline u32 ILog2(u32 val) {
ASSERT(val > 0);
return static_cast<u32>((sizeof(u32) * 8) - 1 - std::countl_zero<u32>(val));
}
void IntegrityVerificationStorage::Initialize(VirtualFile hs, VirtualFile ds, s64 verif_block_size,
s64 upper_layer_verif_block_size, bool is_real_data) {
// Validate preconditions.
ASSERT(verif_block_size >= HashSize);
// Set storages.
m_hash_storage = hs;
m_data_storage = ds;
// Set verification block sizes.
m_verification_block_size = verif_block_size;
m_verification_block_order = ILog2(static_cast<u32>(verif_block_size));
ASSERT(m_verification_block_size == 1ll << m_verification_block_order);
// Set upper layer block sizes.
upper_layer_verif_block_size = std::max(upper_layer_verif_block_size, HashSize);
m_upper_layer_verification_block_size = upper_layer_verif_block_size;
m_upper_layer_verification_block_order = ILog2(static_cast<u32>(upper_layer_verif_block_size));
ASSERT(m_upper_layer_verification_block_size == 1ll << m_upper_layer_verification_block_order);
// Validate sizes.
{
s64 hash_size = m_hash_storage->GetSize();
s64 data_size = m_data_storage->GetSize();
ASSERT(((hash_size / HashSize) * m_verification_block_size) >= data_size);
}
// Set data.
m_is_real_data = is_real_data;
}
void IntegrityVerificationStorage::Finalize() {
m_hash_storage = VirtualFile();
m_data_storage = VirtualFile();
}
size_t IntegrityVerificationStorage::Read(u8* buffer, size_t size, size_t offset) const {
// Succeed if zero size.
if (size == 0) {
return size;
}
// Validate arguments.
ASSERT(buffer != nullptr);
// Validate the offset.
s64 data_size = m_data_storage->GetSize();
ASSERT(offset <= static_cast<size_t>(data_size));
// Validate the access range.
ASSERT(R_SUCCEEDED(IStorage::CheckAccessRange(
offset, size, Common::AlignUp(data_size, static_cast<size_t>(m_verification_block_size)))));
// Determine the read extents.
size_t read_size = size;
if (static_cast<s64>(offset + read_size) > data_size) {
// Determine the padding sizes.
s64 padding_offset = data_size - offset;
size_t padding_size = static_cast<size_t>(
m_verification_block_size - (padding_offset & (m_verification_block_size - 1)));
ASSERT(static_cast<s64>(padding_size) < m_verification_block_size);
// Clear the padding.
std::memset(static_cast<u8*>(buffer) + padding_offset, 0, padding_size);
// Set the new in-bounds size.
read_size = static_cast<size_t>(data_size - offset);
}
// Perform the read.
return m_data_storage->Read(buffer, read_size, offset);
}
size_t IntegrityVerificationStorage::GetSize() const {
return m_data_storage->GetSize();
}
} // namespace FileSys
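To make the tail-padding logic in Read above concrete, here is a small worked example (illustrative, not part of the change):

// With a 0x4000-byte verification block and a 0x9000-byte data storage, a 0x4000-byte
// read at offset 0x8000 has only 0x1000 in-bounds bytes; the remaining 0x3000 bytes up
// to the block boundary are zero-cleared before the backing read is issued.
static_assert(0x4000 - (0x1000 & (0x4000 - 1)) == 0x3000);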

View File

@ -0,0 +1,65 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <optional>
#include "core/file_sys/fssystem/fs_i_storage.h"
#include "core/file_sys/fssystem/fs_types.h"
namespace FileSys {
class IntegrityVerificationStorage : public IReadOnlyStorage {
YUZU_NON_COPYABLE(IntegrityVerificationStorage);
YUZU_NON_MOVEABLE(IntegrityVerificationStorage);
public:
static constexpr s64 HashSize = 256 / 8;
struct BlockHash {
std::array<u8, HashSize> hash;
};
static_assert(std::is_trivial_v<BlockHash>);
public:
IntegrityVerificationStorage()
: m_verification_block_size(0), m_verification_block_order(0),
m_upper_layer_verification_block_size(0), m_upper_layer_verification_block_order(0) {}
virtual ~IntegrityVerificationStorage() override {
this->Finalize();
}
void Initialize(VirtualFile hs, VirtualFile ds, s64 verif_block_size,
s64 upper_layer_verif_block_size, bool is_real_data);
void Finalize();
virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
virtual size_t GetSize() const override;
s64 GetBlockSize() const {
return m_verification_block_size;
}
private:
static void SetValidationBit(BlockHash* hash) {
ASSERT(hash != nullptr);
hash->hash[HashSize - 1] |= 0x80;
}
static bool IsValidationBit(const BlockHash* hash) {
ASSERT(hash != nullptr);
return (hash->hash[HashSize - 1] & 0x80) != 0;
}
private:
VirtualFile m_hash_storage;
VirtualFile m_data_storage;
s64 m_verification_block_size;
s64 m_verification_block_order;
s64 m_upper_layer_verification_block_size;
s64 m_upper_layer_verification_block_order;
bool m_is_real_data;
};
} // namespace FileSys

View File

@ -0,0 +1,61 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "core/file_sys/fssystem/fs_i_storage.h"
namespace FileSys {
class MemoryResourceBufferHoldStorage : public IStorage {
YUZU_NON_COPYABLE(MemoryResourceBufferHoldStorage);
YUZU_NON_MOVEABLE(MemoryResourceBufferHoldStorage);
public:
MemoryResourceBufferHoldStorage(VirtualFile storage, size_t buffer_size)
: m_storage(std::move(storage)), m_buffer(::operator new(buffer_size)),
m_buffer_size(buffer_size) {}
virtual ~MemoryResourceBufferHoldStorage() {
// If we have a buffer, deallocate it.
if (m_buffer != nullptr) {
::operator delete(m_buffer);
}
}
bool IsValid() const {
return m_buffer != nullptr;
}
void* GetBuffer() const {
return m_buffer;
}
public:
virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
// Check pre-conditions.
ASSERT(m_storage != nullptr);
return m_storage->Read(buffer, size, offset);
}
virtual size_t GetSize() const override {
// Check pre-conditions.
ASSERT(m_storage != nullptr);
return m_storage->GetSize();
}
virtual size_t Write(const u8* buffer, size_t size, size_t offset) override {
// Check pre-conditions.
ASSERT(m_storage != nullptr);
return m_storage->Write(buffer, size, offset);
}
private:
VirtualFile m_storage;
void* m_buffer;
size_t m_buffer_size;
};
} // namespace FileSys

File diff suppressed because it is too large

View File

@ -0,0 +1,364 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "core/file_sys/fssystem/fssystem_compression_common.h"
#include "core/file_sys/fssystem/fssystem_nca_header.h"
#include "core/file_sys/vfs.h"
namespace FileSys {
class CompressedStorage;
class AesCtrCounterExtendedStorage;
class IndirectStorage;
class SparseStorage;
struct NcaCryptoConfiguration;
using KeyGenerationFunction = void (*)(void* dst_key, size_t dst_key_size, const void* src_key,
size_t src_key_size, s32 key_type);
using VerifySign1Function = bool (*)(const void* sig, size_t sig_size, const void* data,
size_t data_size, u8 generation);
struct NcaCryptoConfiguration {
static constexpr size_t Rsa2048KeyModulusSize = 2048 / 8;
static constexpr size_t Rsa2048KeyPublicExponentSize = 3;
static constexpr size_t Rsa2048KeyPrivateExponentSize = Rsa2048KeyModulusSize;
static constexpr size_t Aes128KeySize = 128 / 8;
static constexpr size_t Header1SignatureKeyGenerationMax = 1;
static constexpr s32 KeyAreaEncryptionKeyIndexCount = 3;
static constexpr s32 HeaderEncryptionKeyCount = 2;
static constexpr u8 KeyAreaEncryptionKeyIndexZeroKey = 0xFF;
static constexpr size_t KeyGenerationMax = 32;
std::array<const u8*, Header1SignatureKeyGenerationMax + 1> header_1_sign_key_moduli;
std::array<u8, Rsa2048KeyPublicExponentSize> header_1_sign_key_public_exponent;
std::array<std::array<u8, Aes128KeySize>, KeyAreaEncryptionKeyIndexCount>
key_area_encryption_key_source;
std::array<u8, Aes128KeySize> header_encryption_key_source;
std::array<std::array<u8, Aes128KeySize>, HeaderEncryptionKeyCount>
header_encrypted_encryption_keys;
KeyGenerationFunction generate_key;
VerifySign1Function verify_sign1;
bool is_plaintext_header_available;
bool is_available_sw_key;
};
static_assert(std::is_trivial_v<NcaCryptoConfiguration>);
struct NcaCompressionConfiguration {
GetDecompressorFunction get_decompressor;
};
static_assert(std::is_trivial_v<NcaCompressionConfiguration>);
constexpr inline s32 KeyAreaEncryptionKeyCount =
NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount *
NcaCryptoConfiguration::KeyGenerationMax;
enum class KeyType : s32 {
ZeroKey = -2,
InvalidKey = -1,
NcaHeaderKey1 = KeyAreaEncryptionKeyCount + 0,
NcaHeaderKey2 = KeyAreaEncryptionKeyCount + 1,
NcaExternalKey = KeyAreaEncryptionKeyCount + 2,
SaveDataDeviceUniqueMac = KeyAreaEncryptionKeyCount + 3,
SaveDataSeedUniqueMac = KeyAreaEncryptionKeyCount + 4,
SaveDataTransferMac = KeyAreaEncryptionKeyCount + 5,
};
constexpr inline bool IsInvalidKeyTypeValue(s32 key_type) {
return key_type < 0;
}
constexpr inline s32 GetKeyTypeValue(u8 key_index, u8 key_generation) {
if (key_index == NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexZeroKey) {
return static_cast<s32>(KeyType::ZeroKey);
}
if (key_index >= NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount) {
return static_cast<s32>(KeyType::InvalidKey);
}
return NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount * key_generation + key_index;
}
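// A few illustrative compile-time checks of the mapping above (assuming the constants keep
// their current values): index 1 with generation 4 maps to 3 * 4 + 1, the zero-key index maps
// to KeyType::ZeroKey, and an out-of-range index is rejected.
static_assert(GetKeyTypeValue(1, 4) == 13);
static_assert(GetKeyTypeValue(NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexZeroKey, 0) ==
              static_cast<s32>(KeyType::ZeroKey));
static_assert(IsInvalidKeyTypeValue(GetKeyTypeValue(3, 0)));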
class NcaReader {
YUZU_NON_COPYABLE(NcaReader);
YUZU_NON_MOVEABLE(NcaReader);
public:
NcaReader();
~NcaReader();
Result Initialize(VirtualFile base_storage, const NcaCryptoConfiguration& crypto_cfg,
const NcaCompressionConfiguration& compression_cfg);
VirtualFile GetSharedBodyStorage();
u32 GetMagic() const;
NcaHeader::DistributionType GetDistributionType() const;
NcaHeader::ContentType GetContentType() const;
u8 GetHeaderSign1KeyGeneration() const;
u8 GetKeyGeneration() const;
u8 GetKeyIndex() const;
u64 GetContentSize() const;
u64 GetProgramId() const;
u32 GetContentIndex() const;
u32 GetSdkAddonVersion() const;
void GetRightsId(u8* dst, size_t dst_size) const;
bool HasFsInfo(s32 index) const;
s32 GetFsCount() const;
const Hash& GetFsHeaderHash(s32 index) const;
void GetFsHeaderHash(Hash* dst, s32 index) const;
void GetFsInfo(NcaHeader::FsInfo* dst, s32 index) const;
u64 GetFsOffset(s32 index) const;
u64 GetFsEndOffset(s32 index) const;
u64 GetFsSize(s32 index) const;
void GetEncryptedKey(void* dst, size_t size) const;
const void* GetDecryptionKey(s32 index) const;
bool HasValidInternalKey() const;
bool HasInternalDecryptionKeyForAesHw() const;
bool IsSoftwareAesPrioritized() const;
void PrioritizeSoftwareAes();
bool IsAvailableSwKey() const;
bool HasExternalDecryptionKey() const;
const void* GetExternalDecryptionKey() const;
void SetExternalDecryptionKey(const void* src, size_t size);
void GetRawData(void* dst, size_t dst_size) const;
NcaHeader::EncryptionType GetEncryptionType() const;
Result ReadHeader(NcaFsHeader* dst, s32 index) const;
GetDecompressorFunction GetDecompressor() const;
bool GetHeaderSign1Valid() const;
void GetHeaderSign2(void* dst, size_t size) const;
private:
NcaHeader m_header;
std::array<std::array<u8, NcaCryptoConfiguration::Aes128KeySize>,
NcaHeader::DecryptionKey_Count>
m_decryption_keys;
VirtualFile m_body_storage;
VirtualFile m_header_storage;
std::array<u8, NcaCryptoConfiguration::Aes128KeySize> m_external_decryption_key;
bool m_is_software_aes_prioritized;
bool m_is_available_sw_key;
NcaHeader::EncryptionType m_header_encryption_type;
bool m_is_header_sign1_signature_valid;
GetDecompressorFunction m_get_decompressor;
};
class NcaFsHeaderReader {
YUZU_NON_COPYABLE(NcaFsHeaderReader);
YUZU_NON_MOVEABLE(NcaFsHeaderReader);
public:
NcaFsHeaderReader() : m_fs_index(-1) {
std::memset(std::addressof(m_data), 0, sizeof(m_data));
}
Result Initialize(const NcaReader& reader, s32 index);
bool IsInitialized() const {
return m_fs_index >= 0;
}
void GetRawData(void* dst, size_t dst_size) const;
NcaFsHeader::HashData& GetHashData();
const NcaFsHeader::HashData& GetHashData() const;
u16 GetVersion() const;
s32 GetFsIndex() const;
NcaFsHeader::FsType GetFsType() const;
NcaFsHeader::HashType GetHashType() const;
NcaFsHeader::EncryptionType GetEncryptionType() const;
NcaPatchInfo& GetPatchInfo();
const NcaPatchInfo& GetPatchInfo() const;
const NcaAesCtrUpperIv GetAesCtrUpperIv() const;
bool IsSkipLayerHashEncryption() const;
Result GetHashTargetOffset(s64* out) const;
bool ExistsSparseLayer() const;
NcaSparseInfo& GetSparseInfo();
const NcaSparseInfo& GetSparseInfo() const;
bool ExistsCompressionLayer() const;
NcaCompressionInfo& GetCompressionInfo();
const NcaCompressionInfo& GetCompressionInfo() const;
bool ExistsPatchMetaHashLayer() const;
NcaMetaDataHashDataInfo& GetPatchMetaDataHashDataInfo();
const NcaMetaDataHashDataInfo& GetPatchMetaDataHashDataInfo() const;
NcaFsHeader::MetaDataHashType GetPatchMetaHashType() const;
bool ExistsSparseMetaHashLayer() const;
NcaMetaDataHashDataInfo& GetSparseMetaDataHashDataInfo();
const NcaMetaDataHashDataInfo& GetSparseMetaDataHashDataInfo() const;
NcaFsHeader::MetaDataHashType GetSparseMetaHashType() const;
private:
NcaFsHeader m_data;
s32 m_fs_index;
};
class NcaFileSystemDriver {
YUZU_NON_COPYABLE(NcaFileSystemDriver);
YUZU_NON_MOVEABLE(NcaFileSystemDriver);
public:
struct StorageContext {
bool open_raw_storage;
VirtualFile body_substorage;
std::shared_ptr<SparseStorage> current_sparse_storage;
VirtualFile sparse_storage_meta_storage;
std::shared_ptr<SparseStorage> original_sparse_storage;
void* external_current_sparse_storage;
void* external_original_sparse_storage;
VirtualFile aes_ctr_ex_storage_meta_storage;
VirtualFile aes_ctr_ex_storage_data_storage;
std::shared_ptr<AesCtrCounterExtendedStorage> aes_ctr_ex_storage;
VirtualFile indirect_storage_meta_storage;
std::shared_ptr<IndirectStorage> indirect_storage;
VirtualFile fs_data_storage;
VirtualFile compressed_storage_meta_storage;
std::shared_ptr<CompressedStorage> compressed_storage;
VirtualFile patch_layer_info_storage;
VirtualFile sparse_layer_info_storage;
VirtualFile external_original_storage;
};
private:
enum class AlignmentStorageRequirement {
CacheBlockSize = 0,
None = 1,
};
public:
static Result SetupFsHeaderReader(NcaFsHeaderReader* out, const NcaReader& reader,
s32 fs_index);
public:
NcaFileSystemDriver(std::shared_ptr<NcaReader> reader) : m_original_reader(), m_reader(reader) {
ASSERT(m_reader != nullptr);
}
NcaFileSystemDriver(std::shared_ptr<NcaReader> original_reader,
std::shared_ptr<NcaReader> reader)
: m_original_reader(original_reader), m_reader(reader) {
ASSERT(m_reader != nullptr);
}
Result OpenStorageWithContext(VirtualFile* out, NcaFsHeaderReader* out_header_reader,
s32 fs_index, StorageContext* ctx);
Result OpenStorage(VirtualFile* out, NcaFsHeaderReader* out_header_reader, s32 fs_index) {
// Create a storage context.
StorageContext ctx{};
// Open the storage.
R_RETURN(OpenStorageWithContext(out, out_header_reader, fs_index, std::addressof(ctx)));
}
public:
Result CreateStorageByRawStorage(VirtualFile* out, const NcaFsHeaderReader* header_reader,
VirtualFile raw_storage, StorageContext* ctx);
private:
Result OpenStorageImpl(VirtualFile* out, NcaFsHeaderReader* out_header_reader, s32 fs_index,
StorageContext* ctx);
Result OpenIndirectableStorageAsOriginal(VirtualFile* out,
const NcaFsHeaderReader* header_reader,
StorageContext* ctx);
Result CreateBodySubStorage(VirtualFile* out, s64 offset, s64 size);
Result CreateAesCtrStorage(VirtualFile* out, VirtualFile base_storage, s64 offset,
const NcaAesCtrUpperIv& upper_iv,
AlignmentStorageRequirement alignment_storage_requirement);
Result CreateAesXtsStorage(VirtualFile* out, VirtualFile base_storage, s64 offset);
Result CreateSparseStorageMetaStorage(VirtualFile* out, VirtualFile base_storage, s64 offset,
const NcaAesCtrUpperIv& upper_iv,
const NcaSparseInfo& sparse_info);
Result CreateSparseStorageCore(std::shared_ptr<SparseStorage>* out, VirtualFile base_storage,
s64 base_size, VirtualFile meta_storage,
const NcaSparseInfo& sparse_info, bool external_info);
Result CreateSparseStorage(VirtualFile* out, s64* out_fs_data_offset,
std::shared_ptr<SparseStorage>* out_sparse_storage,
VirtualFile* out_meta_storage, s32 index,
const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info);
Result CreateSparseStorageMetaStorageWithVerification(
VirtualFile* out, VirtualFile* out_verification, VirtualFile base_storage, s64 offset,
const NcaAesCtrUpperIv& upper_iv, const NcaSparseInfo& sparse_info,
const NcaMetaDataHashDataInfo& meta_data_hash_data_info);
Result CreateSparseStorageWithVerification(
VirtualFile* out, s64* out_fs_data_offset,
std::shared_ptr<SparseStorage>* out_sparse_storage, VirtualFile* out_meta_storage,
VirtualFile* out_verification, s32 index, const NcaAesCtrUpperIv& upper_iv,
const NcaSparseInfo& sparse_info, const NcaMetaDataHashDataInfo& meta_data_hash_data_info,
NcaFsHeader::MetaDataHashType meta_data_hash_type);
Result CreateAesCtrExStorageMetaStorage(VirtualFile* out, VirtualFile base_storage, s64 offset,
NcaFsHeader::EncryptionType encryption_type,
const NcaAesCtrUpperIv& upper_iv,
const NcaPatchInfo& patch_info);
Result CreateAesCtrExStorage(VirtualFile* out,
std::shared_ptr<AesCtrCounterExtendedStorage>* out_ext,
VirtualFile base_storage, VirtualFile meta_storage,
s64 counter_offset, const NcaAesCtrUpperIv& upper_iv,
const NcaPatchInfo& patch_info);
Result CreateIndirectStorageMetaStorage(VirtualFile* out, VirtualFile base_storage,
const NcaPatchInfo& patch_info);
Result CreateIndirectStorage(VirtualFile* out, std::shared_ptr<IndirectStorage>* out_ind,
VirtualFile base_storage, VirtualFile original_data_storage,
VirtualFile meta_storage, const NcaPatchInfo& patch_info);
Result CreatePatchMetaStorage(VirtualFile* out_aes_ctr_ex_meta, VirtualFile* out_indirect_meta,
VirtualFile* out_verification, VirtualFile base_storage,
s64 offset, const NcaAesCtrUpperIv& upper_iv,
const NcaPatchInfo& patch_info,
const NcaMetaDataHashDataInfo& meta_data_hash_data_info);
Result CreateSha256Storage(VirtualFile* out, VirtualFile base_storage,
const NcaFsHeader::HashData::HierarchicalSha256Data& sha256_data);
Result CreateIntegrityVerificationStorage(
VirtualFile* out, VirtualFile base_storage,
const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info);
Result CreateIntegrityVerificationStorageForMeta(
VirtualFile* out, VirtualFile* out_verification, VirtualFile base_storage, s64 offset,
const NcaMetaDataHashDataInfo& meta_data_hash_data_info);
Result CreateIntegrityVerificationStorageImpl(
VirtualFile* out, VirtualFile base_storage,
const NcaFsHeader::HashData::IntegrityMetaInfo& meta_info, s64 layer_info_offset,
int max_data_cache_entries, int max_hash_cache_entries, s8 buffer_level);
Result CreateRegionSwitchStorage(VirtualFile* out, const NcaFsHeaderReader* header_reader,
VirtualFile inside_storage, VirtualFile outside_storage);
Result CreateCompressedStorage(VirtualFile* out, std::shared_ptr<CompressedStorage>* out_cmp,
VirtualFile* out_meta, VirtualFile base_storage,
const NcaCompressionInfo& compression_info);
public:
Result CreateCompressedStorage(VirtualFile* out, std::shared_ptr<CompressedStorage>* out_cmp,
VirtualFile* out_meta, VirtualFile base_storage,
const NcaCompressionInfo& compression_info,
GetDecompressorFunction get_decompressor);
private:
std::shared_ptr<NcaReader> m_original_reader;
std::shared_ptr<NcaReader> m_reader;
};
} // namespace FileSys
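For orientation, a minimal usage sketch of the driver API declared above, written as if it lived in namespace FileSys and assuming an NcaReader that was already initialized elsewhere with valid crypto and compression configurations:
Result OpenFirstSection(VirtualFile* out, std::shared_ptr<NcaReader> reader) {
    // Index 0 selects the first filesystem entry described by the NCA header.
    NcaFileSystemDriver driver(reader);
    NcaFsHeaderReader header_reader;
    R_RETURN(driver.OpenStorage(out, std::addressof(header_reader), 0));
}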

View File

@ -0,0 +1,20 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/file_sys/fssystem/fssystem_nca_header.h"
namespace FileSys {
u8 NcaHeader::GetProperKeyGeneration() const {
return std::max(this->key_generation, this->key_generation_2);
}
bool NcaPatchInfo::HasIndirectTable() const {
return this->indirect_size != 0;
}
bool NcaPatchInfo::HasAesCtrExTable() const {
return this->aes_ctr_ex_size != 0;
}
} // namespace FileSys

View File

@ -0,0 +1,338 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/literals.h"
#include "core/file_sys/errors.h"
#include "core/file_sys/fssystem/fs_types.h"
namespace FileSys {
using namespace Common::Literals;
struct Hash {
static constexpr std::size_t Size = 256 / 8;
std::array<u8, Size> value;
};
static_assert(sizeof(Hash) == Hash::Size);
static_assert(std::is_trivial_v<Hash>);
using NcaDigest = Hash;
struct NcaHeader {
enum class ContentType : u8 {
Program = 0,
Meta = 1,
Control = 2,
Manual = 3,
Data = 4,
PublicData = 5,
Start = Program,
End = PublicData,
};
enum class DistributionType : u8 {
Download = 0,
GameCard = 1,
Start = Download,
End = GameCard,
};
enum class EncryptionType : u8 {
Auto = 0,
None = 1,
};
enum DecryptionKey {
DecryptionKey_AesXts = 0,
DecryptionKey_AesXts1 = DecryptionKey_AesXts,
DecryptionKey_AesXts2 = 1,
DecryptionKey_AesCtr = 2,
DecryptionKey_AesCtrEx = 3,
DecryptionKey_AesCtrHw = 4,
DecryptionKey_Count,
};
struct FsInfo {
u32 start_sector;
u32 end_sector;
u32 hash_sectors;
u32 reserved;
};
static_assert(sizeof(FsInfo) == 0x10);
static_assert(std::is_trivial_v<FsInfo>);
static constexpr u32 Magic0 = Common::MakeMagic('N', 'C', 'A', '0');
static constexpr u32 Magic1 = Common::MakeMagic('N', 'C', 'A', '1');
static constexpr u32 Magic2 = Common::MakeMagic('N', 'C', 'A', '2');
static constexpr u32 Magic3 = Common::MakeMagic('N', 'C', 'A', '3');
static constexpr u32 Magic = Magic3;
static constexpr std::size_t Size = 1_KiB;
static constexpr s32 FsCountMax = 4;
static constexpr std::size_t HeaderSignCount = 2;
static constexpr std::size_t HeaderSignSize = 0x100;
static constexpr std::size_t EncryptedKeyAreaSize = 0x100;
static constexpr std::size_t SectorSize = 0x200;
static constexpr std::size_t SectorShift = 9;
static constexpr std::size_t RightsIdSize = 0x10;
static constexpr std::size_t XtsBlockSize = 0x200;
static constexpr std::size_t CtrBlockSize = 0x10;
static_assert(SectorSize == (1 << SectorShift));
// Data members.
std::array<u8, HeaderSignSize> header_sign_1;
std::array<u8, HeaderSignSize> header_sign_2;
u32 magic;
DistributionType distribution_type;
ContentType content_type;
u8 key_generation;
u8 key_index;
u64 content_size;
u64 program_id;
u32 content_index;
u32 sdk_addon_version;
u8 key_generation_2;
u8 header1_signature_key_generation;
std::array<u8, 2> reserved_222;
std::array<u32, 3> reserved_224;
std::array<u8, RightsIdSize> rights_id;
std::array<FsInfo, FsCountMax> fs_info;
std::array<Hash, FsCountMax> fs_header_hash;
std::array<u8, EncryptedKeyAreaSize> encrypted_key_area;
static constexpr u64 SectorToByte(u32 sector) {
return static_cast<u64>(sector) << SectorShift;
}
static constexpr u32 ByteToSector(u64 byte) {
return static_cast<u32>(byte >> SectorShift);
}
u8 GetProperKeyGeneration() const;
};
static_assert(sizeof(NcaHeader) == NcaHeader::Size);
static_assert(std::is_trivial_v<NcaHeader>);
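// Worked examples of the sector/byte mapping (sector size 0x200), expressed as compile-time
// checks: sector 1 starts at byte 0x200, and byte 0xA00 lies in sector 5.
static_assert(NcaHeader::SectorToByte(1) == 0x200);
static_assert(NcaHeader::ByteToSector(0xA00) == 5);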
struct NcaBucketInfo {
static constexpr size_t HeaderSize = 0x10;
Int64 offset;
Int64 size;
std::array<u8, HeaderSize> header;
};
static_assert(std::is_trivial_v<NcaBucketInfo>);
struct NcaPatchInfo {
static constexpr size_t Size = 0x40;
static constexpr size_t Offset = 0x100;
Int64 indirect_offset;
Int64 indirect_size;
std::array<u8, NcaBucketInfo::HeaderSize> indirect_header;
Int64 aes_ctr_ex_offset;
Int64 aes_ctr_ex_size;
std::array<u8, NcaBucketInfo::HeaderSize> aes_ctr_ex_header;
bool HasIndirectTable() const;
bool HasAesCtrExTable() const;
};
static_assert(std::is_trivial_v<NcaPatchInfo>);
union NcaAesCtrUpperIv {
u64 value;
struct {
u32 generation;
u32 secure_value;
} part;
};
static_assert(std::is_trivial_v<NcaAesCtrUpperIv>);
struct NcaSparseInfo {
NcaBucketInfo bucket;
Int64 physical_offset;
u16 generation;
std::array<u8, 6> reserved;
s64 GetPhysicalSize() const {
return this->bucket.offset + this->bucket.size;
}
u32 GetGeneration() const {
return static_cast<u32>(this->generation) << 16;
}
const NcaAesCtrUpperIv MakeAesCtrUpperIv(NcaAesCtrUpperIv upper_iv) const {
NcaAesCtrUpperIv sparse_upper_iv = upper_iv;
sparse_upper_iv.part.generation = this->GetGeneration();
return sparse_upper_iv;
}
};
static_assert(std::is_trivial_v<NcaSparseInfo>);
struct NcaCompressionInfo {
NcaBucketInfo bucket;
std::array<u8, 8> reserved;
};
static_assert(std::is_trivial_v<NcaCompressionInfo>);
struct NcaMetaDataHashDataInfo {
Int64 offset;
Int64 size;
Hash hash;
};
static_assert(std::is_trivial_v<NcaMetaDataHashDataInfo>);
struct NcaFsHeader {
static constexpr size_t Size = 0x200;
static constexpr size_t HashDataOffset = 0x8;
struct Region {
Int64 offset;
Int64 size;
};
static_assert(std::is_trivial_v<Region>);
enum class FsType : u8 {
RomFs = 0,
PartitionFs = 1,
};
enum class EncryptionType : u8 {
Auto = 0,
None = 1,
AesXts = 2,
AesCtr = 3,
AesCtrEx = 4,
AesCtrSkipLayerHash = 5,
AesCtrExSkipLayerHash = 6,
};
enum class HashType : u8 {
Auto = 0,
None = 1,
HierarchicalSha256Hash = 2,
HierarchicalIntegrityHash = 3,
AutoSha3 = 4,
HierarchicalSha3256Hash = 5,
HierarchicalIntegritySha3Hash = 6,
};
enum class MetaDataHashType : u8 {
None = 0,
HierarchicalIntegrity = 1,
};
union HashData {
struct HierarchicalSha256Data {
static constexpr size_t HashLayerCountMax = 5;
static const size_t MasterHashOffset;
Hash fs_data_master_hash;
s32 hash_block_size;
s32 hash_layer_count;
std::array<Region, HashLayerCountMax> hash_layer_region;
} hierarchical_sha256_data;
static_assert(std::is_trivial_v<HierarchicalSha256Data>);
struct IntegrityMetaInfo {
static const size_t MasterHashOffset;
u32 magic;
u32 version;
u32 master_hash_size;
struct LevelHashInfo {
u32 max_layers;
struct HierarchicalIntegrityVerificationLevelInformation {
static constexpr size_t IntegrityMaxLayerCount = 7;
Int64 offset;
Int64 size;
s32 block_order;
std::array<u8, 4> reserved;
};
std::array<
HierarchicalIntegrityVerificationLevelInformation,
HierarchicalIntegrityVerificationLevelInformation::IntegrityMaxLayerCount - 1>
info;
struct SignatureSalt {
static constexpr size_t Size = 0x20;
std::array<u8, Size> value;
};
SignatureSalt seed;
} level_hash_info;
Hash master_hash;
} integrity_meta_info;
static_assert(std::is_trivial_v<IntegrityMetaInfo>);
std::array<u8, NcaPatchInfo::Offset - HashDataOffset> padding;
};
u16 version;
FsType fs_type;
HashType hash_type;
EncryptionType encryption_type;
MetaDataHashType meta_data_hash_type;
std::array<u8, 2> reserved;
HashData hash_data;
NcaPatchInfo patch_info;
NcaAesCtrUpperIv aes_ctr_upper_iv;
NcaSparseInfo sparse_info;
NcaCompressionInfo compression_info;
NcaMetaDataHashDataInfo meta_data_hash_data_info;
std::array<u8, 0x30> pad;
bool IsSkipLayerHashEncryption() const {
return this->encryption_type == EncryptionType::AesCtrSkipLayerHash ||
this->encryption_type == EncryptionType::AesCtrExSkipLayerHash;
}
Result GetHashTargetOffset(s64* out) const {
switch (this->hash_type) {
case HashType::HierarchicalIntegrityHash:
case HashType::HierarchicalIntegritySha3Hash:
*out = this->hash_data.integrity_meta_info.level_hash_info
.info[this->hash_data.integrity_meta_info.level_hash_info.max_layers - 2]
.offset;
R_SUCCEED();
case HashType::HierarchicalSha256Hash:
case HashType::HierarchicalSha3256Hash:
*out =
this->hash_data.hierarchical_sha256_data
.hash_layer_region[this->hash_data.hierarchical_sha256_data.hash_layer_count -
1]
.offset;
R_SUCCEED();
default:
R_THROW(ResultInvalidNcaFsHeader);
}
}
};
static_assert(sizeof(NcaFsHeader) == NcaFsHeader::Size);
static_assert(std::is_trivial_v<NcaFsHeader>);
static_assert(offsetof(NcaFsHeader, patch_info) == NcaPatchInfo::Offset);
inline constexpr const size_t NcaFsHeader::HashData::HierarchicalSha256Data::MasterHashOffset =
offsetof(NcaFsHeader, hash_data.hierarchical_sha256_data.fs_data_master_hash);
inline constexpr const size_t NcaFsHeader::HashData::IntegrityMetaInfo::MasterHashOffset =
offsetof(NcaFsHeader, hash_data.integrity_meta_info.master_hash);
struct NcaMetaDataHashData {
s64 layer_info_offset;
NcaFsHeader::HashData::IntegrityMetaInfo integrity_meta_info;
};
static_assert(sizeof(NcaMetaDataHashData) ==
sizeof(NcaFsHeader::HashData::IntegrityMetaInfo) + sizeof(s64));
static_assert(std::is_trivial_v<NcaMetaDataHashData>);
} // namespace FileSys

View File

@ -0,0 +1,531 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/file_sys/fssystem/fssystem_aes_xts_storage.h"
#include "core/file_sys/fssystem/fssystem_nca_file_system_driver.h"
#include "core/file_sys/vfs_offset.h"
namespace FileSys {
namespace {
constexpr inline u32 SdkAddonVersionMin = 0x000B0000;
constexpr inline size_t Aes128KeySize = 0x10;
constexpr const std::array<u8, Aes128KeySize> ZeroKey{};
constexpr Result CheckNcaMagic(u32 magic) {
// Verify the magic is not a deprecated one.
R_UNLESS(magic != NcaHeader::Magic0, ResultUnsupportedSdkVersion);
R_UNLESS(magic != NcaHeader::Magic1, ResultUnsupportedSdkVersion);
R_UNLESS(magic != NcaHeader::Magic2, ResultUnsupportedSdkVersion);
// Verify the magic is the current one.
R_UNLESS(magic == NcaHeader::Magic3, ResultInvalidNcaSignature);
R_SUCCEED();
}
} // namespace
NcaReader::NcaReader()
: m_body_storage(), m_header_storage(), m_is_software_aes_prioritized(false),
m_is_available_sw_key(false), m_header_encryption_type(NcaHeader::EncryptionType::Auto),
m_get_decompressor() {
std::memset(std::addressof(m_header), 0, sizeof(m_header));
std::memset(std::addressof(m_decryption_keys), 0, sizeof(m_decryption_keys));
std::memset(std::addressof(m_external_decryption_key), 0, sizeof(m_external_decryption_key));
}
NcaReader::~NcaReader() {}
Result NcaReader::Initialize(VirtualFile base_storage, const NcaCryptoConfiguration& crypto_cfg,
const NcaCompressionConfiguration& compression_cfg) {
// Validate preconditions.
ASSERT(base_storage != nullptr);
ASSERT(m_body_storage == nullptr);
// Create the work header storage.
VirtualFile work_header_storage;
// We need to be able to generate keys.
R_UNLESS(crypto_cfg.generate_key != nullptr, ResultInvalidArgument);
// Generate keys for header.
using AesXtsStorageForNcaHeader = AesXtsStorage;
constexpr std::array<s32, NcaCryptoConfiguration::HeaderEncryptionKeyCount>
HeaderKeyTypeValues = {
static_cast<s32>(KeyType::NcaHeaderKey1),
static_cast<s32>(KeyType::NcaHeaderKey2),
};
std::array<std::array<u8, NcaCryptoConfiguration::Aes128KeySize>,
NcaCryptoConfiguration::HeaderEncryptionKeyCount>
header_decryption_keys;
for (size_t i = 0; i < NcaCryptoConfiguration::HeaderEncryptionKeyCount; i++) {
crypto_cfg.generate_key(header_decryption_keys[i].data(),
AesXtsStorageForNcaHeader::KeySize,
crypto_cfg.header_encrypted_encryption_keys[i].data(),
AesXtsStorageForNcaHeader::KeySize, HeaderKeyTypeValues[i]);
}
// Create the header storage.
std::array<u8, AesXtsStorageForNcaHeader::IvSize> header_iv = {};
work_header_storage = std::make_unique<AesXtsStorageForNcaHeader>(
base_storage, header_decryption_keys[0].data(), header_decryption_keys[1].data(),
AesXtsStorageForNcaHeader::KeySize, header_iv.data(), AesXtsStorageForNcaHeader::IvSize,
NcaHeader::XtsBlockSize);
// Check that we successfully created the storage.
R_UNLESS(work_header_storage != nullptr, ResultAllocationMemoryFailedInNcaReaderA);
// Read the header.
work_header_storage->ReadObject(std::addressof(m_header), 0);
// Validate the magic.
if (const Result magic_result = CheckNcaMagic(m_header.magic); R_FAILED(magic_result)) {
// Try to use a plaintext header.
base_storage->ReadObject(std::addressof(m_header), 0);
R_UNLESS(R_SUCCEEDED(CheckNcaMagic(m_header.magic)), magic_result);
// Configure to use the plaintext header.
auto base_storage_size = base_storage->GetSize();
work_header_storage = std::make_shared<OffsetVfsFile>(base_storage, base_storage_size, 0);
R_UNLESS(work_header_storage != nullptr, ResultAllocationMemoryFailedInNcaReaderA);
// Set encryption type as plaintext.
m_header_encryption_type = NcaHeader::EncryptionType::None;
}
// Verify the header sign1.
if (crypto_cfg.verify_sign1 != nullptr) {
const u8* sig = m_header.header_sign_1.data();
const size_t sig_size = NcaHeader::HeaderSignSize;
const u8* msg =
static_cast<const u8*>(static_cast<const void*>(std::addressof(m_header.magic)));
const size_t msg_size =
NcaHeader::Size - NcaHeader::HeaderSignSize * NcaHeader::HeaderSignCount;
m_is_header_sign1_signature_valid = crypto_cfg.verify_sign1(
sig, sig_size, msg, msg_size, m_header.header1_signature_key_generation);
if (!m_is_header_sign1_signature_valid) {
LOG_WARNING(Common_Filesystem, "Invalid NCA header sign1");
}
}
// Validate the sdk version.
R_UNLESS(m_header.sdk_addon_version >= SdkAddonVersionMin, ResultUnsupportedSdkVersion);
// Validate the key index.
R_UNLESS(m_header.key_index < NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexCount ||
m_header.key_index == NcaCryptoConfiguration::KeyAreaEncryptionKeyIndexZeroKey,
ResultInvalidNcaKeyIndex);
// Check if we have a rights id.
constexpr const std::array<u8, NcaHeader::RightsIdSize> ZeroRightsId{};
if (std::memcmp(ZeroRightsId.data(), m_header.rights_id.data(), NcaHeader::RightsIdSize) == 0) {
// If we don't, then we don't have an external key, so we need to generate decryption keys.
crypto_cfg.generate_key(
m_decryption_keys[NcaHeader::DecryptionKey_AesCtr].data(), Aes128KeySize,
m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesCtr * Aes128KeySize,
Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
crypto_cfg.generate_key(
m_decryption_keys[NcaHeader::DecryptionKey_AesXts1].data(), Aes128KeySize,
m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesXts1 * Aes128KeySize,
Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
crypto_cfg.generate_key(
m_decryption_keys[NcaHeader::DecryptionKey_AesXts2].data(), Aes128KeySize,
m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesXts2 * Aes128KeySize,
Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
crypto_cfg.generate_key(
m_decryption_keys[NcaHeader::DecryptionKey_AesCtrEx].data(), Aes128KeySize,
m_header.encrypted_key_area.data() + NcaHeader::DecryptionKey_AesCtrEx * Aes128KeySize,
Aes128KeySize, GetKeyTypeValue(m_header.key_index, m_header.GetProperKeyGeneration()));
// Copy the hardware speed emulation key.
std::memcpy(m_decryption_keys[NcaHeader::DecryptionKey_AesCtrHw].data(),
m_header.encrypted_key_area.data() +
NcaHeader::DecryptionKey_AesCtrHw * Aes128KeySize,
Aes128KeySize);
}
// Clear the external decryption key.
std::memset(m_external_decryption_key.data(), 0, m_external_decryption_key.size());
// Set software key availability.
m_is_available_sw_key = crypto_cfg.is_available_sw_key;
// Set our decompressor function getter.
m_get_decompressor = compression_cfg.get_decompressor;
// Set our storages.
m_header_storage = std::move(work_header_storage);
m_body_storage = std::move(base_storage);
R_SUCCEED();
}
VirtualFile NcaReader::GetSharedBodyStorage() {
ASSERT(m_body_storage != nullptr);
return m_body_storage;
}
u32 NcaReader::GetMagic() const {
ASSERT(m_body_storage != nullptr);
return m_header.magic;
}
NcaHeader::DistributionType NcaReader::GetDistributionType() const {
ASSERT(m_body_storage != nullptr);
return m_header.distribution_type;
}
NcaHeader::ContentType NcaReader::GetContentType() const {
ASSERT(m_body_storage != nullptr);
return m_header.content_type;
}
u8 NcaReader::GetHeaderSign1KeyGeneration() const {
ASSERT(m_body_storage != nullptr);
return m_header.header1_signature_key_generation;
}
u8 NcaReader::GetKeyGeneration() const {
ASSERT(m_body_storage != nullptr);
return m_header.GetProperKeyGeneration();
}
u8 NcaReader::GetKeyIndex() const {
ASSERT(m_body_storage != nullptr);
return m_header.key_index;
}
u64 NcaReader::GetContentSize() const {
ASSERT(m_body_storage != nullptr);
return m_header.content_size;
}
u64 NcaReader::GetProgramId() const {
ASSERT(m_body_storage != nullptr);
return m_header.program_id;
}
u32 NcaReader::GetContentIndex() const {
ASSERT(m_body_storage != nullptr);
return m_header.content_index;
}
u32 NcaReader::GetSdkAddonVersion() const {
ASSERT(m_body_storage != nullptr);
return m_header.sdk_addon_version;
}
void NcaReader::GetRightsId(u8* dst, size_t dst_size) const {
ASSERT(dst != nullptr);
ASSERT(dst_size >= NcaHeader::RightsIdSize);
std::memcpy(dst, m_header.rights_id.data(), NcaHeader::RightsIdSize);
}
bool NcaReader::HasFsInfo(s32 index) const {
ASSERT(0 <= index && index < NcaHeader::FsCountMax);
return m_header.fs_info[index].start_sector != 0 || m_header.fs_info[index].end_sector != 0;
}
s32 NcaReader::GetFsCount() const {
ASSERT(m_body_storage != nullptr);
for (s32 i = 0; i < NcaHeader::FsCountMax; i++) {
if (!this->HasFsInfo(i)) {
return i;
}
}
return NcaHeader::FsCountMax;
}
const Hash& NcaReader::GetFsHeaderHash(s32 index) const {
ASSERT(m_body_storage != nullptr);
ASSERT(0 <= index && index < NcaHeader::FsCountMax);
return m_header.fs_header_hash[index];
}
void NcaReader::GetFsHeaderHash(Hash* dst, s32 index) const {
ASSERT(m_body_storage != nullptr);
ASSERT(0 <= index && index < NcaHeader::FsCountMax);
ASSERT(dst != nullptr);
std::memcpy(dst, std::addressof(m_header.fs_header_hash[index]), sizeof(*dst));
}
void NcaReader::GetFsInfo(NcaHeader::FsInfo* dst, s32 index) const {
ASSERT(m_body_storage != nullptr);
ASSERT(0 <= index && index < NcaHeader::FsCountMax);
ASSERT(dst != nullptr);
std::memcpy(dst, std::addressof(m_header.fs_info[index]), sizeof(*dst));
}
u64 NcaReader::GetFsOffset(s32 index) const {
ASSERT(m_body_storage != nullptr);
ASSERT(0 <= index && index < NcaHeader::FsCountMax);
return NcaHeader::SectorToByte(m_header.fs_info[index].start_sector);
}
u64 NcaReader::GetFsEndOffset(s32 index) const {
ASSERT(m_body_storage != nullptr);
ASSERT(0 <= index && index < NcaHeader::FsCountMax);
return NcaHeader::SectorToByte(m_header.fs_info[index].end_sector);
}
u64 NcaReader::GetFsSize(s32 index) const {
ASSERT(m_body_storage != nullptr);
ASSERT(0 <= index && index < NcaHeader::FsCountMax);
return NcaHeader::SectorToByte(m_header.fs_info[index].end_sector -
m_header.fs_info[index].start_sector);
}
void NcaReader::GetEncryptedKey(void* dst, size_t size) const {
ASSERT(m_body_storage != nullptr);
ASSERT(dst != nullptr);
ASSERT(size >= NcaHeader::EncryptedKeyAreaSize);
std::memcpy(dst, m_header.encrypted_key_area.data(), NcaHeader::EncryptedKeyAreaSize);
}
const void* NcaReader::GetDecryptionKey(s32 index) const {
ASSERT(m_body_storage != nullptr);
ASSERT(0 <= index && index < NcaHeader::DecryptionKey_Count);
return m_decryption_keys[index].data();
}
bool NcaReader::HasValidInternalKey() const {
for (s32 i = 0; i < NcaHeader::DecryptionKey_Count; i++) {
if (std::memcmp(ZeroKey.data(), m_header.encrypted_key_area.data() + i * Aes128KeySize,
Aes128KeySize) != 0) {
return true;
}
}
return false;
}
bool NcaReader::HasInternalDecryptionKeyForAesHw() const {
return std::memcmp(ZeroKey.data(), this->GetDecryptionKey(NcaHeader::DecryptionKey_AesCtrHw),
Aes128KeySize) != 0;
}
bool NcaReader::IsSoftwareAesPrioritized() const {
return m_is_software_aes_prioritized;
}
void NcaReader::PrioritizeSoftwareAes() {
m_is_software_aes_prioritized = true;
}
bool NcaReader::IsAvailableSwKey() const {
return m_is_available_sw_key;
}
bool NcaReader::HasExternalDecryptionKey() const {
return std::memcmp(ZeroKey.data(), this->GetExternalDecryptionKey(), Aes128KeySize) != 0;
}
const void* NcaReader::GetExternalDecryptionKey() const {
return m_external_decryption_key.data();
}
void NcaReader::SetExternalDecryptionKey(const void* src, size_t size) {
ASSERT(src != nullptr);
ASSERT(size == sizeof(m_external_decryption_key));
std::memcpy(m_external_decryption_key.data(), src, sizeof(m_external_decryption_key));
}
void NcaReader::GetRawData(void* dst, size_t dst_size) const {
ASSERT(m_body_storage != nullptr);
ASSERT(dst != nullptr);
ASSERT(dst_size >= sizeof(NcaHeader));
std::memcpy(dst, std::addressof(m_header), sizeof(NcaHeader));
}
GetDecompressorFunction NcaReader::GetDecompressor() const {
ASSERT(m_get_decompressor != nullptr);
return m_get_decompressor;
}
NcaHeader::EncryptionType NcaReader::GetEncryptionType() const {
return m_header_encryption_type;
}
Result NcaReader::ReadHeader(NcaFsHeader* dst, s32 index) const {
ASSERT(dst != nullptr);
ASSERT(0 <= index && index < NcaHeader::FsCountMax);
const s64 offset = sizeof(NcaHeader) + sizeof(NcaFsHeader) * index;
m_header_storage->ReadObject(dst, offset);
R_SUCCEED();
}
bool NcaReader::GetHeaderSign1Valid() const {
return m_is_header_sign1_signature_valid;
}
void NcaReader::GetHeaderSign2(void* dst, size_t size) const {
ASSERT(dst != nullptr);
ASSERT(size == NcaHeader::HeaderSignSize);
std::memcpy(dst, m_header.header_sign_2.data(), size);
}
Result NcaFsHeaderReader::Initialize(const NcaReader& reader, s32 index) {
// Reset ourselves to uninitialized.
m_fs_index = -1;
// Read the header.
R_TRY(reader.ReadHeader(std::addressof(m_data), index));
// Set our index.
m_fs_index = index;
R_SUCCEED();
}
void NcaFsHeaderReader::GetRawData(void* dst, size_t dst_size) const {
ASSERT(this->IsInitialized());
ASSERT(dst != nullptr);
ASSERT(dst_size >= sizeof(NcaFsHeader));
std::memcpy(dst, std::addressof(m_data), sizeof(NcaFsHeader));
}
NcaFsHeader::HashData& NcaFsHeaderReader::GetHashData() {
ASSERT(this->IsInitialized());
return m_data.hash_data;
}
const NcaFsHeader::HashData& NcaFsHeaderReader::GetHashData() const {
ASSERT(this->IsInitialized());
return m_data.hash_data;
}
u16 NcaFsHeaderReader::GetVersion() const {
ASSERT(this->IsInitialized());
return m_data.version;
}
s32 NcaFsHeaderReader::GetFsIndex() const {
ASSERT(this->IsInitialized());
return m_fs_index;
}
NcaFsHeader::FsType NcaFsHeaderReader::GetFsType() const {
ASSERT(this->IsInitialized());
return m_data.fs_type;
}
NcaFsHeader::HashType NcaFsHeaderReader::GetHashType() const {
ASSERT(this->IsInitialized());
return m_data.hash_type;
}
NcaFsHeader::EncryptionType NcaFsHeaderReader::GetEncryptionType() const {
ASSERT(this->IsInitialized());
return m_data.encryption_type;
}
NcaPatchInfo& NcaFsHeaderReader::GetPatchInfo() {
ASSERT(this->IsInitialized());
return m_data.patch_info;
}
const NcaPatchInfo& NcaFsHeaderReader::GetPatchInfo() const {
ASSERT(this->IsInitialized());
return m_data.patch_info;
}
const NcaAesCtrUpperIv NcaFsHeaderReader::GetAesCtrUpperIv() const {
ASSERT(this->IsInitialized());
return m_data.aes_ctr_upper_iv;
}
bool NcaFsHeaderReader::IsSkipLayerHashEncryption() const {
ASSERT(this->IsInitialized());
return m_data.IsSkipLayerHashEncryption();
}
Result NcaFsHeaderReader::GetHashTargetOffset(s64* out) const {
ASSERT(out != nullptr);
ASSERT(this->IsInitialized());
R_RETURN(m_data.GetHashTargetOffset(out));
}
bool NcaFsHeaderReader::ExistsSparseLayer() const {
ASSERT(this->IsInitialized());
return m_data.sparse_info.generation != 0;
}
NcaSparseInfo& NcaFsHeaderReader::GetSparseInfo() {
ASSERT(this->IsInitialized());
return m_data.sparse_info;
}
const NcaSparseInfo& NcaFsHeaderReader::GetSparseInfo() const {
ASSERT(this->IsInitialized());
return m_data.sparse_info;
}
bool NcaFsHeaderReader::ExistsCompressionLayer() const {
ASSERT(this->IsInitialized());
return m_data.compression_info.bucket.offset != 0 && m_data.compression_info.bucket.size != 0;
}
NcaCompressionInfo& NcaFsHeaderReader::GetCompressionInfo() {
ASSERT(this->IsInitialized());
return m_data.compression_info;
}
const NcaCompressionInfo& NcaFsHeaderReader::GetCompressionInfo() const {
ASSERT(this->IsInitialized());
return m_data.compression_info;
}
bool NcaFsHeaderReader::ExistsPatchMetaHashLayer() const {
ASSERT(this->IsInitialized());
return m_data.meta_data_hash_data_info.size != 0 && this->GetPatchInfo().HasIndirectTable();
}
NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetPatchMetaDataHashDataInfo() {
ASSERT(this->IsInitialized());
return m_data.meta_data_hash_data_info;
}
const NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetPatchMetaDataHashDataInfo() const {
ASSERT(this->IsInitialized());
return m_data.meta_data_hash_data_info;
}
NcaFsHeader::MetaDataHashType NcaFsHeaderReader::GetPatchMetaHashType() const {
ASSERT(this->IsInitialized());
return m_data.meta_data_hash_type;
}
bool NcaFsHeaderReader::ExistsSparseMetaHashLayer() const {
ASSERT(this->IsInitialized());
return m_data.meta_data_hash_data_info.size != 0 && this->ExistsSparseLayer();
}
NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetSparseMetaDataHashDataInfo() {
ASSERT(this->IsInitialized());
return m_data.meta_data_hash_data_info;
}
const NcaMetaDataHashDataInfo& NcaFsHeaderReader::GetSparseMetaDataHashDataInfo() const {
ASSERT(this->IsInitialized());
return m_data.meta_data_hash_data_info;
}
NcaFsHeader::MetaDataHashType NcaFsHeaderReader::GetSparseMetaHashType() const {
ASSERT(this->IsInitialized());
return m_data.meta_data_hash_type;
}
} // namespace FileSys

View File

@ -0,0 +1,61 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "common/alignment.h"
#include "core/file_sys/fssystem/fssystem_pooled_buffer.h"
namespace FileSys {
namespace {
constexpr size_t HeapBlockSize = BufferPoolAlignment;
static_assert(HeapBlockSize == 4_KiB);
// A heap block is 4KiB, and an order is a power-of-two multiple of blocks.
// With HeapOrderMax = 7 and HeapOrderMaxForLarge = 10, the largest allocations
// come out to 512KiB for normal buffers and 4MiB for particularly large ones.
constexpr s32 HeapOrderMax = 7;
constexpr s32 HeapOrderMaxForLarge = HeapOrderMax + 3;
constexpr size_t HeapAllocatableSizeMax = HeapBlockSize * (static_cast<size_t>(1) << HeapOrderMax);
constexpr size_t HeapAllocatableSizeMaxForLarge =
HeapBlockSize * (static_cast<size_t>(1) << HeapOrderMaxForLarge);
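// Compile-time checks spelling out those maxima: 4KiB << 7 == 512KiB and 4KiB << 10 == 4MiB.
static_assert(HeapAllocatableSizeMax == 512 * 1024);
static_assert(HeapAllocatableSizeMaxForLarge == 4 * 1024 * 1024);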
} // namespace
size_t PooledBuffer::GetAllocatableSizeMaxCore(bool large) {
return large ? HeapAllocatableSizeMaxForLarge : HeapAllocatableSizeMax;
}
void PooledBuffer::AllocateCore(size_t ideal_size, size_t required_size, bool large) {
// Ensure preconditions.
ASSERT(m_buffer == nullptr);
// Check that we can allocate this size.
ASSERT(required_size <= GetAllocatableSizeMaxCore(large));
const size_t target_size =
std::min(std::max(ideal_size, required_size), GetAllocatableSizeMaxCore(large));
// Dummy implementation for allocate.
if (target_size > 0) {
m_buffer =
reinterpret_cast<char*>(::operator new(target_size, std::align_val_t{HeapBlockSize}));
m_size = target_size;
// Ensure postconditions.
ASSERT(m_buffer != nullptr);
}
}
void PooledBuffer::Shrink(size_t ideal_size) {
ASSERT(ideal_size <= GetAllocatableSizeMaxCore(true));
// Shrinking to zero means that we have no buffer.
if (ideal_size == 0) {
::operator delete(m_buffer, std::align_val_t{HeapBlockSize});
m_buffer = nullptr;
m_size = ideal_size;
}
}
} // namespace FileSys

View File

@ -0,0 +1,95 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/literals.h"
#include "core/hle/result.h"
namespace FileSys {
using namespace Common::Literals;
constexpr inline size_t BufferPoolAlignment = 4_KiB;
constexpr inline size_t BufferPoolWorkSize = 320;
class PooledBuffer {
YUZU_NON_COPYABLE(PooledBuffer);
public:
// Constructor/Destructor.
constexpr PooledBuffer() : m_buffer(), m_size() {}
PooledBuffer(size_t ideal_size, size_t required_size) : m_buffer(), m_size() {
this->Allocate(ideal_size, required_size);
}
~PooledBuffer() {
this->Deallocate();
}
// Move and assignment.
explicit PooledBuffer(PooledBuffer&& rhs) : m_buffer(rhs.m_buffer), m_size(rhs.m_size) {
rhs.m_buffer = nullptr;
rhs.m_size = 0;
}
PooledBuffer& operator=(PooledBuffer&& rhs) {
PooledBuffer(std::move(rhs)).Swap(*this);
return *this;
}
// Allocation API.
void Allocate(size_t ideal_size, size_t required_size) {
return this->AllocateCore(ideal_size, required_size, false);
}
void AllocateParticularlyLarge(size_t ideal_size, size_t required_size) {
return this->AllocateCore(ideal_size, required_size, true);
}
void Shrink(size_t ideal_size);
void Deallocate() {
// Shrink the buffer to empty.
this->Shrink(0);
ASSERT(m_buffer == nullptr);
}
char* GetBuffer() const {
ASSERT(m_buffer != nullptr);
return m_buffer;
}
size_t GetSize() const {
ASSERT(m_buffer != nullptr);
return m_size;
}
public:
static size_t GetAllocatableSizeMax() {
return GetAllocatableSizeMaxCore(false);
}
static size_t GetAllocatableParticularlyLargeSizeMax() {
return GetAllocatableSizeMaxCore(true);
}
private:
static size_t GetAllocatableSizeMaxCore(bool large);
private:
void Swap(PooledBuffer& rhs) {
std::swap(m_buffer, rhs.m_buffer);
std::swap(m_size, rhs.m_size);
}
void AllocateCore(size_t ideal_size, size_t required_size, bool large);
private:
char* m_buffer;
size_t m_size;
};
} // namespace FileSys
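A brief sketch of the intended usage pattern, assuming a caller inside namespace FileSys that needs temporary 4KiB-aligned scratch memory:
PooledBuffer pooled(PooledBuffer::GetAllocatableSizeMax(), BufferPoolAlignment);
char* work = pooled.GetBuffer();            // aligned to BufferPoolAlignment
const size_t work_size = pooled.GetSize();  // at least the required size, at most the pool maximum
// ... fill and consume work[0, work_size) ...
pooled.Deallocate();                        // optional; the destructor does the same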

View File

@ -0,0 +1,39 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/file_sys/fssystem/fssystem_sparse_storage.h"
namespace FileSys {
size_t SparseStorage::Read(u8* buffer, size_t size, size_t offset) const {
// Validate preconditions.
ASSERT(this->IsInitialized());
ASSERT(buffer != nullptr);
// Allow zero size.
if (size == 0) {
return size;
}
SparseStorage* self = const_cast<SparseStorage*>(this);
if (self->GetEntryTable().IsEmpty()) {
BucketTree::Offsets table_offsets;
ASSERT(R_SUCCEEDED(self->GetEntryTable().GetOffsets(std::addressof(table_offsets))));
ASSERT(table_offsets.IsInclude(offset, size));
std::memset(buffer, 0, size);
} else {
self->OperatePerEntry<false, true>(
offset, size,
[=](VirtualFile storage, s64 data_offset, s64 cur_offset, s64 cur_size) -> Result {
storage->Read(reinterpret_cast<u8*>(buffer) + (cur_offset - offset),
static_cast<size_t>(cur_size), data_offset);
R_SUCCEED();
});
}
return size;
}
} // namespace FileSys

View File

@ -0,0 +1,72 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "core/file_sys/fssystem/fssystem_indirect_storage.h"
namespace FileSys {
class SparseStorage : public IndirectStorage {
YUZU_NON_COPYABLE(SparseStorage);
YUZU_NON_MOVEABLE(SparseStorage);
private:
class ZeroStorage : public IReadOnlyStorage {
public:
ZeroStorage() {}
virtual ~ZeroStorage() {}
virtual size_t GetSize() const override {
return std::numeric_limits<size_t>::max();
}
virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
ASSERT(buffer != nullptr || size == 0);
if (size > 0) {
std::memset(buffer, 0, size);
}
return size;
}
};
public:
SparseStorage() : IndirectStorage(), m_zero_storage(std::make_shared<ZeroStorage>()) {}
virtual ~SparseStorage() {}
using IndirectStorage::Initialize;
void Initialize(s64 end_offset) {
this->GetEntryTable().Initialize(NodeSize, end_offset);
this->SetZeroStorage();
}
void SetDataStorage(VirtualFile storage) {
ASSERT(this->IsInitialized());
this->SetStorage(0, storage);
this->SetZeroStorage();
}
template <typename T>
void SetDataStorage(T storage, s64 offset, s64 size) {
ASSERT(this->IsInitialized());
this->SetStorage(0, storage, offset, size);
this->SetZeroStorage();
}
virtual size_t Read(u8* buffer, size_t size, size_t offset) const override;
private:
void SetZeroStorage() {
return this->SetStorage(1, m_zero_storage, 0, std::numeric_limits<s64>::max());
}
private:
VirtualFile m_zero_storage;
};
} // namespace FileSys

View File

@ -0,0 +1,80 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "core/file_sys/fssystem/fs_i_storage.h"
namespace FileSys {
class RegionSwitchStorage : public IReadOnlyStorage {
YUZU_NON_COPYABLE(RegionSwitchStorage);
YUZU_NON_MOVEABLE(RegionSwitchStorage);
public:
struct Region {
s64 offset;
s64 size;
};
public:
RegionSwitchStorage(VirtualFile&& i, VirtualFile&& o, Region r)
: m_inside_region_storage(std::move(i)), m_outside_region_storage(std::move(o)),
m_region(r) {}
virtual size_t Read(u8* buffer, size_t size, size_t offset) const override {
// Process until we're done.
size_t processed = 0;
while (processed < size) {
// Process on the appropriate storage.
s64 cur_size = 0;
if (this->CheckRegions(std::addressof(cur_size), offset + processed,
size - processed)) {
m_inside_region_storage->Read(buffer + processed, cur_size, offset + processed);
} else {
m_outside_region_storage->Read(buffer + processed, cur_size, offset + processed);
}
// Advance.
processed += cur_size;
}
return size;
}
virtual size_t GetSize() const override {
return m_inside_region_storage->GetSize();
}
private:
bool CheckRegions(s64* out_current_size, s64 offset, s64 size) const {
// Check if our region contains the access.
if (m_region.offset <= offset) {
if (offset < m_region.offset + m_region.size) {
if (m_region.offset + m_region.size <= offset + size) {
*out_current_size = m_region.offset + m_region.size - offset;
} else {
*out_current_size = size;
}
return true;
} else {
*out_current_size = size;
return false;
}
} else {
if (m_region.offset <= offset + size) {
*out_current_size = m_region.offset - offset;
} else {
*out_current_size = size;
}
return false;
}
}
private:
VirtualFile m_inside_region_storage;
VirtualFile m_outside_region_storage;
Region m_region;
};
} // namespace FileSys
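To make the splitting in CheckRegions concrete, a standalone sketch (plain C++, independent of the storage classes) of how a read crossing the region boundary is carved up:
#include <algorithm>
#include <cstdint>
#include <cstdio>
int main() {
    // Region [0x100, 0x200) is served by the inside storage; everything else by the outside one.
    const std::int64_t region_offset = 0x100, region_size = 0x100;
    // A 0x80-byte read at 0x1C0 crosses the boundary at 0x200.
    const std::int64_t offset = 0x1C0, size = 0x80;
    const std::int64_t inside = std::min(region_offset + region_size - offset, size);
    const std::int64_t outside = size - inside;
    std::printf("inside=0x%llx outside=0x%llx\n", static_cast<unsigned long long>(inside),
                static_cast<unsigned long long>(outside)); // inside=0x40 outside=0x40
    return 0;
}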

View File

@ -0,0 +1,27 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include "core/file_sys/fssystem/fssystem_utility.h"
namespace FileSys {
void AddCounter(void* counter_, size_t counter_size, u64 value) {
u8* counter = static_cast<u8*>(counter_);
u64 remaining = value;
u8 carry = 0;
for (size_t i = 0; i < counter_size; i++) {
auto sum = counter[counter_size - 1 - i] + (remaining & 0xFF) + carry;
carry = static_cast<u8>(sum >> (sizeof(u8) * 8));
auto sum8 = static_cast<u8>(sum & 0xFF);
counter[counter_size - 1 - i] = sum8;
remaining >>= (sizeof(u8) * 8);
if (carry == 0 && remaining == 0) {
break;
}
}
}
} // namespace FileSys
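A small worked example of the carry propagation, assuming a four-byte big-endian counter and a caller inside the same codebase:
// 0x000001FF + 0x02 == 0x00000201; the carry out of the last byte ripples into the next one.
std::array<u8, 4> ctr{0x00, 0x00, 0x01, 0xFF};
FileSys::AddCounter(ctr.data(), ctr.size(), 0x02);
// ctr now holds {0x00, 0x00, 0x02, 0x01}.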

View File

@ -0,0 +1,12 @@
// SPDX-FileCopyrightText: Copyright 2023 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include "common/common_funcs.h"
namespace FileSys {
void AddCounter(void* counter, size_t counter_size, u64 value);
}

View File

@ -1,217 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#include <algorithm>
#include <array>
#include <cstddef>
#include <cstring>
#include "common/assert.h"
#include "core/crypto/aes_util.h"
#include "core/file_sys/nca_patch.h"
namespace FileSys {
namespace {
template <bool Subsection, typename BlockType, typename BucketType>
std::pair<std::size_t, std::size_t> SearchBucketEntry(u64 offset, const BlockType& block,
const BucketType& buckets) {
if constexpr (Subsection) {
const auto& last_bucket = buckets[block.number_buckets - 1];
if (offset >= last_bucket.entries[last_bucket.number_entries].address_patch) {
return {block.number_buckets - 1, last_bucket.number_entries};
}
} else {
ASSERT_MSG(offset <= block.size, "Offset is out of bounds in BKTR relocation block.");
}
std::size_t bucket_id = std::count_if(
block.base_offsets.begin() + 1, block.base_offsets.begin() + block.number_buckets,
[&offset](u64 base_offset) { return base_offset <= offset; });
const auto& bucket = buckets[bucket_id];
if (bucket.number_entries == 1) {
return {bucket_id, 0};
}
std::size_t low = 0;
std::size_t mid = 0;
std::size_t high = bucket.number_entries - 1;
while (low <= high) {
mid = (low + high) / 2;
if (bucket.entries[mid].address_patch > offset) {
high = mid - 1;
} else {
if (mid == bucket.number_entries - 1 ||
bucket.entries[mid + 1].address_patch > offset) {
return {bucket_id, mid};
}
low = mid + 1;
}
}
ASSERT_MSG(false, "Offset could not be found in BKTR block.");
return {0, 0};
}
} // Anonymous namespace
BKTR::BKTR(VirtualFile base_romfs_, VirtualFile bktr_romfs_, RelocationBlock relocation_,
std::vector<RelocationBucket> relocation_buckets_, SubsectionBlock subsection_,
std::vector<SubsectionBucket> subsection_buckets_, bool is_encrypted_,
Core::Crypto::Key128 key_, u64 base_offset_, u64 ivfc_offset_,
std::array<u8, 8> section_ctr_)
: relocation(relocation_), relocation_buckets(std::move(relocation_buckets_)),
subsection(subsection_), subsection_buckets(std::move(subsection_buckets_)),
base_romfs(std::move(base_romfs_)), bktr_romfs(std::move(bktr_romfs_)),
encrypted(is_encrypted_), key(key_), base_offset(base_offset_), ivfc_offset(ivfc_offset_),
section_ctr(section_ctr_) {
for (std::size_t i = 0; i < relocation.number_buckets - 1; ++i) {
relocation_buckets[i].entries.push_back({relocation.base_offsets[i + 1], 0, 0});
}
for (std::size_t i = 0; i < subsection.number_buckets - 1; ++i) {
subsection_buckets[i].entries.push_back({subsection_buckets[i + 1].entries[0].address_patch,
{0},
subsection_buckets[i + 1].entries[0].ctr});
}
relocation_buckets.back().entries.push_back({relocation.size, 0, 0});
}
BKTR::~BKTR() = default;
std::size_t BKTR::Read(u8* data, std::size_t length, std::size_t offset) const {
// Read out of bounds.
if (offset >= relocation.size) {
return 0;
}
const auto relocation_entry = GetRelocationEntry(offset);
const auto section_offset =
offset - relocation_entry.address_patch + relocation_entry.address_source;
const auto bktr_read = relocation_entry.from_patch;
const auto next_relocation = GetNextRelocationEntry(offset);
if (offset + length > next_relocation.address_patch) {
const u64 partition = next_relocation.address_patch - offset;
return Read(data, partition, offset) +
Read(data + partition, length - partition, offset + partition);
}
if (!bktr_read) {
ASSERT_MSG(section_offset >= ivfc_offset, "Offset calculation negative.");
return base_romfs->Read(data, length, section_offset - ivfc_offset);
}
if (!encrypted) {
return bktr_romfs->Read(data, length, section_offset);
}
const auto subsection_entry = GetSubsectionEntry(section_offset);
Core::Crypto::AESCipher<Core::Crypto::Key128> cipher(key, Core::Crypto::Mode::CTR);
// Calculate AES IV
std::array<u8, 16> iv{};
auto subsection_ctr = subsection_entry.ctr;
auto offset_iv = section_offset + base_offset;
for (std::size_t i = 0; i < section_ctr.size(); ++i) {
iv[i] = section_ctr[0x8 - i - 1];
}
offset_iv >>= 4;
for (std::size_t i = 0; i < sizeof(u64); ++i) {
iv[0xF - i] = static_cast<u8>(offset_iv & 0xFF);
offset_iv >>= 8;
}
for (std::size_t i = 0; i < sizeof(u32); ++i) {
iv[0x7 - i] = static_cast<u8>(subsection_ctr & 0xFF);
subsection_ctr >>= 8;
}
cipher.SetIV(iv);
const auto next_subsection = GetNextSubsectionEntry(section_offset);
if (section_offset + length > next_subsection.address_patch) {
const u64 partition = next_subsection.address_patch - section_offset;
return Read(data, partition, offset) +
Read(data + partition, length - partition, offset + partition);
}
const auto block_offset = section_offset & 0xF;
if (block_offset != 0) {
auto block = bktr_romfs->ReadBytes(0x10, section_offset & ~0xF);
cipher.Transcode(block.data(), block.size(), block.data(), Core::Crypto::Op::Decrypt);
if (length + block_offset < 0x10) {
std::memcpy(data, block.data() + block_offset, std::min(length, block.size()));
return std::min(length, block.size());
}
const auto read = 0x10 - block_offset;
std::memcpy(data, block.data() + block_offset, read);
return read + Read(data + read, length - read, offset + read);
}
const auto raw_read = bktr_romfs->Read(data, length, section_offset);
cipher.Transcode(data, raw_read, data, Core::Crypto::Op::Decrypt);
return raw_read;
}
RelocationEntry BKTR::GetRelocationEntry(u64 offset) const {
const auto res = SearchBucketEntry<false>(offset, relocation, relocation_buckets);
return relocation_buckets[res.first].entries[res.second];
}
RelocationEntry BKTR::GetNextRelocationEntry(u64 offset) const {
const auto res = SearchBucketEntry<false>(offset, relocation, relocation_buckets);
const auto bucket = relocation_buckets[res.first];
if (res.second + 1 < bucket.entries.size())
return bucket.entries[res.second + 1];
return relocation_buckets[res.first + 1].entries[0];
}
SubsectionEntry BKTR::GetSubsectionEntry(u64 offset) const {
const auto res = SearchBucketEntry<true>(offset, subsection, subsection_buckets);
return subsection_buckets[res.first].entries[res.second];
}
SubsectionEntry BKTR::GetNextSubsectionEntry(u64 offset) const {
const auto res = SearchBucketEntry<true>(offset, subsection, subsection_buckets);
const auto bucket = subsection_buckets[res.first];
if (res.second + 1 < bucket.entries.size())
return bucket.entries[res.second + 1];
return subsection_buckets[res.first + 1].entries[0];
}
std::string BKTR::GetName() const {
return base_romfs->GetName();
}
std::size_t BKTR::GetSize() const {
return relocation.size;
}
bool BKTR::Resize(std::size_t new_size) {
return false;
}
VirtualDir BKTR::GetContainingDirectory() const {
return base_romfs->GetContainingDirectory();
}
bool BKTR::IsWritable() const {
return false;
}
bool BKTR::IsReadable() const {
return true;
}
std::size_t BKTR::Write(const u8* data, std::size_t length, std::size_t offset) {
return 0;
}
bool BKTR::Rename(std::string_view name) {
return base_romfs->Rename(name);
}
} // namespace FileSys

View File

@ -1,145 +0,0 @@
// SPDX-FileCopyrightText: Copyright 2018 yuzu Emulator Project
// SPDX-License-Identifier: GPL-2.0-or-later
#pragma once
#include <array>
#include <memory>
#include <vector>
#include "common/common_funcs.h"
#include "common/common_types.h"
#include "common/swap.h"
#include "core/crypto/key_manager.h"
namespace FileSys {
#pragma pack(push, 1)
struct RelocationEntry {
u64_le address_patch;
u64_le address_source;
u32 from_patch;
};
#pragma pack(pop)
static_assert(sizeof(RelocationEntry) == 0x14, "RelocationEntry has incorrect size.");
struct RelocationBucketRaw {
INSERT_PADDING_BYTES(4);
u32_le number_entries;
u64_le end_offset;
std::array<RelocationEntry, 0x332> relocation_entries;
INSERT_PADDING_BYTES(8);
};
static_assert(sizeof(RelocationBucketRaw) == 0x4000, "RelocationBucketRaw has incorrect size.");
// Vector version of RelocationBucketRaw
struct RelocationBucket {
u32 number_entries;
u64 end_offset;
std::vector<RelocationEntry> entries;
};
struct RelocationBlock {
INSERT_PADDING_BYTES(4);
u32_le number_buckets;
u64_le size;
std::array<u64, 0x7FE> base_offsets;
};
static_assert(sizeof(RelocationBlock) == 0x4000, "RelocationBlock has incorrect size.");
struct SubsectionEntry {
u64_le address_patch;
INSERT_PADDING_BYTES(0x4);
u32_le ctr;
};
static_assert(sizeof(SubsectionEntry) == 0x10, "SubsectionEntry has incorrect size.");
struct SubsectionBucketRaw {
INSERT_PADDING_BYTES(4);
u32_le number_entries;
u64_le end_offset;
std::array<SubsectionEntry, 0x3FF> subsection_entries;
};
static_assert(sizeof(SubsectionBucketRaw) == 0x4000, "SubsectionBucketRaw has incorrect size.");
// Vector version of SubsectionBucketRaw
struct SubsectionBucket {
u32 number_entries;
u64 end_offset;
std::vector<SubsectionEntry> entries;
};
struct SubsectionBlock {
INSERT_PADDING_BYTES(4);
u32_le number_buckets;
u64_le size;
std::array<u64, 0x7FE> base_offsets;
};
static_assert(sizeof(SubsectionBlock) == 0x4000, "SubsectionBlock has incorrect size.");
inline RelocationBucket ConvertRelocationBucketRaw(RelocationBucketRaw raw) {
return {raw.number_entries,
raw.end_offset,
{raw.relocation_entries.begin(), raw.relocation_entries.begin() + raw.number_entries}};
}
inline SubsectionBucket ConvertSubsectionBucketRaw(SubsectionBucketRaw raw) {
return {raw.number_entries,
raw.end_offset,
{raw.subsection_entries.begin(), raw.subsection_entries.begin() + raw.number_entries}};
}
class BKTR : public VfsFile {
public:
BKTR(VirtualFile base_romfs, VirtualFile bktr_romfs, RelocationBlock relocation,
std::vector<RelocationBucket> relocation_buckets, SubsectionBlock subsection,
std::vector<SubsectionBucket> subsection_buckets, bool is_encrypted,
Core::Crypto::Key128 key, u64 base_offset, u64 ivfc_offset, std::array<u8, 8> section_ctr);
~BKTR() override;
std::size_t Read(u8* data, std::size_t length, std::size_t offset) const override;
std::string GetName() const override;
std::size_t GetSize() const override;
bool Resize(std::size_t new_size) override;
VirtualDir GetContainingDirectory() const override;
bool IsWritable() const override;
bool IsReadable() const override;
std::size_t Write(const u8* data, std::size_t length, std::size_t offset) override;
bool Rename(std::string_view name) override;
private:
RelocationEntry GetRelocationEntry(u64 offset) const;
RelocationEntry GetNextRelocationEntry(u64 offset) const;
SubsectionEntry GetSubsectionEntry(u64 offset) const;
SubsectionEntry GetNextSubsectionEntry(u64 offset) const;
RelocationBlock relocation;
std::vector<RelocationBucket> relocation_buckets;
SubsectionBlock subsection;
std::vector<SubsectionBucket> subsection_buckets;
// Should be the raw base romfs, decrypted.
VirtualFile base_romfs;
// Should be the raw BKTR romfs, (located at media_offset with size media_size).
VirtualFile bktr_romfs;
bool encrypted;
Core::Crypto::Key128 key;
// Base offset into NCA, used for IV calculation.
u64 base_offset;
// Distance between IVFC start and RomFS start, used for base reads
u64 ivfc_offset;
std::array<u8, 8> section_ctr;
};
} // namespace FileSys

View File

@ -141,8 +141,7 @@ VirtualDir PatchManager::PatchExeFS(VirtualDir exefs) const {
    const auto update_tid = GetUpdateTitleID(title_id);
    const auto update = content_provider.GetEntry(update_tid, ContentRecordType::Program);
-   if (!update_disabled && update != nullptr && update->GetExeFS() != nullptr &&
-       update->GetStatus() == Loader::ResultStatus::ErrorMissingBKTRBaseRomFS) {
+   if (!update_disabled && update != nullptr && update->GetExeFS() != nullptr) {
        LOG_INFO(Loader, "    ExeFS: Update ({}) applied successfully",
                 FormatTitleVersion(content_provider.GetEntryVersion(update_tid).value_or(0)));
        exefs = update->GetExeFS();
@ -358,11 +357,6 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t
        return;
    }
-   auto extracted = ExtractRomFS(romfs);
-   if (extracted == nullptr) {
-       return;
-   }
-
    const auto& disabled = Settings::values.disabled_addons[title_id];
    std::vector<VirtualDir> patch_dirs = load_dir->GetSubdirectories();
    if (std::find(disabled.cbegin(), disabled.cend(), "SDMC") == disabled.cend()) {
@ -394,6 +388,11 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t
        return;
    }
+   auto extracted = ExtractRomFS(romfs);
+   if (extracted == nullptr) {
+       return;
+   }
+
    layers.push_back(std::move(extracted));

    auto layered = LayeredVfsDirectory::MakeLayeredDirectory(std::move(layers));
@ -412,39 +411,43 @@ static void ApplyLayeredFS(VirtualFile& romfs, u64 title_id, ContentRecordType t
    romfs = std::move(packed);
}

-VirtualFile PatchManager::PatchRomFS(VirtualFile romfs, u64 ivfc_offset, ContentRecordType type,
-                                     VirtualFile update_raw, bool apply_layeredfs) const {
+VirtualFile PatchManager::PatchRomFS(const NCA* base_nca, VirtualFile base_romfs,
+                                     ContentRecordType type, VirtualFile packed_update_raw,
+                                     bool apply_layeredfs) const {
    const auto log_string = fmt::format("Patching RomFS for title_id={:016X}, type={:02X}",
                                        title_id, static_cast<u8>(type));

    if (type == ContentRecordType::Program || type == ContentRecordType::Data) {
        LOG_INFO(Loader, "{}", log_string);
    } else {
        LOG_DEBUG(Loader, "{}", log_string);
    }

-   if (romfs == nullptr) {
-       return romfs;
+   if (base_romfs == nullptr) {
+       return base_romfs;
    }

+   auto romfs = base_romfs;
+
    // Game Updates
    const auto update_tid = GetUpdateTitleID(title_id);
-   const auto update = content_provider.GetEntryRaw(update_tid, type);
+   const auto update_raw = content_provider.GetEntryRaw(update_tid, type);

    const auto& disabled = Settings::values.disabled_addons[title_id];
    const auto update_disabled =
        std::find(disabled.cbegin(), disabled.cend(), "Update") != disabled.cend();

-   if (!update_disabled && update != nullptr) {
-       const auto new_nca = std::make_shared<NCA>(update, romfs, ivfc_offset);
+   if (!update_disabled && update_raw != nullptr && base_nca != nullptr) {
+       const auto new_nca = std::make_shared<NCA>(update_raw, base_nca);
        if (new_nca->GetStatus() == Loader::ResultStatus::Success &&
            new_nca->GetRomFS() != nullptr) {
            LOG_INFO(Loader, "    RomFS: Update ({}) applied successfully",
                     FormatTitleVersion(content_provider.GetEntryVersion(update_tid).value_or(0)));
            romfs = new_nca->GetRomFS();
-           const auto version =
-               FormatTitleVersion(content_provider.GetEntryVersion(update_tid).value_or(0));
        }
-   } else if (!update_disabled && update_raw != nullptr) {
-       const auto new_nca = std::make_shared<NCA>(update_raw, romfs, ivfc_offset);
+   } else if (!update_disabled && packed_update_raw != nullptr && base_nca != nullptr) {
+       const auto new_nca = std::make_shared<NCA>(packed_update_raw, base_nca);
        if (new_nca->GetStatus() == Loader::ResultStatus::Success &&
            new_nca->GetRomFS() != nullptr) {
            LOG_INFO(Loader, "    RomFS: Update (PACKED) applied successfully");
@ -608,7 +611,7 @@ PatchManager::Metadata PatchManager::ParseControlNCA(const NCA& nca) const {
        return {};
    }

-   const auto romfs = PatchRomFS(base_romfs, nca.GetBaseIVFCOffset(), ContentRecordType::Control);
+   const auto romfs = PatchRomFS(&nca, base_romfs, ContentRecordType::Control);
    if (romfs == nullptr) {
        return {};
    }

View File

@ -61,9 +61,9 @@ public:
    // Currently tracked RomFS patches:
    // - Game Updates
    // - LayeredFS
-   [[nodiscard]] VirtualFile PatchRomFS(VirtualFile base, u64 ivfc_offset,
+   [[nodiscard]] VirtualFile PatchRomFS(const NCA* base_nca, VirtualFile base_romfs,
                                         ContentRecordType type = ContentRecordType::Program,
-                                        VirtualFile update_raw = nullptr,
+                                        VirtualFile packed_update_raw = nullptr,
                                         bool apply_layeredfs = true) const;

    // Returns a vector of pairs between patch names and patch versions.
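
A minimal sketch of a call site under the new signature, assuming a ContentProvider and FileSystemController are in scope; the base NCA pointer replaces the old ivfc_offset argument, and the pattern mirrors the RomFSFactory changes later in this commit.

// Sketch only; title_id, content_provider and filesystem_controller are assumed
// to be in scope. With a null base NCA, PatchRomFS skips update application.
const auto base_nca = content_provider.GetEntry(title_id, ContentRecordType::Program);
const PatchManager pm{title_id, filesystem_controller, content_provider};
const VirtualFile patched =
    pm.PatchRomFS(base_nca.get(), base_nca ? base_nca->GetRomFS() : nullptr,
                  ContentRecordType::Program, /*packed_update_raw=*/nullptr);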

View File

@ -416,9 +416,9 @@ void RegisteredCache::ProcessFiles(const std::vector<NcaID>& ids) {
        if (file == nullptr)
            continue;

-       const auto nca = std::make_shared<NCA>(parser(file, id), nullptr, 0);
+       const auto nca = std::make_shared<NCA>(parser(file, id));
        if (nca->GetStatus() != Loader::ResultStatus::Success ||
-           nca->GetType() != NCAContentType::Meta) {
+           nca->GetType() != NCAContentType::Meta || nca->GetSubdirectories().empty()) {
            continue;
        }
@ -500,7 +500,7 @@ std::unique_ptr<NCA> RegisteredCache::GetEntry(u64 title_id, ContentRecordType t
    const auto raw = GetEntryRaw(title_id, type);
    if (raw == nullptr)
        return nullptr;
-   return std::make_unique<NCA>(raw, nullptr, 0);
+   return std::make_unique<NCA>(raw);
}

template <typename T>
@ -964,7 +964,7 @@ std::unique_ptr<NCA> ManualContentProvider::GetEntry(u64 title_id, ContentRecord
    const auto res = GetEntryRaw(title_id, type);
    if (res == nullptr)
        return nullptr;
-   return std::make_unique<NCA>(res, nullptr, 0);
+   return std::make_unique<NCA>(res);
}

std::vector<ContentProviderEntry> ManualContentProvider::ListEntriesFilter(

View File

@ -26,13 +26,12 @@ RomFSFactory::RomFSFactory(Loader::AppLoader& app_loader, ContentProvider& provi
    }

    updatable = app_loader.IsRomFSUpdatable();
-   ivfc_offset = app_loader.ReadRomFSIVFCOffset();
}

RomFSFactory::~RomFSFactory() = default;

void RomFSFactory::SetPackedUpdate(VirtualFile update_raw_file) {
-   update_raw = std::move(update_raw_file);
+   packed_update_raw = std::move(update_raw_file);
}

VirtualFile RomFSFactory::OpenCurrentProcess(u64 current_process_title_id) const {
@ -40,9 +39,11 @@ VirtualFile RomFSFactory::OpenCurrentProcess(u64 current_process_title_id) const
        return file;
    }

+   const auto type = ContentRecordType::Program;
+   const auto nca = content_provider.GetEntry(current_process_title_id, type);
    const PatchManager patch_manager{current_process_title_id, filesystem_controller,
                                     content_provider};
-   return patch_manager.PatchRomFS(file, ivfc_offset, ContentRecordType::Program, update_raw);
+   return patch_manager.PatchRomFS(nca.get(), file, ContentRecordType::Program, packed_update_raw);
}

VirtualFile RomFSFactory::OpenPatchedRomFS(u64 title_id, ContentRecordType type) const {
@ -54,7 +55,7 @@ VirtualFile RomFSFactory::OpenPatchedRomFS(u64 title_id, ContentRecordType type)
    const PatchManager patch_manager{title_id, filesystem_controller, content_provider};

-   return patch_manager.PatchRomFS(nca->GetRomFS(), nca->GetBaseIVFCOffset(), type);
+   return patch_manager.PatchRomFS(nca.get(), nca->GetRomFS(), type);
}

VirtualFile RomFSFactory::OpenPatchedRomFSWithProgramIndex(u64 title_id, u8 program_index,

View File

@ -40,21 +40,22 @@ public:
                 Service::FileSystem::FileSystemController& controller);
    ~RomFSFactory();

-   void SetPackedUpdate(VirtualFile update_raw_file);
+   void SetPackedUpdate(VirtualFile packed_update_raw);
    [[nodiscard]] VirtualFile OpenCurrentProcess(u64 current_process_title_id) const;
    [[nodiscard]] VirtualFile OpenPatchedRomFS(u64 title_id, ContentRecordType type) const;
    [[nodiscard]] VirtualFile OpenPatchedRomFSWithProgramIndex(u64 title_id, u8 program_index,
                                                               ContentRecordType type) const;
    [[nodiscard]] VirtualFile Open(u64 title_id, StorageId storage, ContentRecordType type) const;

-private:
    [[nodiscard]] std::shared_ptr<NCA> GetEntry(u64 title_id, StorageId storage,
                                                ContentRecordType type) const;

+private:
    VirtualFile file;
-   VirtualFile update_raw;
+   VirtualFile packed_update_raw;
+   VirtualFile base;
    bool updatable;
-   u64 ivfc_offset;

    ContentProvider& content_provider;
    Service::FileSystem::FileSystemController& filesystem_controller;

View File

@ -249,7 +249,7 @@ void NSP::ReadNCAs(const std::vector<VirtualFile>& files) {
            }

            const auto nca = std::make_shared<NCA>(outer_file);
-           if (nca->GetStatus() != Loader::ResultStatus::Success) {
+           if (nca->GetStatus() != Loader::ResultStatus::Success || nca->GetSubdirectories().empty()) {
                program_status[nca->GetTitleId()] = nca->GetStatus();
                continue;
            }
@ -280,7 +280,7 @@ void NSP::ReadNCAs(const std::vector<VirtualFile>& files) {
                continue;
            }

-           auto next_nca = std::make_shared<NCA>(std::move(next_file), nullptr, 0);
+           auto next_nca = std::make_shared<NCA>(std::move(next_file));
            if (next_nca->GetType() == NCAContentType::Program) {
                program_status[next_nca->GetTitleId()] = next_nca->GetStatus();

View File

@ -139,7 +139,7 @@ FileSys::VirtualFile GetOfflineRomFS(Core::System& system, u64 title_id,
        const FileSys::PatchManager pm{title_id, system.GetFileSystemController(),
                                       system.GetContentProvider()};
-       return pm.PatchRomFS(nca->GetRomFS(), nca->GetBaseIVFCOffset(), nca_type);
+       return pm.PatchRomFS(nca.get(), nca->GetRomFS(), nca_type);
    }
}

View File

@ -373,6 +373,11 @@ FileSys::VirtualFile FileSystemController::OpenRomFS(u64 title_id, FileSys::Stor
    return romfs_factory->Open(title_id, storage_id, type);
}

+std::shared_ptr<FileSys::NCA> FileSystemController::OpenBaseNca(
+    u64 title_id, FileSys::StorageId storage_id, FileSys::ContentRecordType type) const {
+    return romfs_factory->GetEntry(title_id, storage_id, type);
+}
+
Result FileSystemController::CreateSaveData(FileSys::VirtualDir* out_save_data,
                                            FileSys::SaveDataSpaceId space,
                                            const FileSys::SaveDataAttribute& save_struct) const {

View File

@ -15,6 +15,7 @@ class System;
namespace FileSys {

class BISFactory;
+class NCA;
class RegisteredCache;
class RegisteredCacheUnion;
class PlaceholderCache;
@ -70,6 +71,8 @@ public:
                                           FileSys::ContentRecordType type) const;
    FileSys::VirtualFile OpenRomFS(u64 title_id, FileSys::StorageId storage_id,
                                   FileSys::ContentRecordType type) const;
+   std::shared_ptr<FileSys::NCA> OpenBaseNca(u64 title_id, FileSys::StorageId storage_id,
+                                             FileSys::ContentRecordType type) const;
    Result CreateSaveData(FileSys::VirtualDir* out_save_data, FileSys::SaveDataSpaceId space,
                          const FileSys::SaveDataAttribute& save_struct) const;
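
OpenBaseNca exists so that HLE services can hand the base NCA to PatchManager::PatchRomFS. Roughly, the expected pattern looks like the sketch below, with fsc and pm assumed to be in scope; the fsp_srv change later in this commit follows it for Data-type content.

// Sketch only; fsc is a FileSystemController and pm a FileSys::PatchManager,
// both assumed to be in scope as they are in the service code.
auto data = fsc.OpenRomFS(title_id, storage_id, FileSys::ContentRecordType::Data);
auto base = fsc.OpenBaseNca(title_id, storage_id, FileSys::ContentRecordType::Data);
auto patched = pm.PatchRomFS(base.get(), std::move(data), FileSys::ContentRecordType::Data);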

View File

@ -1029,8 +1029,9 @@ void FSP_SRV::OpenDataStorageByDataId(HLERequestContext& ctx) {
    const FileSys::PatchManager pm{title_id, fsc, content_provider};

+   auto base = fsc.OpenBaseNca(title_id, storage_id, FileSys::ContentRecordType::Data);
    auto storage = std::make_shared<IStorage>(
-       system, pm.PatchRomFS(std::move(data), 0, FileSys::ContentRecordType::Data));
+       system, pm.PatchRomFS(base.get(), std::move(data), FileSys::ContentRecordType::Data));

    IPC::ResponseBuilder rb{ctx, 2, 0, 1};
    rb.Push(ResultSuccess);

View File

@ -135,7 +135,7 @@ constexpr std::array<const char*, 66> RESULT_MESSAGES{
    "The titlekey and/or titlekek is incorrect or the section header is invalid.",
    "The XCI file is missing a Program-type NCA.",
    "The NCA file is not an application.",
-   "The ExeFS partition could not be found.",
+   "The Program-type NCA contains no executable. An update may be required.",
    "The XCI file has a bad header.",
    "The XCI file is missing a partition.",
    "The file could not be found or does not exist.",
@ -169,7 +169,7 @@ constexpr std::array<const char*, 66> RESULT_MESSAGES{
    "The BKTR-type NCA has a bad Subsection block.",
    "The BKTR-type NCA has a bad Relocation bucket.",
    "The BKTR-type NCA has a bad Subsection bucket.",
-   "The BKTR-type NCA is missing the base RomFS.",
+   "Game updates cannot be loaded directly. Load the base game instead.",
    "The NSP or XCI does not contain an update in addition to the base game.",
    "The KIP file has a bad header.",
    "The KIP BLZ decompression of the section failed unexpectedly.",

View File

@ -79,8 +79,6 @@ enum class ResultStatus : u16 {
    ErrorBadPFSHeader,
    ErrorIncorrectPFSFileSize,
    ErrorBadNCAHeader,
-   ErrorCompressedNCA,
-   ErrorSparseNCA,
    ErrorMissingProductionKeyFile,
    ErrorMissingHeaderKey,
    ErrorIncorrectHeaderKey,
@ -275,16 +273,6 @@ public:
        return true;
    }

-   /**
-    * Gets the difference between the start of the IVFC header and the start of level 6 (RomFS)
-    * data. Needed for BKTR patching.
-    *
-    * @return IVFC offset for RomFS.
-    */
-   virtual u64 ReadRomFSIVFCOffset() const {
-       return 0;
-   }
-
    /**
     * Get the title of the application
     *

View File

@ -76,10 +76,6 @@ ResultStatus AppLoader_NAX::ReadRomFS(FileSys::VirtualFile& dir) {
    return nca_loader->ReadRomFS(dir);
}

-u64 AppLoader_NAX::ReadRomFSIVFCOffset() const {
-    return nca_loader->ReadRomFSIVFCOffset();
-}
-
ResultStatus AppLoader_NAX::ReadProgramId(u64& out_program_id) {
    return nca_loader->ReadProgramId(out_program_id);
}

View File

@ -39,7 +39,6 @@ public:
    LoadResult Load(Kernel::KProcess& process, Core::System& system) override;

    ResultStatus ReadRomFS(FileSys::VirtualFile& dir) override;
-   u64 ReadRomFSIVFCOffset() const override;
    ResultStatus ReadProgramId(u64& out_program_id) override;
    ResultStatus ReadBanner(std::vector<u8>& buffer) override;

View File

@ -5,6 +5,8 @@
#include "core/core.h"
#include "core/file_sys/content_archive.h"
+#include "core/file_sys/nca_metadata.h"
+#include "core/file_sys/registered_cache.h"
#include "core/file_sys/romfs_factory.h"
#include "core/hle/kernel/k_process.h"
#include "core/hle/service/filesystem/filesystem.h"
@ -43,9 +45,23 @@ AppLoader_NCA::LoadResult AppLoader_NCA::Load(Kernel::KProcess& process, Core::S
        return {ResultStatus::ErrorNCANotProgram, {}};
    }

-   const auto exefs = nca->GetExeFS();
+   auto exefs = nca->GetExeFS();
    if (exefs == nullptr) {
-       return {ResultStatus::ErrorNoExeFS, {}};
+       LOG_INFO(Loader, "No ExeFS found in NCA, looking for ExeFS from update");
+
+       // This NCA may be a sparse base of an installed title.
+       // Try to fetch the ExeFS from the installed update.
+       const auto& installed = system.GetContentProvider();
+       const auto update_nca = installed.GetEntry(FileSys::GetUpdateTitleID(nca->GetTitleId()),
+                                                  FileSys::ContentRecordType::Program);
+       if (update_nca) {
+           exefs = update_nca->GetExeFS();
+       }
+
+       if (exefs == nullptr) {
+           return {ResultStatus::ErrorNoExeFS, {}};
+       }
    }

    directory_loader = std::make_unique<AppLoader_DeconstructedRomDirectory>(exefs, true);
@ -77,14 +93,6 @@ ResultStatus AppLoader_NCA::ReadRomFS(FileSys::VirtualFile& dir) {
    return ResultStatus::Success;
}

-u64 AppLoader_NCA::ReadRomFSIVFCOffset() const {
-    if (nca == nullptr) {
-        return 0;
-    }
-
-    return nca->GetBaseIVFCOffset();
-}
-
ResultStatus AppLoader_NCA::ReadProgramId(u64& out_program_id) {
    if (nca == nullptr || nca->GetStatus() != ResultStatus::Success) {
        return ResultStatus::ErrorNotInitialized;

View File

@ -40,7 +40,6 @@ public:
    LoadResult Load(Kernel::KProcess& process, Core::System& system) override;

    ResultStatus ReadRomFS(FileSys::VirtualFile& dir) override;
-   u64 ReadRomFSIVFCOffset() const override;
    ResultStatus ReadProgramId(u64& out_program_id) override;
    ResultStatus ReadBanner(std::vector<u8>& buffer) override;

View File

@ -121,10 +121,6 @@ ResultStatus AppLoader_NSP::ReadRomFS(FileSys::VirtualFile& out_file) {
    return secondary_loader->ReadRomFS(out_file);
}

-u64 AppLoader_NSP::ReadRomFSIVFCOffset() const {
-    return secondary_loader->ReadRomFSIVFCOffset();
-}
-
ResultStatus AppLoader_NSP::ReadUpdateRaw(FileSys::VirtualFile& out_file) {
    if (nsp->IsExtractedType()) {
        return ResultStatus::ErrorNoPackedUpdate;

View File

@ -46,7 +46,6 @@ public:
    LoadResult Load(Kernel::KProcess& process, Core::System& system) override;

    ResultStatus ReadRomFS(FileSys::VirtualFile& out_file) override;
-   u64 ReadRomFSIVFCOffset() const override;
    ResultStatus ReadUpdateRaw(FileSys::VirtualFile& out_file) override;
    ResultStatus ReadProgramId(u64& out_program_id) override;
    ResultStatus ReadProgramIds(std::vector<u64>& out_program_ids) override;

View File

@ -89,10 +89,6 @@ ResultStatus AppLoader_XCI::ReadRomFS(FileSys::VirtualFile& out_file) {
    return nca_loader->ReadRomFS(out_file);
}

-u64 AppLoader_XCI::ReadRomFSIVFCOffset() const {
-    return nca_loader->ReadRomFSIVFCOffset();
-}
-
ResultStatus AppLoader_XCI::ReadUpdateRaw(FileSys::VirtualFile& out_file) {
    u64 program_id{};
    nca_loader->ReadProgramId(program_id);

View File

@ -46,7 +46,6 @@ public:
    LoadResult Load(Kernel::KProcess& process, Core::System& system) override;

    ResultStatus ReadRomFS(FileSys::VirtualFile& out_file) override;
-   u64 ReadRomFSIVFCOffset() const override;
    ResultStatus ReadUpdateRaw(FileSys::VirtualFile& out_file) override;
    ResultStatus ReadProgramId(u64& out_program_id) override;
    ResultStatus ReadProgramIds(std::vector<u64>& out_program_ids) override;

View File

@ -2535,8 +2535,8 @@ void GMainWindow::OnGameListDumpRomFS(u64 program_id, const std::string& game_pa
        return;
    }

-   FileSys::VirtualFile file;
-   if (loader->ReadRomFS(file) != Loader::ResultStatus::Success) {
+   FileSys::VirtualFile base_romfs;
+   if (loader->ReadRomFS(base_romfs) != Loader::ResultStatus::Success) {
        failed();
        return;
    }
@ -2549,6 +2549,14 @@ void GMainWindow::OnGameListDumpRomFS(u64 program_id, const std::string& game_pa
        return;
    }

+   const auto type = *romfs_title_id == program_id ? FileSys::ContentRecordType::Program
+                                                   : FileSys::ContentRecordType::Data;
+   const auto base_nca = installed.GetEntry(*romfs_title_id, type);
+   if (!base_nca) {
+       failed();
+       return;
+   }
+
    const auto dump_dir =
        target == DumpRomFSTarget::Normal
            ? Common::FS::GetYuzuPath(Common::FS::YuzuPath::DumpDir)
@ -2560,12 +2568,10 @@ void GMainWindow::OnGameListDumpRomFS(u64 program_id, const std::string& game_pa
    FileSys::VirtualFile romfs;

    if (*romfs_title_id == program_id) {
-       const u64 ivfc_offset = loader->ReadRomFSIVFCOffset();
        const FileSys::PatchManager pm{program_id, system->GetFileSystemController(), installed};
-       romfs =
-           pm.PatchRomFS(file, ivfc_offset, FileSys::ContentRecordType::Program, nullptr, false);
+       romfs = pm.PatchRomFS(base_nca.get(), base_romfs, type, nullptr, false);
    } else {
-       romfs = installed.GetEntry(*romfs_title_id, FileSys::ContentRecordType::Data)->GetRomFS();
+       romfs = installed.GetEntry(*romfs_title_id, type)->GetRomFS();
    }

    const auto extracted = FileSys::ExtractRomFS(romfs, FileSys::RomFSExtractionType::Full);