yuzu-emu/yuzu-android

hle: kernel: Re-create memory layout at initialization.

- As this can only be derived once.
bunnei 2022-02-27 11:05:07 -08:00
parent 16e5954fcb
commit 14d28a043d
1 changed file with 43 additions and 41 deletions
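Context for the change: a KMemoryLayout's region trees can only be derived once per instance, so instead of keeping the layout as a by-value member that would be re-derived in place on a second initialization, the kernel now owns it through a std::unique_ptr and re-creates it at the top of DeriveInitialMemoryLayout(). Below is a minimal standalone sketch of that pattern; Layout, KernelImpl, and the derived_ flag are illustrative stand-ins, not yuzu's actual API.

#include <cassert>
#include <memory>

namespace sketch {

// Stand-in for KMemoryLayout: deriving twice on one instance is invalid,
// which the assert below makes explicit.
class Layout {
public:
    void Derive() {
        assert(!derived_ && "layout may only be derived once");
        derived_ = true;
    }

private:
    bool derived_{false};
};

// Stand-in for KernelCore::Impl.
struct KernelImpl {
    void Initialize() {
        // Re-create rather than reuse: every initialization gets a fresh,
        // underived Layout, mirroring
        //     memory_layout = std::make_unique<KMemoryLayout>();
        // in the diff below.
        layout = std::make_unique<Layout>();
        layout->Derive();
    }

    // Previously a by-value member (Layout layout;): a second Initialize()
    // would then have called Derive() on an already-derived instance.
    std::unique_ptr<Layout> layout;
};

} // namespace sketch

int main() {
    sketch::KernelImpl impl;
    impl.Initialize();
    impl.Initialize(); // Safe: the Layout is re-created before each derivation.
    return 0;
}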


@@ -71,7 +71,7 @@ struct KernelCore::Impl {
         // Derive the initial memory layout from the emulated board
         Init::InitializeSlabResourceCounts(kernel);
         DeriveInitialMemoryLayout();
-        Init::InitializeSlabHeaps(system, memory_layout);
+        Init::InitializeSlabHeaps(system, *memory_layout);

         // Initialize kernel memory and resources.
         InitializeSystemResourceLimit(kernel, system.CoreTiming());
@@ -222,7 +222,7 @@ struct KernelCore::Impl {
         system_resource_limit = KResourceLimit::Create(system.Kernel());
         system_resource_limit->Initialize(&core_timing);

-        const auto [total_size, kernel_size] = memory_layout.GetTotalAndKernelMemorySizes();
+        const auto [total_size, kernel_size] = memory_layout->GetTotalAndKernelMemorySizes();

         // If setting the default system values fails, then something seriously wrong has occurred.
         ASSERT(system_resource_limit->SetLimitValue(LimitableResource::PhysicalMemory, total_size)
@@ -352,15 +352,17 @@ struct KernelCore::Impl {
     }

     void DeriveInitialMemoryLayout() {
+        memory_layout = std::make_unique<KMemoryLayout>();
+
         // Insert the root region for the virtual memory tree, from which all other regions will
         // derive.
-        memory_layout.GetVirtualMemoryRegionTree().InsertDirectly(
+        memory_layout->GetVirtualMemoryRegionTree().InsertDirectly(
             KernelVirtualAddressSpaceBase,
             KernelVirtualAddressSpaceBase + KernelVirtualAddressSpaceSize - 1);

         // Insert the root region for the physical memory tree, from which all other regions will
         // derive.
-        memory_layout.GetPhysicalMemoryRegionTree().InsertDirectly(
+        memory_layout->GetPhysicalMemoryRegionTree().InsertDirectly(
             KernelPhysicalAddressSpaceBase,
             KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceSize - 1);
@@ -377,7 +379,7 @@ struct KernelCore::Impl {
         if (!(kernel_region_start + KernelRegionSize - 1 <= KernelVirtualAddressSpaceLast)) {
             kernel_region_size = KernelVirtualAddressSpaceEnd - kernel_region_start;
         }
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             kernel_region_start, kernel_region_size, KMemoryRegionType_Kernel));

         // Setup the code region.
@@ -386,11 +388,11 @@ struct KernelCore::Impl {
             Common::AlignDown(code_start_virt_addr, CodeRegionAlign);
         constexpr VAddr code_region_end = Common::AlignUp(code_end_virt_addr, CodeRegionAlign);
         constexpr size_t code_region_size = code_region_end - code_region_start;
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             code_region_start, code_region_size, KMemoryRegionType_KernelCode));

         // Setup board-specific device physical regions.
-        Init::SetupDevicePhysicalMemoryRegions(memory_layout);
+        Init::SetupDevicePhysicalMemoryRegions(*memory_layout);

         // Determine the amount of space needed for the misc region.
         size_t misc_region_needed_size;
@@ -399,7 +401,7 @@ struct KernelCore::Impl {
         misc_region_needed_size = Core::Hardware::NUM_CPU_CORES * (3 * (PageSize + PageSize));

         // Account for each auto-map device.
-        for (const auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+        for (const auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
             if (region.HasTypeAttribute(KMemoryRegionAttr_ShouldKernelMap)) {
                 // Check that the region is valid.
                 ASSERT(region.GetEndAddress() != 0);
@@ -424,22 +426,22 @@ struct KernelCore::Impl {
         // Setup the misc region.
         const VAddr misc_region_start =
-            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+            memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                 misc_region_size, MiscRegionAlign, KMemoryRegionType_Kernel);
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             misc_region_start, misc_region_size, KMemoryRegionType_KernelMisc));

         // Setup the stack region.
         constexpr size_t StackRegionSize = 14_MiB;
         constexpr size_t StackRegionAlign = KernelAslrAlignment;
         const VAddr stack_region_start =
-            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+            memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                 StackRegionSize, StackRegionAlign, KMemoryRegionType_Kernel);
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             stack_region_start, StackRegionSize, KMemoryRegionType_KernelStack));

         // Determine the size of the resource region.
-        const size_t resource_region_size = memory_layout.GetResourceRegionSizeForInit();
+        const size_t resource_region_size = memory_layout->GetResourceRegionSizeForInit();

         // Determine the size of the slab region.
         const size_t slab_region_size =
@@ -456,23 +458,23 @@ struct KernelCore::Impl {
             Common::AlignUp(code_end_phys_addr + slab_region_size, SlabRegionAlign) -
             Common::AlignDown(code_end_phys_addr, SlabRegionAlign);
         const VAddr slab_region_start =
-            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+            memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                 slab_region_needed_size, SlabRegionAlign, KMemoryRegionType_Kernel) +
             (code_end_phys_addr % SlabRegionAlign);
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             slab_region_start, slab_region_size, KMemoryRegionType_KernelSlab));

         // Setup the temp region.
         constexpr size_t TempRegionSize = 128_MiB;
         constexpr size_t TempRegionAlign = KernelAslrAlignment;
         const VAddr temp_region_start =
-            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
+            memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegion(
                 TempRegionSize, TempRegionAlign, KMemoryRegionType_Kernel);
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
-                                                                 KMemoryRegionType_KernelTemp));
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(temp_region_start, TempRegionSize,
+                                                                  KMemoryRegionType_KernelTemp));

         // Automatically map in devices that have auto-map attributes.
-        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+        for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
             // We only care about kernel regions.
             if (!region.IsDerivedFrom(KMemoryRegionType_Kernel)) {
                 continue;
@@ -499,21 +501,21 @@ struct KernelCore::Impl {
             const size_t map_size =
                 Common::AlignUp(region.GetEndAddress(), PageSize) - map_phys_addr;
             const VAddr map_virt_addr =
-                memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
+                memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
                     map_size, PageSize, KMemoryRegionType_KernelMisc, PageSize);
-            ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+            ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
                 map_virt_addr, map_size, KMemoryRegionType_KernelMiscMappedDevice));
             region.SetPairAddress(map_virt_addr + region.GetAddress() - map_phys_addr);
         }

-        Init::SetupDramPhysicalMemoryRegions(memory_layout);
+        Init::SetupDramPhysicalMemoryRegions(*memory_layout);

         // Insert a physical region for the kernel code region.
-        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
             code_start_phys_addr, code_region_size, KMemoryRegionType_DramKernelCode));

         // Insert a physical region for the kernel slab region.
-        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
             slab_start_phys_addr, slab_region_size, KMemoryRegionType_DramKernelSlab));

         // Determine size available for kernel page table heaps, requiring > 8 MB.
@@ -522,12 +524,12 @@ struct KernelCore::Impl {
         ASSERT(page_table_heap_size / 4_MiB > 2);

         // Insert a physical region for the kernel page table heap region
-        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
             slab_end_phys_addr, page_table_heap_size, KMemoryRegionType_DramKernelPtHeap));

         // All DRAM regions that we haven't tagged by this point will be mapped under the linear
         // mapping. Tag them.
-        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+        for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
             if (region.GetType() == KMemoryRegionType_Dram) {
                 // Check that the region is valid.
                 ASSERT(region.GetEndAddress() != 0);
@@ -539,7 +541,7 @@ struct KernelCore::Impl {
         // Get the linear region extents.
         const auto linear_extents =
-            memory_layout.GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
+            memory_layout->GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(
                 KMemoryRegionAttr_LinearMapped);

         ASSERT(linear_extents.GetEndAddress() != 0);
@@ -551,7 +553,7 @@ struct KernelCore::Impl {
             Common::AlignUp(linear_extents.GetEndAddress(), LinearRegionAlign) -
             aligned_linear_phys_start;
         const VAddr linear_region_start =
-            memory_layout.GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
+            memory_layout->GetVirtualMemoryRegionTree().GetRandomAlignedRegionWithGuard(
                 linear_region_size, LinearRegionAlign, KMemoryRegionType_None, LinearRegionAlign);

         const u64 linear_region_phys_to_virt_diff = linear_region_start - aligned_linear_phys_start;
@@ -560,7 +562,7 @@ struct KernelCore::Impl {
         {
             PAddr cur_phys_addr = 0;
             u64 cur_size = 0;
-            for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+            for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
                 if (!region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
                     continue;
                 }
@@ -579,47 +581,47 @@ struct KernelCore::Impl {
                 const VAddr region_virt_addr =
                     region.GetAddress() + linear_region_phys_to_virt_diff;
-                ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+                ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
                     region_virt_addr, region.GetSize(),
                     GetTypeForVirtualLinearMapping(region.GetType())));
                 region.SetPairAddress(region_virt_addr);

                 KMemoryRegion* virt_region =
-                    memory_layout.GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
+                    memory_layout->GetVirtualMemoryRegionTree().FindModifiable(region_virt_addr);
                 ASSERT(virt_region != nullptr);
                 virt_region->SetPairAddress(region.GetAddress());
             }
         }

         // Insert regions for the initial page table region.
-        ASSERT(memory_layout.GetPhysicalMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetPhysicalMemoryRegionTree().Insert(
             resource_end_phys_addr, KernelPageTableHeapSize, KMemoryRegionType_DramKernelInitPt));
-        ASSERT(memory_layout.GetVirtualMemoryRegionTree().Insert(
+        ASSERT(memory_layout->GetVirtualMemoryRegionTree().Insert(
             resource_end_phys_addr + linear_region_phys_to_virt_diff, KernelPageTableHeapSize,
             KMemoryRegionType_VirtualDramKernelInitPt));

         // All linear-mapped DRAM regions that we haven't tagged by this point will be allocated to
         // some pool partition. Tag them.
-        for (auto& region : memory_layout.GetPhysicalMemoryRegionTree()) {
+        for (auto& region : memory_layout->GetPhysicalMemoryRegionTree()) {
             if (region.GetType() == (KMemoryRegionType_Dram | KMemoryRegionAttr_LinearMapped)) {
                 region.SetType(KMemoryRegionType_DramPoolPartition);
             }
         }

         // Setup all other memory regions needed to arrange the pool partitions.
-        Init::SetupPoolPartitionMemoryRegions(memory_layout);
+        Init::SetupPoolPartitionMemoryRegions(*memory_layout);

         // Cache all linear regions in their own trees for faster access, later.
-        memory_layout.InitializeLinearMemoryRegionTrees(aligned_linear_phys_start,
-                                                        linear_region_start);
+        memory_layout->InitializeLinearMemoryRegionTrees(aligned_linear_phys_start,
+                                                         linear_region_start);
     }

     void InitializeMemoryLayout() {
-        const auto system_pool = memory_layout.GetKernelSystemPoolRegionPhysicalExtents();
+        const auto system_pool = memory_layout->GetKernelSystemPoolRegionPhysicalExtents();

         // Initialize the memory manager.
         memory_manager = std::make_unique<KMemoryManager>(system);
-        const auto& management_region = memory_layout.GetPoolManagementRegion();
+        const auto& management_region = memory_layout->GetPoolManagementRegion();
         ASSERT(management_region.GetEndAddress() != 0);
         memory_manager->Initialize(management_region.GetAddress(), management_region.GetSize());
@ -763,7 +765,7 @@ struct KernelCore::Impl {
Kernel::KSharedMemory* time_shared_mem{}; Kernel::KSharedMemory* time_shared_mem{};
// Memory layout // Memory layout
KMemoryLayout memory_layout; std::unique_ptr<KMemoryLayout> memory_layout;
// Threads used for services // Threads used for services
std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads; std::unordered_set<std::shared_ptr<Kernel::ServiceThread>> service_threads;
@@ -1131,7 +1133,7 @@ const KWorkerTaskManager& KernelCore::WorkerTaskManager() const {
 }

 const KMemoryLayout& KernelCore::MemoryLayout() const {
-    return impl->memory_layout;
+    return *impl->memory_layout;
 }

 bool KernelCore::IsPhantomModeForSingleCore() const {
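One C++ consequence of the unique_ptr-backed accessor above is worth noting: MemoryLayout() returns a reference into an object that is destroyed and replaced whenever the layout is re-created, so a reference cached across a re-initialization dangles. A small self-contained illustration with generic names (Owner and Layout are hypothetical stand-ins, not yuzu API):

#include <memory>

struct Layout { int value{42}; };

struct Owner {
    std::unique_ptr<Layout> layout = std::make_unique<Layout>();
    const Layout& Get() const { return *layout; }
    void Reinitialize() { layout = std::make_unique<Layout>(); } // old Layout destroyed
};

int main() {
    Owner owner;
    const Layout& cached = owner.Get(); // reference into the current unique_ptr
    owner.Reinitialize();               // 'cached' now refers to a destroyed object
    // Using 'cached' here would be use-after-free; re-fetch after re-initialization:
    const Layout& fresh = owner.Get();
    return fresh.value == 42 ? 0 : 1;
}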