citra-emu/citra-canary

Memory: move states into class

Weiyi Wang 2018-11-21 16:18:23 -05:00
parent d18cda5a5d
commit 42edab01d9
5 changed files with 27 additions and 25 deletions
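This commit moves the namespace-scope Memory buffers (fcram, vram, n3ds_extra_ram, and the current page table pointer) into the Memory::MemorySystem class, so kernel code reaches them through an owning instance instead of through globals. A minimal sketch of the pattern, using a placeholder size and a stripped-down class rather than citra's actual declarations:

#include <array>
#include <cstddef>
#include <cstdint>

// Placeholder capacity; the real member is sized with Memory::FCRAM_N3DS_SIZE.
constexpr std::size_t FCRAM_SIZE = 0x1000;

// Before: a namespace-scope global, reachable from any translation unit.
// std::array<std::uint8_t, FCRAM_SIZE> fcram;

// After: the buffer is owned by MemorySystem, so callers need an instance.
class MemorySystem {
public:
    std::array<std::uint8_t, FCRAM_SIZE> fcram{};
};

// Call sites change accordingly (see the hunks below):
//   old: Memory::fcram.data() + offset
//   new: memory.fcram.data() + offset          (inside KernelSystem methods)
//        kernel.memory.fcram.data() + offset   (inside Process methods)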

View File

@@ -188,10 +188,11 @@ ResultVal<VAddr> Process::HeapAllocate(VAddr target, u32 size, VMAPermission per
         u32 interval_size = interval.upper() - interval.lower();
         LOG_DEBUG(Kernel, "Allocated FCRAM region lower={:08X}, upper={:08X}", interval.lower(),
                   interval.upper());
-        std::fill(Memory::fcram.begin() + interval.lower(),
-                  Memory::fcram.begin() + interval.upper(), 0);
-        auto vma = vm_manager.MapBackingMemory(
-            interval_target, Memory::fcram.data() + interval.lower(), interval_size, memory_state);
+        std::fill(kernel.memory.fcram.begin() + interval.lower(),
+                  kernel.memory.fcram.begin() + interval.upper(), 0);
+        auto vma = vm_manager.MapBackingMemory(interval_target,
+                                               kernel.memory.fcram.data() + interval.lower(),
+                                               interval_size, memory_state);
         ASSERT(vma.Succeeded());
         vm_manager.Reprotect(vma.Unwrap(), perms);
         interval_target += interval_size;
@@ -262,7 +263,7 @@ ResultVal<VAddr> Process::LinearAllocate(VAddr target, u32 size, VMAPermission p
         }
     }
-    u8* backing_memory = Memory::fcram.data() + physical_offset;
+    u8* backing_memory = kernel.memory.fcram.data() + physical_offset;
     std::fill(backing_memory, backing_memory + size, 0);
     auto vma = vm_manager.MapBackingMemory(target, backing_memory, size, MemoryState::Continuous);

View File

@@ -43,8 +43,8 @@ ResultVal<SharedPtr<SharedMemory>> KernelSystem::CreateSharedMemory(
         ASSERT_MSG(offset, "Not enough space in region to allocate shared memory!");
-        std::fill(Memory::fcram.data() + *offset, Memory::fcram.data() + *offset + size, 0);
-        shared_memory->backing_blocks = {{Memory::fcram.data() + *offset, size}};
+        std::fill(memory.fcram.data() + *offset, memory.fcram.data() + *offset + size, 0);
+        shared_memory->backing_blocks = {{memory.fcram.data() + *offset, size}};
         shared_memory->holding_memory += MemoryRegionInfo::Interval(*offset, *offset + size);
         shared_memory->linear_heap_phys_offset = *offset;
@@ -86,8 +86,8 @@ SharedPtr<SharedMemory> KernelSystem::CreateSharedMemoryForApplet(
     shared_memory->other_permissions = other_permissions;
     for (const auto& interval : backing_blocks) {
         shared_memory->backing_blocks.push_back(
-            {Memory::fcram.data() + interval.lower(), interval.upper() - interval.lower()});
-        std::fill(Memory::fcram.data() + interval.lower(), Memory::fcram.data() + interval.upper(),
+            {memory.fcram.data() + interval.lower(), interval.upper() - interval.lower()});
+        std::fill(memory.fcram.data() + interval.lower(), memory.fcram.data() + interval.upper(),
                   0);
     }
     shared_memory->base_address = Memory::HEAP_VADDR + offset;

View File

@@ -355,7 +355,7 @@ ResultVal<SharedPtr<Thread>> KernelSystem::CreateThread(std::string name, VAddr
         // Map the page to the current process' address space.
         vm_manager.MapBackingMemory(Memory::TLS_AREA_VADDR + available_page * Memory::PAGE_SIZE,
-                                    Memory::fcram.data() + *offset, Memory::PAGE_SIZE,
+                                    memory.fcram.data() + *offset, Memory::PAGE_SIZE,
                                     MemoryState::Locked);
     }

View File

@@ -21,12 +21,6 @@
 namespace Memory {
-static std::array<u8, Memory::VRAM_SIZE> vram;
-static std::array<u8, Memory::N3DS_EXTRA_RAM_SIZE> n3ds_extra_ram;
-std::array<u8, Memory::FCRAM_N3DS_SIZE> fcram;
-static PageTable* current_page_table = nullptr;
 void MemorySystem::SetCurrentPageTable(PageTable* page_table) {
     current_page_table = page_table;
     if (Core::System::GetInstance().IsPoweredOn()) {
@@ -78,13 +72,7 @@ void UnmapRegion(PageTable& page_table, VAddr base, u32 size) {
     MapPages(page_table, base / PAGE_SIZE, size / PAGE_SIZE, nullptr, PageType::Unmapped);
 }
-/**
- * Gets the pointer for virtual memory where the page is marked as RasterizerCachedMemory.
- * This is used to access the memory where the page pointer is nullptr due to rasterizer cache.
- * Since the cache only happens on linear heap or VRAM, we know the exact physical address and
- * pointer of such virtual address
- */
-static u8* GetPointerForRasterizerCache(VAddr addr) {
+u8* MemorySystem::GetPointerForRasterizerCache(VAddr addr) {
     if (addr >= LINEAR_HEAP_VADDR && addr < LINEAR_HEAP_VADDR_END) {
         return fcram.data() + (addr - LINEAR_HEAP_VADDR);
     }

View File

@@ -178,8 +178,6 @@ enum : VAddr {
     NEW_LINEAR_HEAP_VADDR_END = NEW_LINEAR_HEAP_VADDR + NEW_LINEAR_HEAP_SIZE,
 };
-extern std::array<u8, Memory::FCRAM_N3DS_SIZE> fcram;
 /**
  * Flushes any externally cached rasterizer resources touching the given region.
  */
@@ -258,12 +256,27 @@ public:
      */
     void RasterizerMarkRegionCached(PAddr start, u32 size, bool cached);
+    std::array<u8, Memory::FCRAM_N3DS_SIZE> fcram{};
 private:
     template <typename T>
     T Read(const VAddr vaddr);
     template <typename T>
     void Write(const VAddr vaddr, const T data);
+    /**
+     * Gets the pointer for virtual memory where the page is marked as RasterizerCachedMemory.
+     * This is used to access the memory where the page pointer is nullptr due to rasterizer cache.
+     * Since the cache only happens on linear heap or VRAM, we know the exact physical address and
+     * pointer of such virtual address
+     */
+    u8* GetPointerForRasterizerCache(VAddr addr);
+    std::array<u8, Memory::VRAM_SIZE> vram{};
+    std::array<u8, Memory::N3DS_EXTRA_RAM_SIZE> n3ds_extra_ram{};
+    PageTable* current_page_table = nullptr;
 };
 } // namespace Memory
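In the memory.h hunk, only fcram is added to the public section; vram, n3ds_extra_ram, current_page_table, and GetPointerForRasterizerCache land under private:. The call sites above (memory.fcram inside KernelSystem, kernel.memory.fcram inside Process) suggest wiring roughly like the sketch below; the constructor and member layout here are assumptions for illustration, not citra's exact interfaces.

#include <array>
#include <cstdint>

namespace Memory {
class MemorySystem {
public:
    std::array<std::uint8_t, 0x1000> fcram{}; // placeholder size
};
} // namespace Memory

// Hypothetical wiring: the kernel keeps a reference to the memory system,
// which is why Process code in the first hunk can write kernel.memory.fcram.
class KernelSystem {
public:
    explicit KernelSystem(Memory::MemorySystem& memory) : memory(memory) {}
    Memory::MemorySystem& memory;
};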