yuzu-emu/yuzu

core: hle: kernel: k_page_table: Implement IPC memory methods.

bunnei 2022-10-29 16:08:33 -07:00
parent ba21ba0c5c
commit 661fe06d9d
3 changed files with 910 additions and 3 deletions
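The commit adds paired setup/cleanup methods for mapping IPC buffers between two processes' page tables. As a rough, hypothetical usage sketch (the real callers live in the HLE IPC code; client_pt, server_pt, client_addr, and size below are assumed stand-ins):

VAddr server_addr{};
R_TRY(server_pt.SetupForIpc(std::addressof(server_addr), size, client_addr, client_pt,
                            KMemoryPermission::UserReadWrite, KMemoryState::Ipc,
                            /*send=*/true));

// ... service the request through server_addr ...

// Unmap the server-side alias, then restore the client's permissions and locks.
R_TRY(server_pt.CleanupForIpcServer(server_addr, size, KMemoryState::Ipc));
R_TRY(client_pt.CleanupForIpcClient(client_addr, size, KMemoryState::Ipc));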

src/core/hle/kernel/k_page_table.cpp

@@ -24,6 +24,65 @@ namespace Kernel {
namespace {
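// RAII helper which locks two KLightLocks in ascending address order, so that two page
// tables can always be locked together without risk of lock-order deadlock.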
class KScopedLightLockPair {
    YUZU_NON_COPYABLE(KScopedLightLockPair);
    YUZU_NON_MOVEABLE(KScopedLightLockPair);

private:
    KLightLock* m_lower;
    KLightLock* m_upper;

public:
    KScopedLightLockPair(KLightLock& lhs, KLightLock& rhs) {
        // Ensure our locks are in a consistent order.
        if (std::addressof(lhs) <= std::addressof(rhs)) {
            m_lower = std::addressof(lhs);
            m_upper = std::addressof(rhs);
        } else {
            m_lower = std::addressof(rhs);
            m_upper = std::addressof(lhs);
        }

        // Acquire both locks.
        m_lower->Lock();
        if (m_lower != m_upper) {
            m_upper->Lock();
        }
    }

    ~KScopedLightLockPair() {
        // Unlock the upper lock.
        if (m_upper != nullptr && m_upper != m_lower) {
            m_upper->Unlock();
        }

        // Unlock the lower lock.
        if (m_lower != nullptr) {
            m_lower->Unlock();
        }
    }

public:
    // Utility.
    void TryUnlockHalf(KLightLock& lock) {
        // Only allow unlocking if the lock is half the pair.
        if (m_lower != m_upper) {
            // We want to be sure the lock is one we own.
            if (m_lower == std::addressof(lock)) {
                lock.Unlock();
                m_lower = nullptr;
            } else if (m_upper == std::addressof(lock)) {
                lock.Unlock();
                m_upper = nullptr;
            }
        }
    }
};
} // namespace
namespace {
using namespace Common::Literals;
constexpr size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceType as_type) {
@@ -676,7 +735,8 @@ bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t nu
Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& src_page_table,
VAddr src_addr) {
- KScopedLightLock lk(m_general_lock);
+ // Acquire the table locks.
+ KScopedLightLockPair lk(src_page_table.m_general_lock, m_general_lock);
const size_t num_pages{size / PageSize};
@@ -712,6 +772,723 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& s
R_SUCCEED();
}
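// Prepares the client (source) side of an IPC mapping. Validates that [address, address + size)
// is in a state and permission compatible with dst_state, reprotects the fully-contained pages
// (read-write sends become kernel-RW/not-mapped so the client cannot observe them mid-request),
// and reports how many memory blocks the subsequent lock update will need.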
Result KPageTable::SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed,
VAddr address, size_t size, KMemoryPermission test_perm,
KMemoryState dst_state) {
// Validate pre-conditions.
ASSERT(this->IsLockedByCurrentThread());
ASSERT(test_perm == KMemoryPermission::UserReadWrite ||
test_perm == KMemoryPermission::UserRead);
// Check that the address is in range.
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
// Get the source permission.
const auto src_perm = static_cast<KMemoryPermission>(
(test_perm == KMemoryPermission::UserReadWrite)
? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
: KMemoryPermission::UserRead);
// Get aligned extents.
const VAddr aligned_src_start = Common::AlignDown((address), PageSize);
const VAddr aligned_src_end = Common::AlignUp((address) + size, PageSize);
const VAddr mapping_src_start = Common::AlignUp((address), PageSize);
const VAddr mapping_src_end = Common::AlignDown((address) + size, PageSize);
const auto aligned_src_last = (aligned_src_end)-1;
const auto mapping_src_last = (mapping_src_end)-1;
// Get the test state and attribute mask.
KMemoryState test_state;
KMemoryAttribute test_attr_mask;
switch (dst_state) {
case KMemoryState::Ipc:
test_state = KMemoryState::FlagCanUseIpc;
test_attr_mask =
KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
break;
case KMemoryState::NonSecureIpc:
test_state = KMemoryState::FlagCanUseNonSecureIpc;
test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
break;
case KMemoryState::NonDeviceIpc:
test_state = KMemoryState::FlagCanUseNonDeviceIpc;
test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
break;
default:
R_THROW(ResultInvalidCombination);
}
// Ensure that on failure, we roll back appropriately.
size_t mapped_size = 0;
ON_RESULT_FAILURE {
if (mapped_size > 0) {
this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size,
src_perm);
}
};
size_t blocks_needed = 0;
// Iterate, mapping as needed.
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
while (true) {
const KMemoryInfo info = it->GetMemoryInfo();
// Validate the current block.
R_TRY(this->CheckMemoryState(info, test_state, test_state, test_perm, test_perm,
test_attr_mask, KMemoryAttribute::None));
if (mapping_src_start < mapping_src_end && (mapping_src_start) < info.GetEndAddress() &&
info.GetAddress() < (mapping_src_end)) {
const auto cur_start =
info.GetAddress() >= (mapping_src_start) ? info.GetAddress() : (mapping_src_start);
const auto cur_end = mapping_src_last >= info.GetLastAddress() ? info.GetEndAddress()
: (mapping_src_end);
const size_t cur_size = cur_end - cur_start;
if (info.GetAddress() < (mapping_src_start)) {
++blocks_needed;
}
if (mapping_src_last < info.GetLastAddress()) {
++blocks_needed;
}
// Set the permissions on the block, if we need to.
if ((info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != src_perm) {
R_TRY(Operate(cur_start, cur_size / PageSize, src_perm,
OperationType::ChangePermissions));
}
// Note that we mapped this part.
mapped_size += cur_size;
}
// If the block is at the end, we're done.
if (aligned_src_last <= info.GetLastAddress()) {
break;
}
// Advance.
++it;
ASSERT(it != m_memory_block_manager.end());
}
if (out_blocks_needed != nullptr) {
ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
*out_blocks_needed = blocks_needed;
}
R_SUCCEED();
}
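// Maps the client's buffer into this (server) page table. Picks a free area in the alias
// region, allocates fresh partial pages for any unaligned head/tail (filled with the IPC fill
// value and, on send, with the client's partial-page data), and maps the aligned middle of the
// buffer directly by traversing the client's physical mappings.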
Result KPageTable::SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr,
KMemoryPermission test_perm, KMemoryState dst_state,
KPageTable& src_page_table, bool send) {
ASSERT(this->IsLockedByCurrentThread());
ASSERT(src_page_table.IsLockedByCurrentThread());
// Check that we can theoretically map.
const VAddr region_start = m_alias_region_start;
const size_t region_size = m_alias_region_end - m_alias_region_start;
R_UNLESS(size < region_size, ResultOutOfAddressSpace);
// Get aligned source extents.
const VAddr src_start = src_addr;
const VAddr src_end = src_addr + size;
const VAddr aligned_src_start = Common::AlignDown((src_start), PageSize);
const VAddr aligned_src_end = Common::AlignUp((src_start) + size, PageSize);
const VAddr mapping_src_start = Common::AlignUp((src_start), PageSize);
const VAddr mapping_src_end = Common::AlignDown((src_start) + size, PageSize);
const size_t aligned_src_size = aligned_src_end - aligned_src_start;
const size_t mapping_src_size =
(mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
// Select a random address to map at.
VAddr dst_addr =
this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize,
PageSize, 0, this->GetNumGuardPages());
R_UNLESS(dst_addr != 0, ResultOutOfAddressSpace);
// Check that we can perform the operation we're about to perform.
ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state));
// Create an update allocator.
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager);
R_TRY(allocator_result);
// We're going to perform an update, so create a helper.
KScopedPageTableUpdater updater(this);
// Reserve space for any partial pages we allocate.
const size_t unmapped_size = aligned_src_size - mapping_src_size;
KScopedResourceReservation memory_reservation(m_resource_limit,
LimitableResource::PhysicalMemory, unmapped_size);
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
// Ensure that we manage page references correctly.
PAddr start_partial_page = 0;
PAddr end_partial_page = 0;
VAddr cur_mapped_addr = dst_addr;
// If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll
// be freed on scope exit.
SCOPE_EXIT({
if (start_partial_page != 0) {
m_system.Kernel().MemoryManager().Close(start_partial_page, 1);
}
if (end_partial_page != 0) {
m_system.Kernel().MemoryManager().Close(end_partial_page, 1);
}
});
ON_RESULT_FAILURE {
if (cur_mapped_addr != dst_addr) {
ASSERT(Operate(dst_addr, (cur_mapped_addr - dst_addr) / PageSize,
KMemoryPermission::None, OperationType::Unmap)
.IsSuccess());
}
};
// Allocate the start page as needed.
if (aligned_src_start < mapping_src_start) {
start_partial_page =
m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
R_UNLESS(start_partial_page != 0, ResultOutOfMemory);
}
// Allocate the end page as needed.
if (mapping_src_end < aligned_src_end &&
(aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
end_partial_page =
m_system.Kernel().MemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
R_UNLESS(end_partial_page != 0, ResultOutOfMemory);
}
// Get the implementation.
auto& src_impl = src_page_table.PageTableImpl();
// Get the fill value for partial pages.
const auto fill_val = m_ipc_fill_value;
// Begin traversal.
Common::PageTable::TraversalContext context;
Common::PageTable::TraversalEntry next_entry;
bool traverse_valid = src_impl.BeginTraversal(next_entry, context, aligned_src_start);
ASSERT(traverse_valid);
// Prepare tracking variables.
PAddr cur_block_addr = next_entry.phys_addr;
size_t cur_block_size =
next_entry.block_size - ((cur_block_addr) & (next_entry.block_size - 1));
size_t tot_block_size = cur_block_size;
// Map the start page, if we have one.
if (start_partial_page != 0) {
// Ensure the page holds correct data.
const VAddr start_partial_virt =
GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), start_partial_page);
if (send) {
const size_t partial_offset = src_start - aligned_src_start;
size_t copy_size, clear_size;
if (src_end < mapping_src_start) {
copy_size = size;
clear_size = mapping_src_start - src_end;
} else {
copy_size = mapping_src_start - src_start;
clear_size = 0;
}
std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val,
partial_offset);
std::memcpy(
m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset),
m_system.Memory().GetPointer<void>(
GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), cur_block_addr) +
partial_offset),
copy_size);
if (clear_size > 0) {
std::memset(m_system.Memory().GetPointer<void>(start_partial_virt + partial_offset +
copy_size),
fill_val, clear_size);
}
} else {
std::memset(m_system.Memory().GetPointer<void>(start_partial_virt), fill_val, PageSize);
}
// Map the page.
R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, start_partial_page));
// Update tracking extents.
cur_mapped_addr += PageSize;
cur_block_addr += PageSize;
cur_block_size -= PageSize;
// If the block's size was one page, we may need to continue traversal.
if (cur_block_size == 0 && aligned_src_size > PageSize) {
traverse_valid = src_impl.ContinueTraversal(next_entry, context);
ASSERT(traverse_valid);
cur_block_addr = next_entry.phys_addr;
cur_block_size = next_entry.block_size;
tot_block_size += next_entry.block_size;
}
}
// Map the remaining pages.
while (aligned_src_start + tot_block_size < mapping_src_end) {
// Continue the traversal.
traverse_valid = src_impl.ContinueTraversal(next_entry, context);
ASSERT(traverse_valid);
// Process the block.
if (next_entry.phys_addr != cur_block_addr + cur_block_size) {
// Map the block we've been processing so far.
R_TRY(Operate(cur_mapped_addr, cur_block_size / PageSize, test_perm, OperationType::Map,
cur_block_addr));
// Update tracking extents.
cur_mapped_addr += cur_block_size;
cur_block_addr = next_entry.phys_addr;
cur_block_size = next_entry.block_size;
} else {
cur_block_size += next_entry.block_size;
}
tot_block_size += next_entry.block_size;
}
// Handle the last direct-mapped page.
if (const VAddr mapped_block_end = aligned_src_start + tot_block_size - cur_block_size;
mapped_block_end < mapping_src_end) {
const size_t last_block_size = mapping_src_end - mapped_block_end;
// Map the last block.
R_TRY(Operate(cur_mapped_addr, last_block_size / PageSize, test_perm, OperationType::Map,
cur_block_addr));
// Update tracking extents.
cur_mapped_addr += last_block_size;
cur_block_addr += last_block_size;
if (mapped_block_end + cur_block_size < aligned_src_end &&
cur_block_size == last_block_size) {
traverse_valid = src_impl.ContinueTraversal(next_entry, context);
ASSERT(traverse_valid);
cur_block_addr = next_entry.phys_addr;
}
}
// Map the end page, if we have one.
if (end_partial_page != 0) {
// Ensure the page holds correct data.
const VAddr end_partial_virt =
GetHeapVirtualAddress(m_system.Kernel().MemoryLayout(), end_partial_page);
if (send) {
const size_t copy_size = src_end - mapping_src_end;
std::memcpy(m_system.Memory().GetPointer<void>(end_partial_virt),
m_system.Memory().GetPointer<void>(GetHeapVirtualAddress(
m_system.Kernel().MemoryLayout(), cur_block_addr)),
copy_size);
std::memset(m_system.Memory().GetPointer<void>(end_partial_virt + copy_size), fill_val,
PageSize - copy_size);
} else {
std::memset(m_system.Memory().GetPointer<void>(end_partial_virt), fill_val, PageSize);
}
// Map the page.
R_TRY(Operate(cur_mapped_addr, 1, test_perm, OperationType::Map, end_partial_page));
}
// Update memory blocks to reflect our changes
m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize,
dst_state, test_perm, KMemoryAttribute::None,
KMemoryBlockDisableMergeAttribute::Normal,
KMemoryBlockDisableMergeAttribute::None);
// Set the output address.
*out_addr = dst_addr + (src_start - aligned_src_start);
// We succeeded.
memory_reservation.Commit();
R_SUCCEED();
}
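// Top-level entry point for IPC buffer mapping. Locks both tables, performs client setup on
// the source table and server setup on this table, then IPC-locks the client's pages so they
// cannot be unmapped or reprotected while the request is in flight.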
Result KPageTable::SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr,
KPageTable& src_page_table, KMemoryPermission test_perm,
KMemoryState dst_state, bool send) {
// For convenience, alias this.
KPageTable& dst_page_table = *this;
// Acquire the table locks.
KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
// We're going to perform an update, so create a helper.
KScopedPageTableUpdater updater(std::addressof(src_page_table));
// Perform client setup.
size_t num_allocator_blocks;
R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(),
std::addressof(num_allocator_blocks), src_addr, size,
test_perm, dst_state));
// Create an update allocator.
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
src_page_table.m_memory_block_slab_manager,
num_allocator_blocks);
R_TRY(allocator_result);
// Get the mapped extents.
const VAddr src_map_start = Common::AlignUp((src_addr), PageSize);
const VAddr src_map_end = Common::AlignDown((src_addr) + size, PageSize);
const size_t src_map_size = src_map_end - src_map_start;
// Ensure that we clean up appropriately if we fail after this.
const auto src_perm = static_cast<KMemoryPermission>(
(test_perm == KMemoryPermission::UserReadWrite)
? KMemoryPermission::KernelReadWrite | KMemoryPermission::NotMapped
: KMemoryPermission::UserRead);
ON_RESULT_FAILURE {
if (src_map_end > src_map_start) {
src_page_table.CleanupForIpcClientOnServerSetupFailure(
updater.GetPageList(), src_map_start, src_map_size, src_perm);
}
};
// Perform server setup.
R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state,
src_page_table, send));
// If anything was mapped, ipc-lock the pages.
if (src_map_start < src_map_end) {
// Get the source permission.
src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start,
(src_map_end - src_map_start) / PageSize,
&KMemoryBlock::LockForIpc, src_perm);
}
R_SUCCEED();
}
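// Tears down a server-side alias mapping created by SetupForIpcServer and releases the
// resource-limit reservation that covered its partial head/tail pages.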
Result KPageTable::CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state) {
// Validate the address.
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
// Lock the table.
KScopedLightLock lk(m_general_lock);
// Validate the memory state.
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size,
KMemoryState::All, dst_state, KMemoryPermission::UserRead,
KMemoryPermission::UserRead, KMemoryAttribute::All,
KMemoryAttribute::None));
// Create an update allocator.
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
// We're going to perform an update, so create a helper.
KScopedPageTableUpdater updater(this);
// Get aligned extents.
const VAddr aligned_start = Common::AlignDown((address), PageSize);
const VAddr aligned_end = Common::AlignUp((address) + size, PageSize);
const size_t aligned_size = aligned_end - aligned_start;
const size_t aligned_num_pages = aligned_size / PageSize;
// Unmap the pages.
R_TRY(Operate(aligned_start, aligned_num_pages, KMemoryPermission::None, OperationType::Unmap));
// Update memory blocks.
m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages,
KMemoryState::None, KMemoryPermission::None,
KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
KMemoryBlockDisableMergeAttribute::Normal);
// Release from the resource limit as relevant.
const VAddr mapping_start = Common::AlignUp((address), PageSize);
const VAddr mapping_end = Common::AlignDown((address) + size, PageSize);
const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
m_resource_limit->Release(LimitableResource::PhysicalMemory, aligned_size - mapping_size);
R_SUCCEED();
}
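// Undoes client setup once a request completes: restores original permissions on pages whose
// IPC lock count drops to zero, consolidating adjacent blocks with identical permission state
// into single reprotection calls, then releases the IPC locks.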
Result KPageTable::CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state) {
// Validate the address.
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
// Get aligned source extents.
const VAddr mapping_start = Common::AlignUp((address), PageSize);
const VAddr mapping_end = Common::AlignDown((address) + size, PageSize);
const VAddr mapping_last = mapping_end - 1;
const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
// If nothing was mapped, we're actually done immediately.
R_SUCCEED_IF(mapping_size == 0);
// Get the test state and attribute mask.
KMemoryState test_state;
KMemoryAttribute test_attr_mask;
switch (dst_state) {
case KMemoryState::Ipc:
test_state = KMemoryState::FlagCanUseIpc;
test_attr_mask =
KMemoryAttribute::Uncached | KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked;
break;
case KMemoryState::NonSecureIpc:
test_state = KMemoryState::FlagCanUseNonSecureIpc;
test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
break;
case KMemoryState::NonDeviceIpc:
test_state = KMemoryState::FlagCanUseNonDeviceIpc;
test_attr_mask = KMemoryAttribute::Uncached | KMemoryAttribute::Locked;
break;
default:
R_THROW(ResultInvalidCombination);
}
// Lock the table.
// NOTE: Nintendo does this *after* creating the updater below, but this does not follow
// the convention used elsewhere in KPageTable.
KScopedLightLock lk(m_general_lock);
// We're going to perform an update, so create a helper.
KScopedPageTableUpdater updater(this);
// Ensure that on failure, we roll back appropriately.
size_t mapped_size = 0;
ON_RESULT_FAILURE {
if (mapped_size > 0) {
// Determine where the mapping ends.
const auto mapped_end = (mapping_start) + mapped_size;
const auto mapped_last = mapped_end - 1;
// Get current and next iterators.
KMemoryBlockManager::const_iterator start_it =
m_memory_block_manager.FindIterator(mapping_start);
KMemoryBlockManager::const_iterator next_it = start_it;
++next_it;
// Get the current block info.
KMemoryInfo cur_info = start_it->GetMemoryInfo();
// Create tracking variables.
VAddr cur_address = cur_info.GetAddress();
size_t cur_size = cur_info.GetSize();
bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
bool first =
cur_info.GetIpcDisableMergeCount() == 1 &&
(cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) ==
KMemoryBlockDisableMergeAttribute::None;
while (((cur_address) + cur_size - 1) < mapped_last) {
// Check that we have a next block.
ASSERT(next_it != m_memory_block_manager.end());
// Get the next info.
const KMemoryInfo next_info = next_it->GetMemoryInfo();
// Check if we can consolidate the next block's permission set with the current one.
const bool next_perm_eq =
next_info.GetPermission() == next_info.GetOriginalPermission();
const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
// We can consolidate the reprotection for the current and next block into a
// single call.
cur_size += next_info.GetSize();
} else {
// We have to operate on the current block.
if ((cur_needs_set_perm || first) && !cur_perm_eq) {
ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(),
OperationType::ChangePermissions)
.IsSuccess());
}
// Advance.
cur_address = next_info.GetAddress();
cur_size = next_info.GetSize();
first = false;
}
// Advance.
cur_info = next_info;
cur_perm_eq = next_perm_eq;
cur_needs_set_perm = next_needs_set_perm;
++next_it;
}
// Process the last block.
if ((first || cur_needs_set_perm) && !cur_perm_eq) {
ASSERT(Operate(cur_address, cur_size / PageSize, cur_info.GetPermission(),
OperationType::ChangePermissions)
.IsSuccess());
}
}
};
// Iterate, reprotecting as needed.
{
// Get current and next iterators.
KMemoryBlockManager::const_iterator start_it =
m_memory_block_manager.FindIterator(mapping_start);
KMemoryBlockManager::const_iterator next_it = start_it;
++next_it;
// Validate the current block.
KMemoryInfo cur_info = start_it->GetMemoryInfo();
ASSERT(this->CheckMemoryState(cur_info, test_state, test_state, KMemoryPermission::None,
KMemoryPermission::None,
test_attr_mask | KMemoryAttribute::IpcLocked,
KMemoryAttribute::IpcLocked)
.IsSuccess());
// Create tracking variables.
VAddr cur_address = cur_info.GetAddress();
size_t cur_size = cur_info.GetSize();
bool cur_perm_eq = cur_info.GetPermission() == cur_info.GetOriginalPermission();
bool cur_needs_set_perm = !cur_perm_eq && cur_info.GetIpcLockCount() == 1;
bool first =
cur_info.GetIpcDisableMergeCount() == 1 &&
(cur_info.GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute::Locked) ==
KMemoryBlockDisableMergeAttribute::None;
while ((cur_address + cur_size - 1) < mapping_last) {
// Check that we have a next block.
ASSERT(next_it != m_memory_block_manager.end());
// Get the next info.
const KMemoryInfo next_info = next_it->GetMemoryInfo();
// Validate the next block.
ASSERT(this->CheckMemoryState(next_info, test_state, test_state,
KMemoryPermission::None, KMemoryPermission::None,
test_attr_mask | KMemoryAttribute::IpcLocked,
KMemoryAttribute::IpcLocked)
.IsSuccess());
// Check if we can consolidate the next block's permission set with the current one.
const bool next_perm_eq =
next_info.GetPermission() == next_info.GetOriginalPermission();
const bool next_needs_set_perm = !next_perm_eq && next_info.GetIpcLockCount() == 1;
if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm &&
cur_info.GetOriginalPermission() == next_info.GetOriginalPermission()) {
// We can consolidate the reprotection for the current and next block into a single
// call.
cur_size += next_info.GetSize();
} else {
// We have to operate on the current block.
if ((cur_needs_set_perm || first) && !cur_perm_eq) {
R_TRY(Operate(cur_address, cur_size / PageSize,
cur_needs_set_perm ? cur_info.GetOriginalPermission()
: cur_info.GetPermission(),
OperationType::ChangePermissions));
}
// Mark that we mapped the block.
mapped_size += cur_size;
// Advance.
cur_address = next_info.GetAddress();
cur_size = next_info.GetSize();
first = false;
}
// Advance.
cur_info = next_info;
cur_perm_eq = next_perm_eq;
cur_needs_set_perm = next_needs_set_perm;
++next_it;
}
// Process the last block.
const auto lock_count =
cur_info.GetIpcLockCount() +
(next_it != m_memory_block_manager.end()
? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount())
: 0);
if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
R_TRY(Operate(cur_address, cur_size / PageSize,
cur_needs_set_perm ? cur_info.GetOriginalPermission()
: cur_info.GetPermission(),
OperationType::ChangePermissions));
}
}
// Create an update allocator.
// NOTE: Guaranteed zero blocks needed here.
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager, 0);
R_TRY(allocator_result);
// Unlock the pages.
m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start,
mapping_size / PageSize, &KMemoryBlock::UnlockForIpc,
KMemoryPermission::None);
R_SUCCEED();
}
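// Rollback path for SetupForIpc: if server setup fails after the client's pages were already
// reprotected, walk the affected blocks and restore their permissions, leaving ranges that are
// still IPC-locked by other requests untouched.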
void KPageTable::CleanupForIpcClientOnServerSetupFailure([[maybe_unused]] PageLinkedList* page_list,
VAddr address, size_t size,
KMemoryPermission prot_perm) {
ASSERT(this->IsLockedByCurrentThread());
ASSERT(Common::IsAligned(address, PageSize));
ASSERT(Common::IsAligned(size, PageSize));
// Get the mapped extents.
const VAddr src_map_start = address;
const VAddr src_map_end = address + size;
const VAddr src_map_last = src_map_end - 1;
// This function is only invoked when there's something to do.
ASSERT(src_map_end > src_map_start);
// Iterate over blocks, fixing permissions.
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
while (true) {
const KMemoryInfo info = it->GetMemoryInfo();
const auto cur_start =
info.GetAddress() >= src_map_start ? info.GetAddress() : src_map_start;
const auto cur_end =
src_map_last <= info.GetLastAddress() ? src_map_end : info.GetEndAddress();
// If we can, fix the protections on the block.
if ((info.GetIpcLockCount() == 0 &&
(info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) ||
(info.GetIpcLockCount() != 0 &&
(info.GetOriginalPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm)) {
// Check if we actually need to fix the protections on the block.
if (cur_end == src_map_end || info.GetAddress() <= src_map_start ||
(info.GetPermission() & KMemoryPermission::IpcLockChangeMask) != prot_perm) {
ASSERT(Operate(cur_start, (cur_end - cur_start) / PageSize, info.GetPermission(),
OperationType::ChangePermissions)
.IsSuccess());
}
}
// If we're past the end of the region, we're done.
if (src_map_last <= info.GetLastAddress()) {
break;
}
// Advance.
++it;
ASSERT(it != m_memory_block_manager.end());
}
}
void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) {
m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages);
}
@@ -858,7 +1635,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
R_TRY(allocator_result);
// We're going to perform an update, so create a helper.
- // KScopedPageTableUpdater updater(this);
+ KScopedPageTableUpdater updater(this);
// Prepare to iterate over the memory.
auto pg_it = pg.Nodes().begin();
@@ -1074,7 +1851,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
R_TRY(allocator_result);
// We're going to perform an update, so create a helper.
- // KScopedPageTableUpdater updater(this);
+ KScopedPageTableUpdater updater(this);
// Separate the mapping.
R_TRY(Operate(map_start_address, (map_last_address + 1 - map_start_address) / PageSize,
@@ -1935,6 +2712,24 @@ Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) {
R_SUCCEED();
}
Result KPageTable::LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size) {
R_RETURN(this->LockMemoryAndOpen(
nullptr, out, address, size, KMemoryState::FlagCanIpcUserBuffer,
KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::All,
KMemoryPermission::UserReadWrite, KMemoryAttribute::All, KMemoryAttribute::None,
static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
KMemoryPermission::KernelReadWrite),
KMemoryAttribute::Locked));
}
Result KPageTable::UnlockForIpcUserBuffer(VAddr address, size_t size) {
R_RETURN(this->UnlockMemory(address, size, KMemoryState::FlagCanIpcUserBuffer,
KMemoryState::FlagCanIpcUserBuffer, KMemoryPermission::None,
KMemoryPermission::None, KMemoryAttribute::All,
KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite,
KMemoryAttribute::Locked, nullptr));
}
Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) {
R_RETURN(this->LockMemoryAndOpen(
out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
@@ -2038,6 +2833,17 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
R_SUCCEED();
}
void KPageTable::FinalizeUpdate(PageLinkedList* page_list) {
while (page_list->Peek()) {
[[maybe_unused]] auto page = page_list->Pop();
// TODO(bunnei): Free pages once they are allocated in guest memory
// ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
// ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
// this->GetPageTableManager().Free(page);
}
}
VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
switch (state) {
case KMemoryState::Free:

src/core/hle/kernel/k_page_table.h

@@ -16,6 +16,7 @@
#include "core/hle/kernel/k_memory_layout.h"
#include "core/hle/kernel/k_memory_manager.h"
#include "core/hle/result.h"
#include "core/memory.h"
namespace Core {
class System;
@@ -83,6 +84,14 @@ public:
Result UnlockForDeviceAddressSpace(VAddr addr, size_t size);
Result LockForIpcUserBuffer(PAddr* out, VAddr address, size_t size);
Result UnlockForIpcUserBuffer(VAddr address, size_t size);
Result SetupForIpc(VAddr* out_dst_addr, size_t size, VAddr src_addr, KPageTable& src_page_table,
KMemoryPermission test_perm, KMemoryState dst_state, bool send);
Result CleanupForIpcServer(VAddr address, size_t size, KMemoryState dst_state);
Result CleanupForIpcClient(VAddr address, size_t size, KMemoryState dst_state);
Result LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size);
Result UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg);
Result MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
@@ -100,6 +109,45 @@ public:
bool CanContain(VAddr addr, size_t size, KMemoryState state) const;
protected:
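// Intrusive singly-linked list of page-table pages pending release; each node occupies
// exactly one page.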
struct PageLinkedList {
private:
    struct Node {
        Node* m_next;
        std::array<u8, PageSize - sizeof(Node*)> m_buffer;
    };

public:
    constexpr PageLinkedList() = default;

    void Push(Node* n) {
        ASSERT(Common::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
        n->m_next = m_root;
        m_root = n;
    }

    void Push(Core::Memory::Memory& memory, VAddr addr) {
        this->Push(memory.GetPointer<Node>(addr));
    }

    Node* Peek() const {
        return m_root;
    }

    Node* Pop() {
        Node* const r = m_root;

        m_root = r->m_next;
        r->m_next = nullptr;

        return r;
    }

private:
    Node* m_root{};
};
static_assert(std::is_trivially_destructible<PageLinkedList>::value);
private:
enum class OperationType : u32 {
Map = 0,
@@ -128,6 +176,7 @@ private:
OperationType operation);
Result Operate(VAddr addr, size_t num_pages, KMemoryPermission perm, OperationType operation,
PAddr map_addr = 0);
void FinalizeUpdate(PageLinkedList* page_list);
VAddr GetRegionAddress(KMemoryState state) const;
size_t GetRegionSize(KMemoryState state) const;
@@ -204,6 +253,14 @@ private:
return *out != 0;
}
Result SetupForIpcClient(PageLinkedList* page_list, size_t* out_blocks_needed, VAddr address,
size_t size, KMemoryPermission test_perm, KMemoryState dst_state);
Result SetupForIpcServer(VAddr* out_addr, size_t size, VAddr src_addr,
KMemoryPermission test_perm, KMemoryState dst_state,
KPageTable& src_page_table, bool send);
void CleanupForIpcClientOnServerSetupFailure(PageLinkedList* page_list, VAddr address,
size_t size, KMemoryPermission prot_perm);
// HACK: These will be removed once we automatically manage page reference counts.
void HACK_OpenPages(PAddr phys_addr, size_t num_pages);
void HACK_ClosePages(VAddr virt_addr, size_t num_pages);
@@ -325,6 +382,31 @@ public:
addr + size - 1 <= m_address_space_end - 1;
}
public:
static VAddr GetLinearMappedVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
return layout.GetLinearVirtualAddress(addr);
}
static PAddr GetLinearMappedPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
return layout.GetLinearPhysicalAddress(addr);
}
static VAddr GetHeapVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
return GetLinearMappedVirtualAddress(layout, addr);
}
static PAddr GetHeapPhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
return GetLinearMappedPhysicalAddress(layout, addr);
}
static VAddr GetPageTableVirtualAddress(const KMemoryLayout& layout, PAddr addr) {
return GetLinearMappedVirtualAddress(layout, addr);
}
static PAddr GetPageTablePhysicalAddress(const KMemoryLayout& layout, VAddr addr) {
return GetLinearMappedPhysicalAddress(layout, addr);
}
private:
constexpr bool IsKernel() const {
return m_is_kernel;
@@ -339,6 +421,24 @@ private:
(addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
}
private:
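// RAII helper which accumulates page-table pages freed during an update and hands them to
// FinalizeUpdate when it goes out of scope.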
class KScopedPageTableUpdater {
private:
    KPageTable* m_pt{};
    PageLinkedList m_ll;

public:
    explicit KScopedPageTableUpdater(KPageTable* pt) : m_pt(pt) {}
    explicit KScopedPageTableUpdater(KPageTable& pt) : KScopedPageTableUpdater(&pt) {}
    ~KScopedPageTableUpdater() {
        m_pt->FinalizeUpdate(this->GetPageList());
    }

    PageLinkedList* GetPageList() {
        return &m_ll;
    }
};
private:
VAddr m_address_space_start{};
VAddr m_address_space_end{};

src/core/hle/kernel/svc_results.h

@@ -37,6 +37,7 @@ constexpr Result ResultInvalidState{ErrorModule::Kernel, 125};
constexpr Result ResultReservedUsed{ErrorModule::Kernel, 126};
constexpr Result ResultPortClosed{ErrorModule::Kernel, 131};
constexpr Result ResultLimitReached{ErrorModule::Kernel, 132};
constexpr Result ResultOutOfAddressSpace{ErrorModule::Kernel, 259};
constexpr Result ResultInvalidId{ErrorModule::Kernel, 519};
} // namespace Kernel