core: hle: kernel: Use result macros for new/changed code.
parent a4d11f4427
commit 829e82e264
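The commit replaces open-coded `return` statements with the kernel's result macros (`R_SUCCEED`, `R_RETURN`, `R_THROW`, `R_UNLESS`, `R_TRY`). As a rough mental model of the pattern applied throughout the diff below, here is a minimal, self-contained sketch; the `Result` type, the example error code, and the macro bodies are simplified stand-ins for illustration only, not the real definitions from the kernel's result header (the final hunk of this diff shows part of the real `R_UNLESS`).

// Simplified stand-ins, for illustration only. The real Result type and R_*
// macros live in the kernel's result header and do more than these one-liners.
#include <cstddef>
#include <cstdint>

struct Result {
    uint32_t raw{};
    constexpr bool IsError() const { return raw != 0; }
};
constexpr Result ResultSuccess{0};
constexpr Result ResultInvalidSize{1}; // hypothetical error code for this example

#define R_SUCCEED() return ResultSuccess // replaces "return ResultSuccess;"
#define R_THROW(res) return (res)        // replaces "return SomeErrorResult;"
#define R_RETURN(expr) return (expr)     // replaces "return CallReturningResult(...);"
#define R_UNLESS(expr, res)                                                                        \
    do {                                                                                           \
        if (!(expr)) {                                                                             \
            R_THROW(res);                                                                          \
        }                                                                                          \
    } while (0)

// Before: explicit early-return blocks.
Result CheckSizeOld(size_t size) {
    if (size == 0) {
        return ResultInvalidSize;
    }
    return ResultSuccess;
}

// After: the same control flow expressed with the macros, which is the
// mechanical transformation this commit applies to new/changed kernel code.
Result CheckSizeNew(size_t size) {
    R_UNLESS(size != 0, ResultInvalidSize);
    R_SUCCEED();
}

The macros keep success and error paths uniform and let early-exit conditions read as single statements, which is what most hunks below reduce to.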
@@ -65,7 +65,7 @@ public:
             m_page_bitmap.SetBit(i);
         }
 
-        return ResultSuccess;
+        R_SUCCEED();
     }
 
     VAddr GetAddress() const {
@@ -23,7 +23,7 @@ Result KMemoryBlockManager::Initialize(VAddr st, VAddr nd, KMemoryBlockSlabManag
                            KMemoryState::Free, KMemoryPermission::None, KMemoryAttribute::None);
     m_memory_block_tree.insert(*start_block);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager* slab_manager,
@@ -35,7 +35,7 @@ private:
             R_UNLESS(m_blocks[m_index + i] != nullptr, ResultOutOfResource);
         }
 
-        return ResultSuccess;
+        R_SUCCEED();
     }
 
 public:
@@ -128,12 +128,9 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
         alloc_start = process_code_end;
         alloc_size = end - process_code_end;
     }
-    const size_t needed_size{
-        (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size)};
-    if (alloc_size < needed_size) {
-        ASSERT(false);
-        return ResultOutOfMemory;
-    }
+    const size_t needed_size =
+        (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
+    R_UNLESS(alloc_size >= needed_size, ResultOutOfMemory);
 
     const size_t remaining_size{alloc_size - needed_size};
 
@@ -259,8 +256,9 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
     m_page_table_impl = std::make_unique<Common::PageTable>();
     m_page_table_impl->Resize(m_address_space_width, PageBits);
 
-    return m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
-                                             m_memory_block_slab_manager);
+    // Initialize our memory block manager.
+    R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end,
+                                               m_memory_block_slab_manager));
 }
 
 void KPageTable::Finalize() {
@@ -306,7 +304,7 @@ Result KPageTable::MapProcessCode(VAddr addr, size_t num_pages, KMemoryState sta
                                   KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
                                   KMemoryBlockDisableMergeAttribute::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t size) {
@@ -385,7 +383,7 @@ Result KPageTable::MapCodeMemory(VAddr dst_address, VAddr src_address, size_t si
                                       KMemoryBlockDisableMergeAttribute::None);
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t size,
@@ -487,7 +485,7 @@ Result KPageTable::UnmapCodeMemory(VAddr dst_address, VAddr src_address, size_t
         reprotected_pages = true;
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 VAddr KPageTable::FindFreeArea(VAddr region_start, size_t region_num_pages, size_t num_pages,
@@ -558,7 +556,7 @@ Result KPageTable::MakePageGroup(KPageGroup& pg, VAddr addr, size_t num_pages) {
         R_UNLESS(IsHeapPhysicalAddress(memory_layout, cur_addr), ResultInvalidCurrentMemory);
         R_TRY(pg.AddBlock(cur_addr, cur_pages));
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 bool KPageTable::IsValidPageGroup(const KPageGroup& pg_ll, VAddr addr, size_t num_pages) {
@@ -685,7 +683,7 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& s
 
     m_system.InvalidateCpuInstructionCaches();
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
@@ -933,7 +931,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
             // Cancel our guard.
             unmap_guard.Cancel();
 
-            return ResultSuccess;
+            R_SUCCEED();
         }
     }
 }
@@ -1176,7 +1174,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
     // We succeeded.
     remap_guard.Cancel();
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size) {
@@ -1243,7 +1241,7 @@ Result KPageTable::MapMemory(VAddr dst_address, VAddr src_address, size_t size)
                                   KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
                                   KMemoryBlockDisableMergeAttribute::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size) {
@@ -1288,9 +1286,7 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
     AddRegionToPages(src_address, num_pages, src_pages);
     AddRegionToPages(dst_address, num_pages, dst_pages);
 
-    if (!dst_pages.IsEqual(src_pages)) {
-        return ResultInvalidMemoryRegion;
-    }
+    R_UNLESS(dst_pages.IsEqual(src_pages), ResultInvalidMemoryRegion);
 
     {
         auto block_guard = detail::ScopeExit([&] { MapPages(dst_address, dst_pages, dst_perm); });
@@ -1312,7 +1308,7 @@ Result KPageTable::UnmapMemory(VAddr dst_address, VAddr src_address, size_t size
                                   KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
                                   KMemoryBlockDisableMergeAttribute::Normal);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
@@ -1330,13 +1326,13 @@ Result KPageTable::MapPages(VAddr addr, const KPageGroup& page_linked_list,
             ASSERT(Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)
                        .IsSuccess());
 
-            return result;
+            R_RETURN(result);
         }
 
         cur_addr += node.GetNumPages() * PageSize;
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state,
@@ -1367,7 +1363,7 @@ Result KPageTable::MapPages(VAddr address, KPageGroup& page_linked_list, KMemory
                                   KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::Normal,
                                   KMemoryBlockDisableMergeAttribute::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
@@ -1413,7 +1409,7 @@ Result KPageTable::MapPages(VAddr* out_addr, size_t num_pages, size_t alignment,
 
     // We successfully mapped the pages.
     *out_addr = addr;
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
@@ -1425,13 +1421,13 @@ Result KPageTable::UnmapPages(VAddr addr, const KPageGroup& page_linked_list) {
         if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
                                       OperationType::Unmap)};
             result.IsError()) {
-            return result;
+            R_RETURN(result);
         }
 
         cur_addr += node.GetNumPages() * PageSize;
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemoryState state) {
@@ -1465,7 +1461,7 @@ Result KPageTable::UnmapPages(VAddr address, KPageGroup& page_linked_list, KMemo
                                   KMemoryBlockDisableMergeAttribute::None,
                                   KMemoryBlockDisableMergeAttribute::Normal);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState state) {
@@ -1498,7 +1494,7 @@ Result KPageTable::UnmapPages(VAddr address, size_t num_pages, KMemoryState stat
                                   KMemoryBlockDisableMergeAttribute::None,
                                   KMemoryBlockDisableMergeAttribute::Normal);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t num_pages,
@@ -1523,7 +1519,7 @@ Result KPageTable::MakeAndOpenPageGroup(KPageGroup* out, VAddr address, size_t n
     // Create a new page group for the region.
     R_TRY(this->MakePageGroup(*out, address, num_pages));
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size,
@@ -1589,7 +1585,7 @@ Result KPageTable::SetProcessMemoryPermission(VAddr addr, size_t size,
         m_system.InvalidateCpuInstructionCacheRange(addr, size);
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 KMemoryInfo KPageTable::QueryInfoImpl(VAddr addr) {
@@ -1653,7 +1649,7 @@ Result KPageTable::SetMemoryPermission(VAddr addr, size_t size, Svc::MemoryPermi
                                   KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
                                   KMemoryBlockDisableMergeAttribute::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 attr) {
@@ -1696,7 +1692,7 @@ Result KPageTable::SetMemoryAttribute(VAddr addr, size_t size, u32 mask, u32 att
                                   new_attr, KMemoryBlockDisableMergeAttribute::None,
                                   KMemoryBlockDisableMergeAttribute::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::SetMaxHeapSize(size_t size) {
@@ -1708,7 +1704,7 @@ Result KPageTable::SetMaxHeapSize(size_t size) {
 
     m_max_heap_size = size;
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
@@ -1769,11 +1765,11 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
 
         // Set the output.
         *out = m_heap_region_start;
-        return ResultSuccess;
+        R_SUCCEED();
     } else if (size == GetHeapSize()) {
         // The size requested is exactly the current size.
         *out = m_heap_region_start;
-        return ResultSuccess;
+        R_SUCCEED();
     } else {
         // We have to allocate memory. Determine how much to allocate and where while the table
        // is locked.
@@ -1847,7 +1843,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
 
         // Set the output.
         *out = m_heap_region_start;
-        return ResultSuccess;
+        R_SUCCEED();
     }
 }
 
@@ -1857,19 +1853,12 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
                                                   KMemoryPermission perm, PAddr map_addr) {
     KScopedLightLock lk(m_general_lock);
 
-    if (!CanContain(region_start, region_num_pages * PageSize, state)) {
-        return ResultInvalidCurrentMemory;
-    }
-
-    if (region_num_pages <= needed_num_pages) {
-        return ResultOutOfMemory;
-    }
-
+    R_UNLESS(CanContain(region_start, region_num_pages * PageSize, state),
+             ResultInvalidCurrentMemory);
+    R_UNLESS(region_num_pages > needed_num_pages, ResultOutOfMemory);
     const VAddr addr{
         AllocateVirtualMemory(region_start, region_num_pages, needed_num_pages, align)};
-    if (!addr) {
-        return ResultOutOfMemory;
-    }
+    R_UNLESS(addr, ResultOutOfMemory);
 
     // Create an update allocator.
     Result allocator_result{ResultSuccess};
@@ -1922,7 +1911,7 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem
     m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
                                       &KMemoryBlock::ShareToDevice, KMemoryPermission::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) {
@@ -1956,7 +1945,7 @@ Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) {
     m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func,
                                       KMemoryPermission::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) {
@@ -1984,24 +1973,24 @@ Result KPageTable::UnlockForDeviceAddressSpace(VAddr address, size_t size) {
     m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
                                       &KMemoryBlock::UnshareToDevice, KMemoryPermission::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::LockForCodeMemory(KPageGroup* out, VAddr addr, size_t size) {
-    return this->LockMemoryAndOpen(
+    R_RETURN(this->LockMemoryAndOpen(
         out, nullptr, addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
         KMemoryPermission::All, KMemoryPermission::UserReadWrite, KMemoryAttribute::All,
         KMemoryAttribute::None,
         static_cast<KMemoryPermission>(KMemoryPermission::NotMapped |
                                        KMemoryPermission::KernelReadWrite),
-        KMemoryAttribute::Locked);
+        KMemoryAttribute::Locked));
 }
 
 Result KPageTable::UnlockForCodeMemory(VAddr addr, size_t size, const KPageGroup& pg) {
-    return this->UnlockMemory(
+    R_RETURN(this->UnlockMemory(
         addr, size, KMemoryState::FlagCanCodeMemory, KMemoryState::FlagCanCodeMemory,
         KMemoryPermission::None, KMemoryPermission::None, KMemoryAttribute::All,
-        KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg);
+        KMemoryAttribute::Locked, KMemoryPermission::UserReadWrite, KMemoryAttribute::Locked, &pg));
 }
 
 bool KPageTable::IsRegionContiguous(VAddr addr, u64 size) const {
@@ -2056,7 +2045,7 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, const KPageGroup& page_
         addr += size;
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
@@ -2083,7 +2072,7 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
     default:
         ASSERT(false);
     }
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
@@ -2211,7 +2200,7 @@ Result KPageTable::CheckMemoryState(const KMemoryInfo& info, KMemoryState state_
     R_UNLESS((info.m_permission & perm_mask) == perm, ResultInvalidCurrentMemory);
     R_UNLESS((info.m_attribute & attr_mask) == attr, ResultInvalidCurrentMemory);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr addr, size_t size,
@@ -2253,7 +2242,7 @@ Result KPageTable::CheckMemoryStateContiguous(size_t* out_blocks_needed, VAddr a
         *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission* out_perm,
@@ -2315,7 +2304,7 @@ Result KPageTable::CheckMemoryState(KMemoryState* out_state, KMemoryPermission*
     if (out_blocks_needed != nullptr) {
         *out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
     }
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
@@ -2381,7 +2370,7 @@ Result KPageTable::LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr
                                   new_attr, KMemoryBlockDisableMergeAttribute::Locked,
                                   KMemoryBlockDisableMergeAttribute::None);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask,
@@ -2436,7 +2425,7 @@ Result KPageTable::UnlockMemory(VAddr addr, size_t size, KMemoryState state_mask
                                   new_attr, KMemoryBlockDisableMergeAttribute::None,
                                   KMemoryBlockDisableMergeAttribute::Locked);
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 } // namespace Kernel
@@ -57,9 +57,9 @@ public:
                     KMemoryPermission perm);
     Result MapPages(VAddr* out_addr, size_t num_pages, size_t alignment, PAddr phys_addr,
                     KMemoryState state, KMemoryPermission perm) {
-        return this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
-                              this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize,
-                              state, perm);
+        R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true,
+                                this->GetRegionAddress(state),
+                                this->GetRegionSize(state) / PageSize, state, perm));
     }
     Result UnmapPages(VAddr addr, KPageGroup& page_linked_list, KMemoryState state);
     Result UnmapPages(VAddr address, size_t num_pages, KMemoryState state);
@@ -137,8 +137,8 @@ private:
                                       KMemoryState state, KMemoryPermission perm_mask,
                                       KMemoryPermission perm, KMemoryAttribute attr_mask,
                                       KMemoryAttribute attr) const {
-        return this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
-                                                perm, attr_mask, attr);
+        R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask,
+                                                  perm, attr_mask, attr));
     }
 
     Result CheckMemoryState(const KMemoryInfo& info, KMemoryState state_mask, KMemoryState state,
@@ -155,15 +155,16 @@ private:
                             KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
                             KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
-        return CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
-                                state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr);
+        R_RETURN(CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size,
+                                  state_mask, state, perm_mask, perm, attr_mask, attr,
+                                  ignore_attr));
     }
     Result CheckMemoryState(VAddr addr, size_t size, KMemoryState state_mask, KMemoryState state,
                             KMemoryPermission perm_mask, KMemoryPermission perm,
                             KMemoryAttribute attr_mask, KMemoryAttribute attr,
                             KMemoryAttribute ignore_attr = DefaultMemoryIgnoreAttr) const {
-        return this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
-                                      attr_mask, attr, ignore_attr);
+        R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm,
+                                        attr_mask, attr, ignore_attr));
     }
 
     Result LockMemoryAndOpen(KPageGroup* out_pg, PAddr* out_paddr, VAddr addr, size_t size,
@@ -98,7 +98,7 @@ Result KProcess::Initialize(KProcess* process, Core::System& system, std::string
     // Open a reference to the resource limit.
     process->resource_limit->Open();
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 void KProcess::DoWorkerTaskImpl() {
@@ -246,7 +246,7 @@ Result KProcess::AddSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr ad
     shmem->Open();
     shemen_info->Open();
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 void KProcess::RemoveSharedMemory(KSharedMemory* shmem, [[maybe_unused]] VAddr address,
@@ -296,7 +296,7 @@ Result KProcess::Reset() {
 
     // Clear signaled.
     is_signaled = false;
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KProcess::SetActivity(ProcessActivity activity) {
@@ -312,9 +312,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
     // Either pause or resume.
     if (activity == ProcessActivity::Paused) {
         // Verify that we're not suspended.
-        if (is_suspended) {
-            return ResultInvalidState;
-        }
+        R_UNLESS(!is_suspended, ResultInvalidState);
 
         // Suspend all threads.
         for (auto* thread : GetThreadList()) {
@@ -327,9 +325,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
         ASSERT(activity == ProcessActivity::Runnable);
 
         // Verify that we're suspended.
-        if (!is_suspended) {
-            return ResultInvalidState;
-        }
+        R_UNLESS(is_suspended, ResultInvalidState);
 
         // Resume all threads.
         for (auto* thread : GetThreadList()) {
@@ -340,7 +336,7 @@ Result KProcess::SetActivity(ProcessActivity activity) {
         SetSuspended(false);
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std::size_t code_size) {
@@ -358,14 +354,14 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     if (!memory_reservation.Succeeded()) {
         LOG_ERROR(Kernel, "Could not reserve process memory requirements of size {:X} bytes",
                   code_size + system_resource_size);
-        return ResultLimitReached;
+        R_RETURN(ResultLimitReached);
     }
     // Initialize proces address space
     if (const Result result{page_table.InitializeForProcess(
             metadata.GetAddressSpaceType(), false, 0x8000000, code_size,
             &kernel.GetApplicationMemoryBlockManager(), KMemoryManager::Pool::Application)};
         result.IsError()) {
-        return result;
+        R_RETURN(result);
     }
 
     // Map process code region
@@ -373,7 +369,7 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
                                                code_size / PageSize, KMemoryState::Code,
                                                KMemoryPermission::None)};
         result.IsError()) {
-        return result;
+        R_RETURN(result);
     }
 
     // Initialize process capabilities
@@ -381,7 +377,7 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     if (const Result result{
             capabilities.InitializeForUserProcess(caps.data(), caps.size(), page_table)};
         result.IsError()) {
-        return result;
+        R_RETURN(result);
     }
 
     // Set memory usage capacity
@@ -405,7 +401,7 @@ Result KProcess::LoadFromMetadata(const FileSys::ProgramMetadata& metadata, std:
     R_TRY(this->CreateThreadLocalRegion(std::addressof(plr_address)));
     memory_reservation.Commit();
 
-    return handle_table.Initialize(capabilities.GetHandleTableSize());
+    R_RETURN(handle_table.Initialize(capabilities.GetHandleTableSize()));
 }
 
 void KProcess::Run(s32 main_thread_priority, u64 stack_size) {
@@ -504,7 +500,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
             }
 
             *out = tlr;
-            return ResultSuccess;
+            R_SUCCEED();
         }
     }
 
@@ -533,7 +529,7 @@ Result KProcess::CreateThreadLocalRegion(VAddr* out) {
     // We succeeded!
     tlp_guard.Cancel();
     *out = tlr;
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
@@ -581,7 +577,7 @@ Result KProcess::DeleteThreadLocalRegion(VAddr addr) {
         KThreadLocalPage::Free(kernel, page_to_free);
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 bool KProcess::InsertWatchpoint(Core::System& system, VAddr addr, u64 size,
@@ -682,15 +678,7 @@ Result KProcess::AllocateMainThreadStack(std::size_t stack_size) {
 
     main_thread_stack_top += main_thread_stack_size;
 
-    return ResultSuccess;
-}
-
-void KProcess::FinalizeHandleTable() {
-    // Finalize the table.
-    handle_table.Finalize();
-
-    // Note that the table is finalized.
-    is_handle_table_initialized = false;
+    R_SUCCEED();
 }
 
 } // namespace Kernel
@@ -138,16 +138,16 @@ public:
     }
 
     Result WaitConditionVariable(VAddr address, u64 cv_key, u32 tag, s64 ns) {
-        return condition_var.Wait(address, cv_key, tag, ns);
+        R_RETURN(condition_var.Wait(address, cv_key, tag, ns));
     }
 
     Result SignalAddressArbiter(VAddr address, Svc::SignalType signal_type, s32 value, s32 count) {
-        return address_arbiter.SignalToAddress(address, signal_type, value, count);
+        R_RETURN(address_arbiter.SignalToAddress(address, signal_type, value, count));
     }
 
     Result WaitAddressArbiter(VAddr address, Svc::ArbitrationType arb_type, s32 value,
                               s64 timeout) {
-        return address_arbiter.WaitForAddress(address, arb_type, value, timeout);
+        R_RETURN(address_arbiter.WaitForAddress(address, arb_type, value, timeout));
     }
 
     VAddr GetProcessLocalRegionAddress() const {
@@ -407,13 +407,19 @@ private:
         pinned_threads[core_id] = nullptr;
     }
 
+    void FinalizeHandleTable() {
+        // Finalize the table.
+        handle_table.Finalize();
+
+        // Note that the table is finalized.
+        is_handle_table_initialized = false;
+    }
+
     void ChangeState(State new_state);
 
     /// Allocates the main thread stack for the process, given the stack size in bytes.
     Result AllocateMainThreadStack(std::size_t stack_size);
 
-    void FinalizeHandleTable();
-
     /// Memory manager for this process
     KPageTable page_table;
 
@@ -245,7 +245,7 @@ Result KThread::Initialize(KThreadFunction func, uintptr_t arg, VAddr user_stack
         }
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_t arg,
@@ -258,7 +258,7 @@ Result KThread::InitializeThread(KThread* thread, KThreadFunction func, uintptr_
     thread->host_context = std::make_shared<Common::Fiber>(std::move(init_func));
     thread->is_single_core = !Settings::values.use_multi_core.GetValue();
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KThread::InitializeDummyThread(KThread* thread) {
@@ -268,31 +268,32 @@ Result KThread::InitializeDummyThread(KThread* thread) {
     // Initialize emulation parameters.
     thread->stack_parameters.disable_count = 0;
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KThread::InitializeMainThread(Core::System& system, KThread* thread, s32 virt_core) {
-    return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
-                            system.GetCpuManager().GetGuestActivateFunc());
+    R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
+                              ThreadType::Main, system.GetCpuManager().GetGuestActivateFunc()));
 }
 
 Result KThread::InitializeIdleThread(Core::System& system, KThread* thread, s32 virt_core) {
-    return InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {}, ThreadType::Main,
-                            system.GetCpuManager().GetIdleThreadStartFunc());
+    R_RETURN(InitializeThread(thread, {}, {}, {}, IdleThreadPriority, virt_core, {},
+                              ThreadType::Main, system.GetCpuManager().GetIdleThreadStartFunc()));
 }
 
 Result KThread::InitializeHighPriorityThread(Core::System& system, KThread* thread,
                                              KThreadFunction func, uintptr_t arg, s32 virt_core) {
-    return InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr, ThreadType::HighPriority,
-                            system.GetCpuManager().GetShutdownThreadStartFunc());
+    R_RETURN(InitializeThread(thread, func, arg, {}, {}, virt_core, nullptr,
+                              ThreadType::HighPriority,
+                              system.GetCpuManager().GetShutdownThreadStartFunc()));
 }
 
 Result KThread::InitializeUserThread(Core::System& system, KThread* thread, KThreadFunction func,
                                      uintptr_t arg, VAddr user_stack_top, s32 prio, s32 virt_core,
                                      KProcess* owner) {
     system.Kernel().GlobalSchedulerContext().AddThread(thread);
-    return InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
-                            ThreadType::User, system.GetCpuManager().GetGuestThreadFunc());
+    R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner,
+                              ThreadType::User, system.GetCpuManager().GetGuestThreadFunc()));
 }
 
 void KThread::PostDestroy(uintptr_t arg) {
@@ -542,7 +543,7 @@ Result KThread::GetCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
     *out_ideal_core = virtual_ideal_core_id;
     *out_affinity_mask = virtual_affinity_mask;
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask) {
@@ -558,7 +559,7 @@ Result KThread::GetPhysicalCoreMask(s32* out_ideal_core, u64* out_affinity_mask)
         *out_affinity_mask = original_physical_affinity_mask.GetAffinityMask();
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
@@ -670,7 +671,7 @@ Result KThread::SetCoreMask(s32 core_id_, u64 v_affinity_mask) {
         } while (retry_update);
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 void KThread::SetBasePriority(s32 value) {
@@ -843,7 +844,7 @@ Result KThread::SetActivity(Svc::ThreadActivity activity) {
         } while (thread_is_current);
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KThread::GetThreadContext3(std::vector<u8>& out) {
@@ -878,7 +879,7 @@ Result KThread::GetThreadContext3(std::vector<u8>& out) {
         }
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 void KThread::AddWaiterImpl(KThread* thread) {
@@ -1042,7 +1043,7 @@ Result KThread::Run() {
         // Set our state and finish.
        SetState(ThreadState::Runnable);
 
-        return ResultSuccess;
+        R_SUCCEED();
     }
 }
 
@@ -1089,7 +1090,7 @@ Result KThread::Terminate() {
                                    Svc::WaitInfinite));
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 ThreadState KThread::RequestTerminate() {
@@ -1162,7 +1163,7 @@ Result KThread::Sleep(s64 timeout) {
         // Check if the thread should terminate.
        if (this->IsTerminationRequested()) {
            slp.CancelSleep();
-            return ResultTerminationRequested;
+            R_THROW(ResultTerminationRequested);
        }
 
        // Wait for the sleep to end.
@@ -1170,7 +1171,7 @@ Result KThread::Sleep(s64 timeout) {
        SetWaitReasonForDebugging(ThreadWaitReasonForDebugging::Sleep);
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 void KThread::IfDummyThreadTryWait() {
@@ -470,9 +470,6 @@ constexpr inline Result __TmpCurrentResultReference = ResultSuccess;
 #define R_UNLESS(expr, res)                                                                        \
     {                                                                                              \
         if (!(expr)) {                                                                             \
-            if (res.IsError()) {                                                                   \
-                LOG_ERROR(Kernel, "Failed with result: {}", res.raw);                              \
-            }                                                                                      \
             R_THROW(res);                                                                          \
         }                                                                                          \
     }