citra-emu/citra-canary

kernel/thread: Change owner_process to std::weak_ptr (#5325)

* kernel/thread: Change owner_process to std::weak_ptr

Previously, this `shared_ptr` formed a reference cycle that leaked almost all kernel objects: Threads own Processes, which own HandleTables, which own maps of Objects, which include Threads, so no reference count in the cycle could ever drop to zero.

Changing this to a `weak_ptr` at least got the camera interfaces to destruct properly. I did not really check the other objects, though, and there are probably more leaks.
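For illustration, here is a minimal standalone sketch of that cycle; the types are simplified stand-ins for the real kernel classes, not Citra's actual declarations:

```cpp
#include <memory>
#include <vector>

struct Thread;

// Stand-in for Kernel::Process: the handle table holds shared_ptr
// references to kernel objects, including the process's own threads.
struct Process {
    std::vector<std::shared_ptr<Thread>> handle_table;
};

// Stand-in for Kernel::Thread.
struct Thread {
    // Before this change: std::shared_ptr<Process> owner_process;
    // That closes the cycle Thread -> Process -> HandleTable -> Thread,
    // so neither reference count can ever drop to zero.
    std::weak_ptr<Process> owner_process; // weak reference breaks the cycle
};

int main() {
    auto process = std::make_shared<Process>();
    auto thread = std::make_shared<Thread>();
    process->handle_table.push_back(thread);
    thread->owner_process = process;
    // With a shared_ptr owner_process, both objects would leak here; with
    // the weak_ptr, dropping `process` destroys it and the thread it owns.
}
```

The trade-off, visible throughout the diff below, is that every user of `owner_process` must now call `lock()` and cope with a potentially expired owner.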

* hle/kernel: Lock certain objects while deserializing

When deserializing other kernel objects, these objects (`MemoryRegion`s and `VMManager`s) can be modified along the way. To avoid inconsistent state caused by destructor side effects, we simply lock them until loading has fully completed.
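In outline, the pattern works as in this simplified sketch (modeled on the `is_locked`/`Unlock()` changes in the diffs below):

```cpp
#include <cassert>

// Simplified stand-in; the same idea applies to VMManager.
struct MemoryRegionInfo {
    bool is_locked{};

    void Allocate(/* ... */) {
        // Allocating while locked indicates a logic error.
        assert(!is_locked);
        // ... normal allocator bookkeeping ...
    }

    void Free(/* ... */) {
        // Frees triggered by destructor side effects of other objects being
        // deserialized are ignored rather than corrupting the freshly loaded
        // allocator state.
        if (is_locked) {
            return;
        }
        // ... normal allocator bookkeeping ...
    }

    void Unlock() {
        is_locked = false;
    }

    template <class Archive>
    void serialize(Archive& ar, const unsigned int) {
        // ... serialize the members ...
        if (Archive::is_loading::value) {
            is_locked = true; // the object locks itself right after loading
        }
    }
};

int main() {
    MemoryRegionInfo region;
    region.is_locked = true; // as if just deserialized
    region.Free();           // ignored while locked
    region.Unlock();         // KernelSystem does this once loading completes
    region.Free();           // normal behavior again
}
```

`KernelSystem::serialize` performs the final `Unlock()` pass over the memory regions and process `VMManager`s only after the entire kernel state has been loaded.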

* Fix silly typo

Somehow this didn't break?!
Pengfei Zhu, 2020-11-15 19:59:45 +08:00, committed by GitHub
parent 80c9f9abbb
commit de3d7cf49f
13 changed files with 121 additions and 31 deletions

[Changed file 1 of 13]

@@ -243,7 +243,6 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());
const auto& thread = static_cast<const Kernel::Thread&>(object);
const auto& process = thread.owner_process;
QString processor;
switch (thread.processor_id) {
@@ -267,10 +266,12 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
list.push_back(std::make_unique<WaitTreeText>(tr("object id = %1").arg(thread.GetObjectId())));
list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor)));
list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadId())));
list.push_back(
std::make_unique<WaitTreeText>(tr("process = %1 (%2)")
.arg(QString::fromStdString(process->GetName()))
.arg(process->process_id)));
if (auto process = thread.owner_process.lock()) {
list.push_back(
std::make_unique<WaitTreeText>(tr("process = %1 (%2)")
.arg(QString::fromStdString(process->GetName()))
.arg(process->process_id)));
}
list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)")
.arg(thread.current_priority)
.arg(thread.nominal_priority)));

[Changed file 2 of 13]

@@ -29,7 +29,9 @@ public:
callback->WakeUp(thread, *context, reason);
}
auto& process = thread->owner_process;
auto process = thread->owner_process.lock();
ASSERT(process);
// We must copy the entire command buffer *plus* the entire static buffers area, since
// the translation might need to read from it in order to retrieve the StaticBuffer
// target addresses.

[Changed file 3 of 13]

@@ -24,8 +24,9 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
VAddr dst_address,
std::vector<MappedBufferContext>& mapped_buffer_context,
bool reply) {
auto& src_process = src_thread->owner_process;
auto& dst_process = dst_thread->owner_process;
auto src_process = src_thread->owner_process.lock();
auto dst_process = dst_thread->owner_process.lock();
ASSERT(src_process && dst_process);
IPC::Header header;
// TODO(Subv): Replace by Memory::Read32 when possible.

[Changed file 4 of 13]

@@ -50,19 +50,21 @@ void Recorder::RegisterRequest(const std::shared_ptr<Kernel::ClientSession>& cli
const std::shared_ptr<Kernel::Thread>& client_thread) {
const u32 thread_id = client_thread->GetThreadId();
RequestRecord record = {/* id */ ++record_count,
/* status */ RequestStatus::Sent,
/* client_process */ GetObjectInfo(client_thread->owner_process.get()),
/* client_thread */ GetObjectInfo(client_thread.get()),
/* client_session */ GetObjectInfo(client_session.get()),
/* client_port */ GetObjectInfo(client_session->parent->port.get()),
/* server_process */ {},
/* server_thread */ {},
/* server_session */ GetObjectInfo(client_session->parent->server)};
record_map.insert_or_assign(thread_id, std::make_unique<RequestRecord>(record));
client_session_map.insert_or_assign(thread_id, client_session);
if (auto owner_process = client_thread->owner_process.lock()) {
RequestRecord record = {/* id */ ++record_count,
/* status */ RequestStatus::Sent,
/* client_process */ GetObjectInfo(owner_process.get()),
/* client_thread */ GetObjectInfo(client_thread.get()),
/* client_session */ GetObjectInfo(client_session.get()),
/* client_port */ GetObjectInfo(client_session->parent->port.get()),
/* server_process */ {},
/* server_thread */ {},
/* server_session */ GetObjectInfo(client_session->parent->server)};
record_map.insert_or_assign(thread_id, std::make_unique<RequestRecord>(record));
client_session_map.insert_or_assign(thread_id, client_session);
InvokeCallbacks(record);
InvokeCallbacks(record);
}
}
void Recorder::SetRequestInfo(const std::shared_ptr<Kernel::Thread>& client_thread,
@@ -82,7 +84,9 @@ void Recorder::SetRequestInfo(const std::shared_ptr<Kernel::Thread>& client_thre
record.translated_request_cmdbuf = std::move(translated_cmdbuf);
if (server_thread) {
record.server_process = GetObjectInfo(server_thread->owner_process.get());
if (auto owner_process = server_thread->owner_process.lock()) {
record.server_process = GetObjectInfo(owner_process.get());
}
record.server_thread = GetObjectInfo(server_thread.get());
} else {
record.is_hle = true;

[Changed file 5 of 13]

@@ -177,6 +177,15 @@ void KernelSystem::serialize(Archive& ar, const unsigned int file_version) {
ar& stored_processes;
ar& next_thread_id;
// Deliberately don't include debugger info to allow debugging through loads
if (Archive::is_loading::value) {
for (auto& memory_region : memory_regions) {
memory_region->Unlock();
}
for (auto& process : process_list) {
process->vm_manager.Unlock();
}
}
}
SERIALIZE_IMPL(KernelSystem)

[Changed file 6 of 13]

@@ -174,6 +174,8 @@ void KernelSystem::MapSharedPages(VMManager& address_space) {
}
void MemoryRegionInfo::Reset(u32 base, u32 size) {
ASSERT(!is_locked);
this->base = base;
this->size = size;
used = 0;
@@ -184,6 +186,8 @@
}
MemoryRegionInfo::IntervalSet MemoryRegionInfo::HeapAllocate(u32 size) {
ASSERT(!is_locked);
IntervalSet result;
u32 rest = size;
@@ -211,6 +215,8 @@ MemoryRegionInfo::IntervalSet MemoryRegionInfo::HeapAllocate(u32 size) {
}
bool MemoryRegionInfo::LinearAllocate(u32 offset, u32 size) {
ASSERT(!is_locked);
Interval interval(offset, offset + size);
if (!boost::icl::contains(free_blocks, interval)) {
// The requested range is already allocated
@@ -222,6 +228,8 @@ bool MemoryRegionInfo::LinearAllocate(u32 offset, u32 size) {
}
std::optional<u32> MemoryRegionInfo::LinearAllocate(u32 size) {
ASSERT(!is_locked);
// Find the first sufficient continuous block from the lower address
for (const auto& interval : free_blocks) {
ASSERT(interval.bounds() == boost::icl::interval_bounds::right_open());
@@ -238,10 +246,18 @@ std::optional<u32> MemoryRegionInfo::LinearAllocate(u32 size) {
}
void MemoryRegionInfo::Free(u32 offset, u32 size) {
if (is_locked) {
return;
}
Interval interval(offset, offset + size);
ASSERT(!boost::icl::intersects(free_blocks, interval)); // must be allocated blocks
free_blocks += interval;
used -= size;
}
void MemoryRegionInfo::Unlock() {
is_locked = false;
}
} // namespace Kernel

[Changed file 7 of 13]

@@ -26,6 +26,10 @@ struct MemoryRegionInfo {
IntervalSet free_blocks;
// When locked, Free calls will be ignored, while Allocate calls will hit an assert. A memory
// region locks itself after deserialization.
bool is_locked{};
/**
* Reset the allocator state
* @param base The base offset the beginning of FCRAM.
@@ -63,6 +67,11 @@ struct MemoryRegionInfo {
*/
void Free(u32 offset, u32 size);
/**
* Unlock the MemoryRegion. Used after loading is completed.
*/
void Unlock();
private:
friend class boost::serialization::access;
template <class Archive>
@@ -71,6 +80,9 @@
ar& size;
ar& used;
ar& free_blocks;
if (Archive::is_loading::value) {
is_locked = true;
}
}
};

[Changed file 8 of 13]

@@ -85,7 +85,8 @@ ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread) {
// If this ServerSession has an associated HLE handler, forward the request to it.
if (hle_handler != nullptr) {
std::array<u32_le, IPC::COMMAND_BUFFER_LENGTH + 2 * IPC::MAX_STATIC_BUFFERS> cmd_buf;
auto current_process = thread->owner_process;
auto current_process = thread->owner_process.lock();
ASSERT(current_process);
kernel.memory.ReadBlock(*current_process, thread->GetCommandBufferAddress(), cmd_buf.data(),
cmd_buf.size() * sizeof(u32));

[Changed file 9 of 13]

@@ -283,7 +283,7 @@ void SVC::ExitProcess() {
// Stop all the process threads that are currently waiting for objects.
auto& thread_list = kernel.GetCurrentThreadManager().GetThreadList();
for (auto& thread : thread_list) {
if (thread->owner_process != current_process)
if (thread->owner_process.lock() != current_process)
continue;
if (thread.get() == kernel.GetCurrentThreadManager().GetCurrentThread())
@@ -1041,8 +1041,7 @@ ResultCode SVC::GetProcessIdOfThread(u32* process_id, Handle thread_handle) {
if (thread == nullptr)
return ERR_INVALID_HANDLE;
const std::shared_ptr<Process> process = thread->owner_process;
const std::shared_ptr<Process> process = thread->owner_process.lock();
ASSERT_MSG(process != nullptr, "Invalid parent process for thread={:#010X}", thread_handle);
*process_id = process->process_id;

[Changed file 10 of 13]

@@ -45,7 +45,16 @@ void Thread::serialize(Archive& ar, const unsigned int file_version) {
ar& tls_address;
ar& held_mutexes;
ar& pending_mutexes;
ar& owner_process;
// Note: this is equivalent of what is done in boost/serialization/weak_ptr.hpp, but it's
// compatible with previous versions of savestates.
// TODO(SaveStates): When the savestate version is bumped, simplify this again.
std::shared_ptr<Process> shared_owner_process = owner_process.lock();
ar& shared_owner_process;
if (Archive::is_loading::value) {
owner_process = shared_owner_process;
}
ar& wait_objects;
ar& wait_address;
ar& name;
@@ -99,7 +108,8 @@ void Thread::Stop() {
u32 tls_page = (tls_address - Memory::TLS_AREA_VADDR) / Memory::PAGE_SIZE;
u32 tls_slot =
((tls_address - Memory::TLS_AREA_VADDR) % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
owner_process->tls_slots[tls_page].reset(tls_slot);
ASSERT(owner_process.lock());
owner_process.lock()->tls_slots[tls_page].reset(tls_slot);
}
void ThreadManager::SwitchContext(Thread* new_thread) {
@@ -110,7 +120,7 @@ void ThreadManager::SwitchContext(Thread* new_thread) {
// Save context for previous thread
if (previous_thread) {
previous_process = previous_thread->owner_process;
previous_process = previous_thread->owner_process.lock();
previous_thread->last_running_ticks = cpu->GetTimer().GetTicks();
cpu->SaveContext(previous_thread->context);
@@ -135,8 +145,9 @@ void ThreadManager::SwitchContext(Thread* new_thread) {
ready_queue.remove(new_thread->current_priority, new_thread);
new_thread->status = ThreadStatus::Running;
if (previous_process != current_thread->owner_process) {
kernel.SetCurrentProcessForCPU(current_thread->owner_process, cpu->GetID());
ASSERT(current_thread->owner_process.lock());
if (previous_process != current_thread->owner_process.lock()) {
kernel.SetCurrentProcessForCPU(current_thread->owner_process.lock(), cpu->GetID());
}
cpu->LoadContext(new_thread->context);

[Changed file 11 of 13]

@@ -308,7 +308,7 @@ public:
/// Mutexes that this thread is currently waiting for.
boost::container::flat_set<std::shared_ptr<Mutex>> pending_mutexes{};
std::shared_ptr<Process> owner_process{}; ///< Process that owns this thread
std::weak_ptr<Process> owner_process{}; ///< Process that owns this thread
/// Objects that the thread is waiting on, in the same order as they were
// passed to WaitSynchronization1/N.

[Changed file 12 of 13]

@@ -45,6 +45,8 @@ VMManager::VMManager(Memory::MemorySystem& memory)
VMManager::~VMManager() = default;
void VMManager::Reset() {
ASSERT(!is_locked);
vma_map.clear();
// Initialize the map with a single free region covering the entire managed space.
@@ -67,6 +69,7 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
ResultVal<VAddr> VMManager::MapBackingMemoryToBase(VAddr base, u32 region_size, MemoryRef memory,
u32 size, MemoryState state) {
ASSERT(!is_locked);
// Find the first Free VMA.
VMAHandle vma_handle = std::find_if(vma_map.begin(), vma_map.end(), [&](const auto& vma) {
@@ -96,6 +99,7 @@ ResultVal<VAddr> VMManager::MapBackingMemoryToBase(VAddr base, u32 region_size,
ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, MemoryRef memory,
u32 size, MemoryState state) {
ASSERT(!is_locked);
ASSERT(memory.GetPtr() != nullptr);
// This is the appropriately sized VMA that will turn into our allocation.
@@ -115,6 +119,8 @@ ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, Memory
ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u32 size,
MemoryState state,
Memory::MMIORegionPointer mmio_handler) {
ASSERT(!is_locked);
// This is the appropriately sized VMA that will turn into our allocation.
CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
VirtualMemoryArea& final_vma = vma_handle->second;
@@ -133,6 +139,10 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u3
ResultCode VMManager::ChangeMemoryState(VAddr target, u32 size, MemoryState expected_state,
VMAPermission expected_perms, MemoryState new_state,
VMAPermission new_perms) {
if (is_locked) {
return RESULT_SUCCESS;
}
VAddr target_end = target + size;
VMAIter begin_vma = StripIterConstness(FindVMA(target));
VMAIter i_end = vma_map.lower_bound(target_end);
@@ -167,6 +177,8 @@
}
VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
ASSERT(!is_locked);
VirtualMemoryArea& vma = vma_handle->second;
vma.type = VMAType::Free;
vma.permissions = VMAPermission::None;
@@ -181,6 +193,8 @@ VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
}
ResultCode VMManager::UnmapRange(VAddr target, u32 size) {
ASSERT(!is_locked);
CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
const VAddr target_end = target + size;
@@ -196,6 +210,8 @@ ResultCode VMManager::UnmapRange(VAddr target, u32 size) {
}
VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) {
ASSERT(!is_locked);
VMAIter iter = StripIterConstness(vma_handle);
VirtualMemoryArea& vma = iter->second;
@@ -206,6 +222,8 @@ VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission ne
}
ResultCode VMManager::ReprotectRange(VAddr target, u32 size, VMAPermission new_perms) {
ASSERT(!is_locked);
CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
const VAddr target_end = target + size;
@@ -231,6 +249,10 @@ void VMManager::LogLayout(Log::Level log_level) const {
}
}
void VMManager::Unlock() {
is_locked = false;
}
VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) {
// This uses a neat C++ trick to convert a const_iterator to a regular iterator, given
// non-const access to its container.

[Changed file 13 of 13]

@@ -212,6 +212,11 @@ public:
/// is scheduled.
std::shared_ptr<Memory::PageTable> page_table;
/**
* Unlock the VMManager. Used after loading is completed.
*/
void Unlock();
private:
using VMAIter = decltype(vma_map)::iterator;
@@ -250,10 +255,17 @@ private:
Memory::MemorySystem& memory;
// When locked, ChangeMemoryState calls will be ignored, other modification calls will hit an
// assert. VMManager locks itself after deserialization.
bool is_locked{};
template <class Archive>
void serialize(Archive& ar, const unsigned int) {
ar& vma_map;
ar& page_table;
if (Archive::is_loading::value) {
is_locked = true;
}
}
friend class boost::serialization::access;
};