yuzu-emu / yuzu (archived)

kernel: memory: Improve implementation of device shared memory. (#3707)

* kernel: memory: Improve implementation of device shared memory.

* fixup! kernel: memory: Improve implementation of device shared memory.

* fixup! kernel: memory: Improve implementation of device shared memory.
bunnei 2020-04-23 11:37:12 -04:00 committed by GitHub
parent eb26e9e711
commit ff0c49e1ce
6 changed files with 110 additions and 16 deletions

@@ -17,7 +17,7 @@ namespace Kernel::Memory {
enum class MemoryState : u32 {
None = 0,
- Mask = 0xFFFFFFFF, // TODO(bunnei): This should probable be 0xFF
+ Mask = 0xFF,
All = ~None,
FlagCanReprotect = (1 << 8),
@@ -253,6 +253,23 @@ public:
};
}
void ShareToDevice(MemoryPermission /*new_perm*/) {
ASSERT((attribute & MemoryAttribute::DeviceShared) == MemoryAttribute::DeviceShared ||
device_use_count == 0);
attribute |= MemoryAttribute::DeviceShared;
const u16 new_use_count{++device_use_count};
ASSERT(new_use_count > 0);
}
void UnshareToDevice(MemoryPermission /*new_perm*/) {
ASSERT((attribute & MemoryAttribute::DeviceShared) == MemoryAttribute::DeviceShared);
const u16 prev_use_count{device_use_count--};
ASSERT(prev_use_count > 0);
if (prev_use_count == 1) {
attribute &= ~MemoryAttribute::DeviceShared;
}
}
private:
constexpr bool HasProperties(MemoryState s, MemoryPermission p, MemoryAttribute a) const {
constexpr MemoryAttribute AttributeIgnoreMask{MemoryAttribute::DontCareMask |
@@ -287,9 +304,9 @@ private:
state = new_state;
perm = new_perm;
- // TODO(bunnei): Is this right?
attribute = static_cast<MemoryAttribute>(
- new_attribute /*| (attribute & (MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared))*/);
+ new_attribute |
+ (attribute & (MemoryAttribute::IpcLocked | MemoryAttribute::DeviceShared)));
}
constexpr MemoryBlock Split(VAddr split_addr) {
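The block-level change above replaces the bare DeviceShared attribute bit with a use count: ShareToDevice increments device_use_count and sets the flag, UnshareToDevice only clears the flag once the count drops back to zero, and Update now preserves IpcLocked/DeviceShared instead of discarding them. A minimal standalone sketch of the same counting scheme (the Block type and flag value are illustrative stand-ins, not yuzu's MemoryBlock):

```cpp
#include <cassert>
#include <cstdint>

// Minimal sketch of the reference-counted DeviceShared attribute; the Block
// type and flag value are illustrative, not yuzu's.
constexpr std::uint32_t DeviceShared = 1U << 0;

struct Block {
    std::uint32_t attribute = 0;
    std::uint16_t device_use_count = 0;

    void ShareToDevice() {
        // Either the flag is already set (count > 0) or this is the first share.
        assert((attribute & DeviceShared) != 0 || device_use_count == 0);
        attribute |= DeviceShared;
        ++device_use_count;
    }

    void UnshareToDevice() {
        assert(device_use_count > 0);
        // Only the last unshare clears the flag.
        if (--device_use_count == 0) {
            attribute &= ~DeviceShared;
        }
    }
};

int main() {
    Block b;
    b.ShareToDevice();   // first GPU mapping of the range
    b.ShareToDevice();   // second mapping of the same range
    b.UnshareToDevice(); // flag stays set, one mapping remains
    assert((b.attribute & DeviceShared) != 0);
    b.UnshareToDevice(); // last unmap clears DeviceShared
    assert((b.attribute & DeviceShared) == 0);
    return 0;
}
```

The use count is what lets the same guest range back more than one GPU mapping without the first unmap clearing the attribute for the others.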

@@ -143,6 +143,42 @@ void MemoryBlockManager::Update(VAddr addr, std::size_t num_pages, MemoryState s
}
}
void MemoryBlockManager::UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func,
MemoryPermission perm) {
const std::size_t prev_count{memory_block_tree.size()};
const VAddr end_addr{addr + num_pages * PageSize};
iterator node{memory_block_tree.begin()};
while (node != memory_block_tree.end()) {
MemoryBlock* block{&(*node)};
iterator next_node{std::next(node)};
const VAddr cur_addr{block->GetAddress()};
const VAddr cur_end_addr{block->GetNumPages() * PageSize + cur_addr};
if (addr < cur_end_addr && cur_addr < end_addr) {
iterator new_node{node};
if (addr > cur_addr) {
memory_block_tree.insert(node, block->Split(addr));
}
if (end_addr < cur_end_addr) {
new_node = memory_block_tree.insert(node, block->Split(end_addr));
}
lock_func(new_node, perm);
MergeAdjacent(new_node, next_node);
}
if (cur_end_addr - 1 >= end_addr - 1) {
break;
}
node = next_node;
}
}
void MemoryBlockManager::IterateForRange(VAddr start, VAddr end, IterateFunc&& func) {
const_iterator it{FindIterator(start)};
MemoryInfo info{};
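The new UpdateLock above walks the block tree, splits any block that straddles the start or end of the requested range so that only fully covered pieces remain, applies the caller-supplied lock_func to each covered block, and stops once the range end is reached. A simplified sketch of that split-and-apply pattern over a std::list (stand-in Block type; the MergeAdjacent step is omitted):

```cpp
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <list>

// Simplified sketch of "split blocks at the range edges, then apply a callback
// to the covered pieces"; Block and PageSize are stand-ins, not yuzu's types.
constexpr std::size_t PageSize = 0x1000;

struct Block {
    std::uint64_t addr;
    std::size_t num_pages;
    bool device_shared = false;

    std::uint64_t End() const { return addr + num_pages * PageSize; }

    // Split at split_addr; this block keeps the front part, the returned
    // block holds the tail.
    Block Split(std::uint64_t split_addr) {
        Block tail{split_addr, static_cast<std::size_t>((End() - split_addr) / PageSize),
                   device_shared};
        num_pages -= tail.num_pages;
        return tail;
    }
};

void UpdateLock(std::list<Block>& blocks, std::uint64_t addr, std::size_t num_pages,
                const std::function<void(Block&)>& lock_func) {
    const std::uint64_t end_addr = addr + num_pages * PageSize;
    for (auto it = blocks.begin(); it != blocks.end(); ++it) {
        if (addr < it->End() && it->addr < end_addr) { // block overlaps the range
            if (addr > it->addr) {
                // Keep the uncovered front part as its own block.
                Block covered = it->Split(addr);
                it = blocks.insert(std::next(it), covered);
            }
            if (end_addr < it->End()) {
                // Keep the uncovered tail as its own block.
                blocks.insert(std::next(it), it->Split(end_addr));
            }
            lock_func(*it);
        }
        if (it->End() >= end_addr) {
            break;
        }
    }
}

int main() {
    std::list<Block> blocks{{0x0000, 16}}; // one 64 KiB block
    UpdateLock(blocks, 0x4000, 4, [](Block& b) { b.device_shared = true; });
    for (const Block& b : blocks) {
        std::cout << std::hex << b.addr << std::dec << " pages=" << b.num_pages
                  << " shared=" << b.device_shared << '\n';
    }
    return 0;
}
```

The real UpdateLock additionally re-merges adjacent blocks with identical properties via MergeAdjacent once the functor has run.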

@@ -45,6 +45,9 @@ public:
MemoryPermission perm = MemoryPermission::None,
MemoryAttribute attribute = MemoryAttribute::None);
using LockFunc = std::function<void(iterator, MemoryPermission)>;
void UpdateLock(VAddr addr, std::size_t num_pages, LockFunc&& lock_func, MemoryPermission perm);
using IterateFunc = std::function<void(const MemoryInfo&)>;
void IterateForRange(VAddr start, VAddr end, IterateFunc&& func);

@@ -840,6 +840,50 @@ ResultVal<VAddr> PageTable::AllocateAndMapMemory(std::size_t needed_num_pages, s
return MakeResult<VAddr>(addr);
}
ResultCode PageTable::LockForDeviceAddressSpace(VAddr addr, std::size_t size) {
std::lock_guard lock{page_table_lock};
MemoryPermission perm{};
if (const ResultCode result{CheckMemoryState(
nullptr, &perm, nullptr, addr, size, MemoryState::FlagCanChangeAttribute,
MemoryState::FlagCanChangeAttribute, MemoryPermission::None, MemoryPermission::None,
MemoryAttribute::LockedAndIpcLocked, MemoryAttribute::None,
MemoryAttribute::DeviceSharedAndUncached)};
result.IsError()) {
return result;
}
block_manager->UpdateLock(addr, size / PageSize,
[perm](MemoryBlockManager::iterator block, MemoryPermission perm) {
block->ShareToDevice(perm);
},
perm);
return RESULT_SUCCESS;
}
ResultCode PageTable::UnlockForDeviceAddressSpace(VAddr addr, std::size_t size) {
std::lock_guard lock{page_table_lock};
MemoryPermission perm{};
if (const ResultCode result{CheckMemoryState(
nullptr, &perm, nullptr, addr, size, MemoryState::FlagCanChangeAttribute,
MemoryState::FlagCanChangeAttribute, MemoryPermission::None, MemoryPermission::None,
MemoryAttribute::LockedAndIpcLocked, MemoryAttribute::None,
MemoryAttribute::DeviceSharedAndUncached)};
result.IsError()) {
return result;
}
block_manager->UpdateLock(addr, size / PageSize,
[perm](MemoryBlockManager::iterator block, MemoryPermission perm) {
block->UnshareToDevice(perm);
},
perm);
return RESULT_SUCCESS;
}
ResultCode PageTable::InitializeMemoryLayout(VAddr start, VAddr end) {
block_manager = std::make_unique<MemoryBlockManager>(start, end);
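Both new page-table entry points follow the same shape: take page_table_lock, validate the whole range with CheckMemoryState, and only then hand a small lambda to the block manager's UpdateLock. A condensed, self-contained sketch of that check-then-apply flow (all types here are simplified stand-ins, not yuzu's API):

```cpp
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <mutex>
#include <vector>

// Condensed sketch of the check-then-apply flow used by
// Lock/UnlockForDeviceAddressSpace; every type is a simplified stand-in.
enum class ResultCode { Success, InvalidState };

struct Block {
    bool can_change_attribute = true;
    std::uint16_t device_use_count = 0;
};

struct DevicePageTable {
    std::mutex page_table_lock;
    std::vector<Block> blocks; // one entry per page, for simplicity

    ResultCode Update(std::size_t first, std::size_t count,
                      const std::function<void(Block&)>& func) {
        std::lock_guard lock{page_table_lock};
        // Stand-in for CheckMemoryState: fail before modifying anything.
        for (std::size_t i = first; i < first + count; ++i) {
            if (!blocks[i].can_change_attribute) {
                return ResultCode::InvalidState;
            }
        }
        // Stand-in for MemoryBlockManager::UpdateLock with the given lambda.
        for (std::size_t i = first; i < first + count; ++i) {
            func(blocks[i]);
        }
        return ResultCode::Success;
    }

    ResultCode LockForDevice(std::size_t first, std::size_t count) {
        return Update(first, count, [](Block& b) { ++b.device_use_count; });
    }

    ResultCode UnlockForDevice(std::size_t first, std::size_t count) {
        return Update(first, count, [](Block& b) { --b.device_use_count; });
    }
};

int main() {
    DevicePageTable table;
    table.blocks.resize(8);
    table.LockForDevice(2, 4);   // first GPU mapping
    table.LockForDevice(2, 4);   // second mapping of the same pages
    table.UnlockForDevice(2, 4); // pages stay device-shared
    std::cout << "use count on page 2: " << table.blocks[2].device_use_count << '\n'; // 1
    return 0;
}
```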

@@ -53,6 +53,8 @@ public:
bool is_map_only, VAddr region_start,
std::size_t region_num_pages, MemoryState state,
MemoryPermission perm, PAddr map_addr = 0);
ResultCode LockForDeviceAddressSpace(VAddr addr, std::size_t size);
ResultCode UnlockForDeviceAddressSpace(VAddr addr, std::size_t size);
Common::PageTable& PageTableImpl() {
return page_table_impl;

@@ -51,11 +51,8 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, u64 size) {
const GPUVAddr gpu_addr{FindFreeRegion(address_space_base, aligned_size)};
MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr);
- ASSERT(system.CurrentProcess()
- ->PageTable()
- .SetMemoryAttribute(cpu_addr, size, Kernel::Memory::MemoryAttribute::DeviceShared,
- Kernel::Memory::MemoryAttribute::DeviceShared)
- .IsSuccess());
+ ASSERT(
+ system.CurrentProcess()->PageTable().LockForDeviceAddressSpace(cpu_addr, size).IsSuccess());
return gpu_addr;
}
@@ -66,11 +63,8 @@ GPUVAddr MemoryManager::MapBufferEx(VAddr cpu_addr, GPUVAddr gpu_addr, u64 size)
const u64 aligned_size{Common::AlignUp(size, page_size)};
MapBackingMemory(gpu_addr, system.Memory().GetPointer(cpu_addr), aligned_size, cpu_addr);
- ASSERT(system.CurrentProcess()
- ->PageTable()
- .SetMemoryAttribute(cpu_addr, size, Kernel::Memory::MemoryAttribute::DeviceShared,
- Kernel::Memory::MemoryAttribute::DeviceShared)
- .IsSuccess());
+ ASSERT(
+ system.CurrentProcess()->PageTable().LockForDeviceAddressSpace(cpu_addr, size).IsSuccess());
return gpu_addr;
}
@@ -87,9 +81,7 @@ GPUVAddr MemoryManager::UnmapBuffer(GPUVAddr gpu_addr, u64 size) {
UnmapRange(gpu_addr, aligned_size);
ASSERT(system.CurrentProcess()
->PageTable()
- .SetMemoryAttribute(cpu_addr.value(), size,
- Kernel::Memory::MemoryAttribute::DeviceShared,
- Kernel::Memory::MemoryAttribute::None)
+ .UnlockForDeviceAddressSpace(cpu_addr.value(), size)
.IsSuccess());
return gpu_addr;
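On the GPU side, MapBufferEx and UnmapBuffer now call LockForDeviceAddressSpace and UnlockForDeviceAddressSpace instead of setting the DeviceShared attribute directly, so unmapping one GPU view of a range no longer clears the attribute while another view still exists. A small caller-side sketch of that pairing (StubPageTable and GpuMapper are illustrative stand-ins, not yuzu's classes):

```cpp
#include <cassert>
#include <cstdint>

// Caller-side sketch of the MapBufferEx / UnmapBuffer pairing; all types are
// illustrative stand-ins.
enum class ResultCode { Success, InvalidState };

struct StubPageTable {
    int share_count = 0;

    ResultCode LockForDeviceAddressSpace(std::uint64_t /*addr*/, std::uint64_t /*size*/) {
        ++share_count; // stand-in for ShareToDevice on every covered block
        return ResultCode::Success;
    }

    ResultCode UnlockForDeviceAddressSpace(std::uint64_t /*addr*/, std::uint64_t /*size*/) {
        --share_count; // stand-in for UnshareToDevice on every covered block
        return ResultCode::Success;
    }
};

struct GpuMapper {
    StubPageTable& page_table;

    std::uint64_t Map(std::uint64_t cpu_addr, std::uint64_t size) {
        const std::uint64_t gpu_addr = cpu_addr; // identity mapping keeps the sketch short
        // Mark the backing guest pages as device-shared; failure is treated as
        // a programming error, mirroring the ASSERT in the real code.
        const ResultCode result = page_table.LockForDeviceAddressSpace(cpu_addr, size);
        assert(result == ResultCode::Success);
        static_cast<void>(result);
        return gpu_addr;
    }

    void Unmap(std::uint64_t addr, std::uint64_t size) {
        // Drop one device reference; the attribute only clears when the last
        // mapping of the range goes away.
        const ResultCode result = page_table.UnlockForDeviceAddressSpace(addr, size);
        assert(result == ResultCode::Success);
        static_cast<void>(result);
    }
};

int main() {
    StubPageTable table;
    GpuMapper mapper{table};

    const std::uint64_t gpu_a = mapper.Map(0x10000, 0x2000);
    const std::uint64_t gpu_b = mapper.Map(0x10000, 0x2000); // same CPU range, second GPU view
    mapper.Unmap(gpu_a, 0x2000);                             // range is still device-shared
    assert(table.share_count == 1);
    mapper.Unmap(gpu_b, 0x2000);                             // now fully unshared
    assert(table.share_count == 0);
    return 0;
}
```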