Mirror of https://git.zaroz.cloud/nintendo-back-up/yuzu/yuzu-mainline.git (synced 2025-03-21 01:53:15 +00:00)
Merge pull request #905 from lioncash/vma

kernel/vm_manager: Minor changes

These changes are contained in commit 61ed68f3d0.
@@ -267,7 +267,7 @@ static ResultCode GetInfo(u64* result, u64 info_id, u64 handle, u64 info_sub_id)
     LOG_TRACE(Kernel_SVC, "called info_id=0x{:X}, info_sub_id=0x{:X}, handle=0x{:08X}", info_id,
               info_sub_id, handle);
 
-    auto& vm_manager = Core::CurrentProcess()->vm_manager;
+    const auto& vm_manager = Core::CurrentProcess()->vm_manager;
 
     switch (static_cast<GetInfoType>(info_id)) {
     case GetInfoType::AllowedCpuIdBitmask:
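Note: binding the current process's vm_manager through a const reference only compiles because every accessor GetInfo calls on it is const-qualified, which is exactly what the VMManager changes further down provide. A minimal standalone sketch of that rule, using a hypothetical stand-in type rather than the real VMManager interface:

#include <cstdint>

// Hypothetical stand-in for the manager class; only the const qualifier matters here.
struct Manager {
    std::uint64_t GetTotalMemoryUsage() const { return 0; } // callable through a const reference
};

int main() {
    Manager manager;
    const Manager& ref = manager;                        // mirrors "const auto& vm_manager = ..."
    return static_cast<int>(ref.GetTotalMemoryUsage());  // well-formed only because the getter is const
}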
@@ -2,6 +2,7 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.
 
+#include <algorithm>
 #include <iterator>
 #include <utility>
 #include "common/assert.h"
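The <algorithm> include added here supplies std::any_of, which the CarveVMARange hunk further down starts using.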
@@ -175,9 +176,9 @@ VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
 
 ResultCode VMManager::UnmapRange(VAddr target, u64 size) {
     CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
-    VAddr target_end = target + size;
+    const VAddr target_end = target + size;
 
-    VMAIter end = vma_map.end();
+    const VMAIter end = vma_map.end();
     // The comparison against the end of the range must be done using addresses since VMAs can be
     // merged during this process, causing invalidation of the iterators.
     while (vma != end && vma->second.base < target_end) {
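A side note on the const-qualified locals above: "const VMAIter end" makes the iterator object itself immutable, which is not the same thing as a const_iterator; it can still be compared against, and elements can still be modified through a separate non-const iterator. A small self-contained sketch (plain std::map, not the real vma_map) of that distinction:

#include <map>

int main() {
    std::map<int, int> m{{1, 10}, {2, 20}};

    const auto end = m.end(); // const iterator object: cannot be reseated...
    for (auto it = m.begin(); it != end; ++it) { // ...but comparisons still work
        it->second += 1;      // elements stay mutable through the non-const "it"
    }
    // end = m.begin();       // would not compile: "end" itself is const
    return 0;
}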
@@ -207,9 +208,9 @@ VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission ne
 
 ResultCode VMManager::ReprotectRange(VAddr target, u64 size, VMAPermission new_perms) {
     CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
-    VAddr target_end = target + size;
+    const VAddr target_end = target + size;
 
-    VMAIter end = vma_map.end();
+    const VMAIter end = vma_map.end();
     // The comparison against the end of the range must be done using addresses since VMAs can be
     // merged during this process, causing invalidation of the iterators.
     while (vma != end && vma->second.base < target_end) {
@@ -258,14 +259,14 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u64 size) {
         return ERR_INVALID_ADDRESS;
     }
 
-    VirtualMemoryArea& vma = vma_handle->second;
+    const VirtualMemoryArea& vma = vma_handle->second;
     if (vma.type != VMAType::Free) {
         // Region is already allocated
         return ERR_INVALID_ADDRESS_STATE;
     }
 
-    u64 start_in_vma = base - vma.base;
-    u64 end_in_vma = start_in_vma + size;
+    const VAddr start_in_vma = base - vma.base;
+    const VAddr end_in_vma = start_in_vma + size;
 
     if (end_in_vma > vma.size) {
         // Requested allocation doesn't fit inside VMA
@@ -288,17 +289,16 @@ ResultVal<VMManager::VMAIter> VMManager::CarveVMARange(VAddr target, u64 size) {
     ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: 0x{:016X}", size);
     ASSERT_MSG((target & Memory::PAGE_MASK) == 0, "non-page aligned base: 0x{:016X}", target);
 
-    VAddr target_end = target + size;
+    const VAddr target_end = target + size;
     ASSERT(target_end >= target);
     ASSERT(target_end <= MAX_ADDRESS);
     ASSERT(size > 0);
 
     VMAIter begin_vma = StripIterConstness(FindVMA(target));
-    VMAIter i_end = vma_map.lower_bound(target_end);
-    for (auto i = begin_vma; i != i_end; ++i) {
-        if (i->second.type == VMAType::Free) {
-            return ERR_INVALID_ADDRESS_STATE;
-        }
+    const VMAIter i_end = vma_map.lower_bound(target_end);
+    if (std::any_of(begin_vma, i_end,
+                    [](const auto& entry) { return entry.second.type == VMAType::Free; })) {
+        return ERR_INVALID_ADDRESS_STATE;
     }
 
     if (target != begin_vma->second.base) {
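The rewrite above folds the early-returning loop into std::any_of, which is what the new <algorithm> include earlier in this file is for. A standalone sketch with hypothetical stand-in types, showing that the algorithm call tests the same predicate over the same [begin_vma, i_end) range as the removed loop:

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <map>

enum class VMAType { Free, Allocated };
struct VMA {
    VMAType type;
};

int main() {
    // Stand-in for vma_map: base address -> VMA descriptor.
    std::map<std::uint64_t, VMA> vma_map{{0x0000, {VMAType::Allocated}}, {0x1000, {VMAType::Free}}};

    const auto begin_vma = vma_map.begin();
    const auto i_end = vma_map.lower_bound(0x2000);

    // Same check as the removed for-loop: is any VMA in [begin_vma, i_end) free?
    const bool any_free = std::any_of(begin_vma, i_end, [](const auto& entry) {
        return entry.second.type == VMAType::Free;
    });
    std::cout << std::boolalpha << any_free << '\n'; // prints "true"
}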
@@ -346,7 +346,7 @@ VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u64 offset_in_vma) {
 }
 
 VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
-    VMAIter next_vma = std::next(iter);
+    const VMAIter next_vma = std::next(iter);
     if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
         iter->second.size += next_vma->second.size;
         vma_map.erase(next_vma);
@@ -382,22 +382,22 @@ void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
     }
 }
 
-u64 VMManager::GetTotalMemoryUsage() {
+u64 VMManager::GetTotalMemoryUsage() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return 0xF8000000;
 }
 
-u64 VMManager::GetTotalHeapUsage() {
+u64 VMManager::GetTotalHeapUsage() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return 0x0;
 }
 
-VAddr VMManager::GetAddressSpaceBaseAddr() {
+VAddr VMManager::GetAddressSpaceBaseAddr() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return 0x8000000;
 }
 
-u64 VMManager::GetAddressSpaceSize() {
+u64 VMManager::GetAddressSpaceSize() const {
     LOG_WARNING(Kernel, "(STUBBED) called");
     return MAX_ADDRESS;
 }
@@ -190,16 +190,16 @@ public:
     void LogLayout() const;
 
     /// Gets the total memory usage, used by svcGetInfo
-    u64 GetTotalMemoryUsage();
+    u64 GetTotalMemoryUsage() const;
 
     /// Gets the total heap usage, used by svcGetInfo
-    u64 GetTotalHeapUsage();
+    u64 GetTotalHeapUsage() const;
 
     /// Gets the total address space base address, used by svcGetInfo
-    VAddr GetAddressSpaceBaseAddr();
+    VAddr GetAddressSpaceBaseAddr() const;
 
     /// Gets the total address space address size, used by svcGetInfo
-    u64 GetAddressSpaceSize();
+    u64 GetAddressSpaceSize() const;
 
     /// Each VMManager has its own page table, which is set as the main one when the owning process
     /// is scheduled.