Compare commits


No commits in common. "fadec2981727636ec7ba81d6c83995b7b9782190" and "bfc55834869fe24f8d94550bc6909a65ae7d35c2" have entirely different histories.

58 changed files with 1283 additions and 1713 deletions

View File

@@ -37,7 +37,6 @@ namespace ams::pkg1 {
         KeyGeneration_15_0_0 = 0x0E,
         KeyGeneration_16_0_0 = 0x0F,
         KeyGeneration_17_0_0 = 0x10,
-        KeyGeneration_18_0_0 = 0x11,

         KeyGeneration_Count,

View File

@@ -24,7 +24,7 @@ namespace ams::pkg2 {
     constexpr inline int PayloadCount = 3;

     constexpr inline int MinimumValidDataVersion = 0; /* We allow older package2 to load; this value is currently 0x18 in Nintendo's code. */
-    constexpr inline int CurrentBootloaderVersion = 0x15;
+    constexpr inline int CurrentBootloaderVersion = 0x14;

     struct Package2Meta {
         using Magic = util::FourCC<'P','K','2','1'>;

View File

@@ -178,7 +178,7 @@ namespace ams::kern::arch::arm64 {
             }

             NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
-            NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);
+            NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);
             Result Finalize();
         private:
             Result MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);

View File

@@ -30,7 +30,6 @@ namespace ams::kern::arch::arm64 {
             KPhysicalAddress phys_addr;
             size_t block_size;
             u8 sw_reserved_bits;
-            u8 attr;

             constexpr bool IsHeadMergeDisabled() const { return (this->sw_reserved_bits & PageTableEntry::SoftwareReservedBit_DisableMergeHead) != 0; }
             constexpr bool IsHeadAndBodyMergeDisabled() const { return (this->sw_reserved_bits & PageTableEntry::SoftwareReservedBit_DisableMergeHeadAndBody) != 0; }

View File

@@ -28,8 +28,8 @@ namespace ams::kern::arch::arm64 {
                 m_page_table.Activate(id);
             }

-            Result Initialize(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
-                R_RETURN(m_page_table.InitializeForProcess(flags, from_back, pool, code_address, code_size, system_resource, resource_limit));
+            Result Initialize(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
+                R_RETURN(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size, system_resource, resource_limit));
             }

             void Finalize() { m_page_table.Finalize(); }
@@ -316,8 +316,6 @@ namespace ams::kern::arch::arm64 {
             size_t GetKernelMapRegionSize() const { return m_page_table.GetKernelMapRegionSize(); }
             size_t GetAliasCodeRegionSize() const { return m_page_table.GetAliasCodeRegionSize(); }
-            size_t GetAliasRegionExtraSize() const { return m_page_table.GetAliasRegionExtraSize(); }

             size_t GetNormalMemorySize() const { return m_page_table.GetNormalMemorySize(); }
             size_t GetCodeSize() const { return m_page_table.GetCodeSize(); }

View File

@@ -16,10 +16,11 @@
 #pragma once
 #include <mesosphere/kern_common.hpp>
 #include <mesosphere/kern_select_cpu.hpp>
+#include <mesosphere/kern_select_interrupt_manager.hpp>

 namespace ams::kern::arch::arm64::smc {

-    template<int SmcId>
+    template<int SmcId, bool DisableInterrupt>
     void SecureMonitorCall(u64 *buf) {
         /* Load arguments into registers. */
         register u64 x0 asm("x0") = buf[0];
@@ -31,18 +32,34 @@ namespace ams::kern::arch::arm64::smc {
         register u64 x6 asm("x6") = buf[6];
         register u64 x7 asm("x7") = buf[7];

-        /* Backup the current thread pointer. */
-        const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
-
         /* Perform the call. */
-        __asm__ __volatile__("smc %c[smc_id]"
-                             : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
-                             : [smc_id]"i"(SmcId)
-                             : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
-                            );
-
-        /* Restore the current thread pointer into X18. */
-        cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
+        if constexpr (DisableInterrupt) {
+            KScopedInterruptDisable di;
+
+            /* Backup the current thread pointer. */
+            const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
+
+            __asm__ __volatile__("smc %c[smc_id]"
+                                 : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
+                                 : [smc_id]"i"(SmcId)
+                                 : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
+                                );
+
+            /* Restore the current thread pointer into X18. */
+            cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
+        } else {
+            /* Backup the current thread pointer. */
+            const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
+
+            __asm__ __volatile__("smc %c[smc_id]"
+                                 : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
+                                 : [smc_id]"i"(SmcId)
+                                 : "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
+                                );
+
+            /* Restore the current thread pointer into X18. */
+            cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
+        }

         /* Store arguments to output. */
         buf[0] = x0;
@@ -61,18 +78,18 @@ namespace ams::kern::arch::arm64::smc {
             PsciFunction_CpuOn = 0xC4000003,
         };

-        template<int SmcId>
+        template<int SmcId, bool DisableInterrupt>
         u64 PsciCall(PsciFunction function, u64 x1 = 0, u64 x2 = 0, u64 x3 = 0, u64 x4 = 0, u64 x5 = 0, u64 x6 = 0, u64 x7 = 0) {
             ams::svc::lp64::SecureMonitorArguments args = { { function, x1, x2, x3, x4, x5, x6, x7 } };
-            SecureMonitorCall<SmcId>(args.r);
+            SecureMonitorCall<SmcId, DisableInterrupt>(args.r);
             return args.r[0];
         }

-        template<int SmcId>
+        template<int SmcId, bool DisableInterrupt>
         u64 CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
-            return PsciCall<SmcId>(PsciFunction_CpuOn, core_id, entrypoint, arg);
+            return PsciCall<SmcId, DisableInterrupt>(PsciFunction_CpuOn, core_id, entrypoint, arg);
         }

     }
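Note: the right-hand side of this hunk folds interrupt masking into the call itself. Callers that may run with interrupts enabled instantiate SecureMonitorCall<Id, true>, so an RAII guard covers the SMC and the X18 thread-pointer save/restore as one critical section; init-time callers pass false because interrupts are not yet enabled. A minimal standalone sketch of the same if constexpr + RAII shape, with ScopedIrqDisable and DoCall as illustrative stand-ins for the kernel's types:

#include <cstdint>
#include <cstdio>

/* Stand-in RAII guard: a real kernel would mask/unmask interrupts here. */
struct ScopedIrqDisable {
    ScopedIrqDisable()  { std::puts("irq masked"); }
    ~ScopedIrqDisable() { std::puts("irq restored"); }
};

/* Stand-in for the actual smc instruction and register shuffling. */
inline void DoCall(std::uint64_t *buf) { buf[0] = 0; /* success */ }

template<int SmcId, bool DisableInterrupt>
void SecureMonitorCall(std::uint64_t *buf) {
    if constexpr (DisableInterrupt) {
        /* Guard's destructor restores interrupts when the scope exits. */
        ScopedIrqDisable di;
        DoCall(buf);
    } else {
        /* Init-time path: interrupts are globally disabled already. */
        DoCall(buf);
    }
}

int main() {
    std::uint64_t args[8] = { 0xC3000004 /* function id */, 1 /* x1 */ };
    SecureMonitorCall<0, true>(args);   /* runtime caller */
    SecureMonitorCall<0, false>(args);  /* init-time caller */
    return 0;
}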

View File

@@ -32,7 +32,6 @@ namespace ams::kern {
     struct InitialProcessBinaryLayout {
         uintptr_t address;
         uintptr_t _08;
-        uintptr_t kern_address;
     };

     struct InitialProcessBinaryLayoutWithSize {

View File

@@ -177,7 +177,7 @@ namespace ams::kern {
     };

     constexpr KMemoryPermission ConvertToKMemoryPermission(ams::svc::MemoryPermission perm) {
-        return static_cast<KMemoryPermission>((util::ToUnderlying(perm) & KMemoryPermission_UserMask) | KMemoryPermission_KernelRead | ((util::ToUnderlying(perm) & ams::svc::MemoryPermission_Write) ? KMemoryPermission_KernelWrite : KMemoryPermission_None) | (perm == ams::svc::MemoryPermission_None ? KMemoryPermission_NotMapped : KMemoryPermission_None));
+        return static_cast<KMemoryPermission>((util::ToUnderlying(perm) & KMemoryPermission_UserMask) | KMemoryPermission_KernelRead | ((util::ToUnderlying(perm) & KMemoryPermission_UserWrite) << KMemoryPermission_KernelShift) | (perm == ams::svc::MemoryPermission_None ? KMemoryPermission_NotMapped : KMemoryPermission_None));
     }

     enum KMemoryAttribute : u8 {
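Note: the replacement expression swaps a ternary for a shift. This works only under the invariant that each kernel permission bit sits exactly KMemoryPermission_KernelShift bits above the matching user bit, so shifting the masked user-write bit yields kernel-write precisely when user write was requested. A hedged sketch with made-up bit positions (the real layout is defined by the kernel's KMemoryPermission enum):

#include <cassert>
#include <cstdint>

/* Illustrative layout, not the kernel's actual values: user bits low,
   kernel bits the same pattern shifted left by 8. */
constexpr std::uint32_t UserRead    = 1u << 0;
constexpr std::uint32_t UserWrite   = 1u << 1;
constexpr int           KernelShift = 8;
constexpr std::uint32_t KernelWrite = UserWrite << KernelShift;

constexpr std::uint32_t KernelBitsTernary(std::uint32_t perm) {
    return (perm & UserWrite) ? KernelWrite : 0u;
}

constexpr std::uint32_t KernelBitsShift(std::uint32_t perm) {
    /* Branch-free: the user-write bit lands exactly on the kernel-write bit. */
    return (perm & UserWrite) << KernelShift;
}

static_assert(KernelBitsTernary(UserRead | UserWrite) == KernelBitsShift(UserRead | UserWrite));
static_assert(KernelBitsTernary(UserRead) == KernelBitsShift(UserRead));

int main() {
    assert(KernelBitsShift(UserWrite) == KernelWrite);
    return 0;
}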

View File

@@ -185,7 +185,7 @@ namespace ams::kern {
                 }
             }

-            Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random, s32 min_heap_index);
+            Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random);
         public:
             KMemoryManager()
                 : m_pool_locks(), m_pool_managers_head(), m_pool_managers_tail(), m_managers(), m_num_managers(), m_optimized_process_ids(), m_has_optimized_process()
@@ -199,7 +199,7 @@
             NOINLINE void FinalizeOptimizedMemory(u64 process_id, Pool pool);

             NOINLINE KPhysicalAddress AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
-            NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, size_t align_pages, u32 option);
+            NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option);
             NOINLINE Result AllocateForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern);

             Pool GetPool(KPhysicalAddress address) const {

View File

@@ -145,8 +145,6 @@ namespace ams::kern {
             bool IsEquivalentTo(const KPageGroup &rhs) const;

-            Result CopyRangeTo(KPageGroup &out, size_t offset, size_t size) const;
-
             ALWAYS_INLINE bool operator==(const KPageGroup &rhs) const {
                 return this->IsEquivalentTo(rhs);
             }

View File

@@ -62,21 +62,18 @@ namespace ams::kern {
             KPhysicalAddress m_address;
             size_t m_size;
             bool m_heap;
-            u8 m_attr;
         public:
-            constexpr MemoryRange() : m_address(Null<KPhysicalAddress>), m_size(0), m_heap(false), m_attr(0) { /* ... */ }
+            constexpr MemoryRange() : m_address(Null<KPhysicalAddress>), m_size(0), m_heap(false) { /* ... */ }

-            void Set(KPhysicalAddress address, size_t size, bool heap, u8 attr) {
+            void Set(KPhysicalAddress address, size_t size, bool heap) {
                 m_address = address;
                 m_size    = size;
                 m_heap    = heap;
-                m_attr    = attr;
             }

             constexpr KPhysicalAddress GetAddress() const { return m_address; }
             constexpr size_t GetSize() const { return m_size; }
             constexpr bool IsHeap() const { return m_heap; }
-            constexpr u8 GetAttribute() const { return m_attr; }

             void Open();
             void Close();
@@ -89,15 +86,6 @@
                 MemoryFillValue_Heap = 'Z',
             };

-            enum RegionType {
-                RegionType_KernelMap = 0,
-                RegionType_Stack     = 1,
-                RegionType_Alias     = 2,
-                RegionType_Heap      = 3,
-
-                RegionType_Count,
-            };
-
             enum OperationType {
                 OperationType_Map = 0,
                 OperationType_MapGroup = 1,
@@ -177,9 +165,15 @@
         private:
             KProcessAddress m_address_space_start;
             KProcessAddress m_address_space_end;
-            KProcessAddress m_region_starts[RegionType_Count];
-            KProcessAddress m_region_ends[RegionType_Count];
+            KProcessAddress m_heap_region_start;
+            KProcessAddress m_heap_region_end;
             KProcessAddress m_current_heap_end;
+            KProcessAddress m_alias_region_start;
+            KProcessAddress m_alias_region_end;
+            KProcessAddress m_stack_region_start;
+            KProcessAddress m_stack_region_end;
+            KProcessAddress m_kernel_map_region_start;
+            KProcessAddress m_kernel_map_region_end;
             KProcessAddress m_alias_code_region_start;
             KProcessAddress m_alias_code_region_end;
             KProcessAddress m_code_region_start;
@@ -189,7 +183,6 @@
             size_t m_mapped_unsafe_physical_memory;
             size_t m_mapped_insecure_memory;
             size_t m_mapped_ipc_server_memory;
-            size_t m_alias_region_extra_size;
             mutable KLightLock m_general_lock;
             mutable KLightLock m_map_physical_memory_lock;
             KLightLock m_device_map_lock;
@@ -210,12 +203,12 @@
             MemoryFillValue m_stack_fill_value;
         public:
             constexpr explicit KPageTableBase(util::ConstantInitializeTag)
-                : m_address_space_start(Null<KProcessAddress>), m_address_space_end(Null<KProcessAddress>),
-                  m_region_starts{Null<KProcessAddress>, Null<KProcessAddress>, Null<KProcessAddress>, Null<KProcessAddress>},
-                  m_region_ends{Null<KProcessAddress>, Null<KProcessAddress>, Null<KProcessAddress>, Null<KProcessAddress>},
-                  m_current_heap_end(Null<KProcessAddress>), m_alias_code_region_start(Null<KProcessAddress>),
+                : m_address_space_start(Null<KProcessAddress>), m_address_space_end(Null<KProcessAddress>), m_heap_region_start(Null<KProcessAddress>),
+                  m_heap_region_end(Null<KProcessAddress>), m_current_heap_end(Null<KProcessAddress>), m_alias_region_start(Null<KProcessAddress>),
+                  m_alias_region_end(Null<KProcessAddress>), m_stack_region_start(Null<KProcessAddress>), m_stack_region_end(Null<KProcessAddress>),
+                  m_kernel_map_region_start(Null<KProcessAddress>), m_kernel_map_region_end(Null<KProcessAddress>), m_alias_code_region_start(Null<KProcessAddress>),
                   m_alias_code_region_end(Null<KProcessAddress>), m_code_region_start(Null<KProcessAddress>), m_code_region_end(Null<KProcessAddress>),
-                  m_max_heap_size(), m_mapped_physical_memory_size(), m_mapped_unsafe_physical_memory(), m_mapped_insecure_memory(), m_mapped_ipc_server_memory(), m_alias_region_extra_size(),
+                  m_max_heap_size(), m_mapped_physical_memory_size(), m_mapped_unsafe_physical_memory(), m_mapped_insecure_memory(), m_mapped_ipc_server_memory(),
                   m_general_lock(), m_map_physical_memory_lock(), m_device_map_lock(), m_impl(util::ConstantInitialize), m_memory_block_manager(util::ConstantInitialize),
                   m_allocate_option(), m_address_space_width(), m_is_kernel(), m_enable_aslr(), m_enable_device_address_space_merge(),
                   m_memory_block_slab_manager(), m_block_info_manager(), m_resource_limit(), m_cached_physical_linear_region(), m_cached_physical_heap_region(),
@@ -227,7 +220,7 @@
             explicit KPageTableBase() { /* ... */ }

             NOINLINE Result InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end);
-            NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);
+            NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_device_address_space_merge, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);

             void Finalize();
@@ -243,7 +236,7 @@
             }

             constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
-                return this->Contains(addr, size) && m_region_starts[RegionType_Alias] <= addr && addr + size - 1 <= m_region_ends[RegionType_Alias] - 1;
+                return this->Contains(addr, size) && m_alias_region_start <= addr && addr + size - 1 <= m_alias_region_end - 1;
             }

             bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
@@ -335,7 +328,7 @@
             Result QueryMappingImpl(KProcessAddress *out, KPhysicalAddress address, size_t size, ams::svc::MemoryState state) const;

-            Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties &properties);
+            Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, KMemoryPermission perm);
             Result MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll);
             void RemapPageGroup(PageLinkedList *page_list, KProcessAddress address, size_t size, const KPageGroup &pg);
@@ -486,30 +479,24 @@
             }
         public:
             KProcessAddress GetAddressSpaceStart() const { return m_address_space_start; }
-
-            KProcessAddress GetHeapRegionStart() const { return m_region_starts[RegionType_Heap]; }
-            KProcessAddress GetAliasRegionStart() const { return m_region_starts[RegionType_Alias]; }
-            KProcessAddress GetStackRegionStart() const { return m_region_starts[RegionType_Stack]; }
-            KProcessAddress GetKernelMapRegionStart() const { return m_region_starts[RegionType_KernelMap]; }
+            KProcessAddress GetHeapRegionStart() const { return m_heap_region_start; }
+            KProcessAddress GetAliasRegionStart() const { return m_alias_region_start; }
+            KProcessAddress GetStackRegionStart() const { return m_stack_region_start; }
+            KProcessAddress GetKernelMapRegionStart() const { return m_kernel_map_region_start; }
             KProcessAddress GetAliasCodeRegionStart() const { return m_alias_code_region_start; }

             size_t GetAddressSpaceSize() const { return m_address_space_end - m_address_space_start; }
-
-            size_t GetHeapRegionSize() const { return m_region_ends[RegionType_Heap] - m_region_starts[RegionType_Heap]; }
-            size_t GetAliasRegionSize() const { return m_region_ends[RegionType_Alias] - m_region_starts[RegionType_Alias]; }
-            size_t GetStackRegionSize() const { return m_region_ends[RegionType_Stack] - m_region_starts[RegionType_Stack]; }
-            size_t GetKernelMapRegionSize() const { return m_region_ends[RegionType_KernelMap] - m_region_starts[RegionType_KernelMap]; }
+            size_t GetHeapRegionSize() const { return m_heap_region_end - m_heap_region_start; }
+            size_t GetAliasRegionSize() const { return m_alias_region_end - m_alias_region_start; }
+            size_t GetStackRegionSize() const { return m_stack_region_end - m_stack_region_start; }
+            size_t GetKernelMapRegionSize() const { return m_kernel_map_region_end - m_kernel_map_region_start; }
             size_t GetAliasCodeRegionSize() const { return m_alias_code_region_end - m_alias_code_region_start; }
-            size_t GetAliasRegionExtraSize() const { return m_alias_region_extra_size; }

             size_t GetNormalMemorySize() const {
                 /* Lock the table. */
                 KScopedLightLock lk(m_general_lock);
-                return (m_current_heap_end - m_region_starts[RegionType_Heap]) + m_mapped_physical_memory_size;
+                return (m_current_heap_end - m_heap_region_start) + m_mapped_physical_memory_size;
             }

             size_t GetCodeSize() const;

View File

@@ -53,7 +53,7 @@ namespace ams::kern {
             static size_t GetRealMemorySize();
             static size_t GetIntendedMemorySize();
             static KPhysicalAddress GetKernelPhysicalBaseAddress(KPhysicalAddress base_address);
-            static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out, KPhysicalAddress kern_base_address);
+            static void GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out);
             static bool ShouldIncreaseThreadResourceLimit();
             static void TurnOnCpu(u64 core_id, const ams::kern::init::KInitArguments *args);
             static size_t GetApplicationPoolSize();

View File

@@ -223,13 +223,6 @@ namespace ams::kern::arch::arm64 {
                     type = ams::svc::ExceptionType_InstructionAbort;
                     break;
                 case EsrEc_DataAbortEl0:
-                    /* If esr.IFSC is "Alignment Fault", return UnalignedData instead of DataAbort. */
-                    if ((esr & 0x3F) == 0b100001) {
-                        type = ams::svc::ExceptionType_UnalignedData;
-                    } else {
-                        type = ams::svc::ExceptionType_DataAbort;
-                    }
-                    break;
                 default:
                     type = ams::svc::ExceptionType_DataAbort;
                     break;

View File

@@ -207,7 +207,7 @@ namespace ams::kern::arch::arm64 {
         R_SUCCEED();
     }

-    Result KPageTable::InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
+    Result KPageTable::InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
         /* Get an ASID */
         m_asid = g_asid_manager.Reserve();
         ON_RESULT_FAILURE { g_asid_manager.Release(m_asid); };
@@ -222,10 +222,10 @@
         ON_RESULT_FAILURE_2 { m_manager->Free(new_table); };

         /* Initialize our base table. */
-        const size_t as_width = GetAddressSpaceWidth(flags);
+        const size_t as_width = GetAddressSpaceWidth(as_type);
         const KProcessAddress as_start = 0;
         const KProcessAddress as_end = (1ul << as_width);
-        R_TRY(KPageTableBase::InitializeForProcess(flags, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, system_resource, resource_limit));
+        R_TRY(KPageTableBase::InitializeForProcess(as_type, enable_aslr, enable_das_merge, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, system_resource, resource_limit));

         /* Note that we've updated the table (since we created it). */
         this->NoteUpdated();
@@ -258,7 +258,7 @@
         /* Begin the traversal. */
         TraversalContext context;
-        TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0, .attr = 0 };
+        TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0 };
         bool cur_valid = false;
         TraversalEntry next_entry;
         bool next_valid;
@@ -268,9 +268,7 @@
         /* Iterate over entries. */
         while (true) {
-            /* NOTE: Nintendo really does check next_entry.attr == (cur_entry.attr != 0)...but attr is always zero as of 18.0.0, and this is "probably" for the new console or debug-only anyway, */
-            /* so we'll implement the weird logic verbatim even though it doesn't match the GetContiguousRange logic. */
-            if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size && next_entry.attr == (cur_entry.attr ? 1 : 0))) {
+            if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
                 cur_entry.block_size += next_entry.block_size;
             } else {
                 if (cur_valid && IsHeapPhysicalAddressForFinalize(cur_entry.phys_addr)) {

View File

@@ -46,14 +46,12 @@ namespace ams::kern::arch::arm64 {
                 out_entry->block_size = L3BlockSize;
             }
             out_entry->sw_reserved_bits = l3_entry->GetSoftwareReservedBits();
-            out_entry->attr = 0;

             return true;
         } else {
             out_entry->phys_addr = Null<KPhysicalAddress>;
             out_entry->block_size = L3BlockSize;
             out_entry->sw_reserved_bits = 0;
-            out_entry->attr = 0;
             return false;
         }
     }
@@ -71,7 +69,6 @@
                 out_entry->block_size = L2BlockSize;
             }
             out_entry->sw_reserved_bits = l2_entry->GetSoftwareReservedBits();
-            out_entry->attr = 0;

             /* Set the output context. */
             out_context->l3_entry = nullptr;
@@ -82,8 +79,6 @@
             out_entry->phys_addr = Null<KPhysicalAddress>;
             out_entry->block_size = L2BlockSize;
             out_entry->sw_reserved_bits = 0;
-            out_entry->attr = 0;
-
             out_context->l3_entry = nullptr;
             return false;
         }
@@ -113,8 +108,6 @@
             out_entry->phys_addr = Null<KPhysicalAddress>;
             out_entry->block_size = L1BlockSize;
             out_entry->sw_reserved_bits = 0;
-            out_entry->attr = 0;
-
             out_context->l2_entry = nullptr;
             out_context->l3_entry = nullptr;
             return false;
@@ -126,7 +119,6 @@
             out_entry->phys_addr = Null<KPhysicalAddress>;
             out_entry->block_size = L1BlockSize;
             out_entry->sw_reserved_bits = 0;
-            out_entry->attr = 0;
             out_context->l1_entry = m_table + m_num_entries;
             out_context->l2_entry = nullptr;
             out_context->l3_entry = nullptr;
@@ -228,7 +220,6 @@
             out_entry->phys_addr = Null<KPhysicalAddress>;
             out_entry->block_size = L1BlockSize;
             out_entry->sw_reserved_bits = 0;
-            out_entry->attr = 0;
             context->l1_entry = m_table + m_num_entries;
             context->l2_entry = nullptr;
             context->l3_entry = nullptr;

View File

@@ -68,8 +68,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev:
     /* Check if our disable count allows us to call SVCs. */
     mrs x10, tpidrro_el0
-    add x10, x10, #(THREAD_LOCAL_REGION_DISABLE_COUNT)
-    ldtrh w10, [x10]
+    ldrh w10, [x10, #(THREAD_LOCAL_REGION_DISABLE_COUNT)]
     cbz w10, 1f

     /* It might not, so check the stack params to see if we must not allow the SVC. */
@@ -353,8 +352,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev:
     /* Check if our disable count allows us to call SVCs. */
     mrs x10, tpidrro_el0
-    add x10, x10, #(THREAD_LOCAL_REGION_DISABLE_COUNT)
-    ldtrh w10, [x10]
+    ldrh w10, [x10, #(THREAD_LOCAL_REGION_DISABLE_COUNT)]
     cbz w10, 1f

     /* It might not, so check the stack params to see if we must not allow the SVC. */

View File

@@ -296,7 +296,7 @@ namespace ams::kern::board::nintendo::nx {
             /* TODO: Move this into a header for the MC in general. */
             constexpr u32 MemoryControllerConfigurationRegister = 0x70019050;
             u32 config_value;
-            smc::init::ReadWriteRegister(std::addressof(config_value), MemoryControllerConfigurationRegister, 0, 0);
+            MESOSPHERE_INIT_ABORT_UNLESS(smc::init::ReadWriteRegister(&config_value, MemoryControllerConfigurationRegister, 0, 0));
             return static_cast<size_t>(config_value & 0x3FFF) << 20;
         }
@@ -387,7 +387,7 @@
         }

         void KSystemControl::Init::CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
-            MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor>(core_id, entrypoint, arg)) == 0);
+            MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor, false>(core_id, entrypoint, arg)) == 0);
         }

         /* Randomness for Initialization. */
@@ -601,9 +601,8 @@
         if (g_call_smc_on_panic) {
             /* If we should, instruct the secure monitor to display a panic screen. */
-            smc::ShowError(0xF00);
+            smc::Panic(0xF00);
         }
-
         AMS_INFINITE_LOOP();
     }

View File

@@ -43,7 +43,7 @@ namespace ams::kern::board::nintendo::nx::smc {
         enum FunctionId : u32 {
             FunctionId_GetConfig           = 0xC3000004,
             FunctionId_GenerateRandomBytes = 0xC3000005,
-            FunctionId_ShowError           = 0xC3000006,
+            FunctionId_Panic               = 0xC3000006,
             FunctionId_ConfigureCarveout   = 0xC3000007,
             FunctionId_ReadWriteRegister   = 0xC3000008,
@@ -51,187 +51,122 @@ namespace ams::kern::board::nintendo::nx::smc {
             FunctionId_SetConfig = 0xC3000409,
         };

-        constexpr size_t GenerateRandomBytesSizeMax = sizeof(::ams::svc::lp64::SecureMonitorArguments) - sizeof(::ams::svc::lp64::SecureMonitorArguments{}.r[0]);
-
         /* Global lock for generate random bytes. */
         constinit KSpinLock g_generate_random_lock;

-        bool TryGetConfigImpl(u64 *out, size_t num_qwords, ConfigItem config_item) {
-            /* Create the arguments .*/
-            ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GetConfig, static_cast<u32>(config_item) } };
-
-            /* Call into the secure monitor. */
-            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
-
-            /* If successful, copy the output. */
-            const bool success = static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
-            if (AMS_LIKELY(success)) {
-                for (size_t i = 0; i < num_qwords && i < 7; i++) {
-                    out[i] = args.r[1 + i];
-                }
-            }
-
-            return success;
-        }
-
-        bool SetConfigImpl(ConfigItem config_item, u64 value) {
-            /* Create the arguments .*/
-            ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_SetConfig, static_cast<u32>(config_item), 0, value } };
-
-            /* Call into the secure monitor. */
-            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
-
-            /* Return whether the call was successful. */
-            return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
-        }
-
-        bool ReadWriteRegisterImpl(u32 *out, u64 address, u32 mask, u32 value) {
-            /* Create the arguments .*/
-            ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ReadWriteRegister, address, mask, value } };
-
-            /* Call into the secure monitor. */
-            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
-
-            /* Unconditionally write the output. */
-            *out = static_cast<u32>(args.r[1]);
-
-            /* Return whether the call was successful. */
-            return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
-        }
-
-        bool GenerateRandomBytesImpl(void *dst, size_t size) {
-            /* Create the arguments. */
-            ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GenerateRandomBytes, size } };
-
-            /* Call into the secure monitor. */
-            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
-
-            /* If successful, copy the output. */
-            const bool success = static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
-            if (AMS_LIKELY(success)) {
-                std::memcpy(dst, std::addressof(args.r[1]), size);
-            }
-
-            return success;
-        }
-
-        bool ConfigureCarveoutImpl(size_t which, uintptr_t address, size_t size) {
-            /* Create the arguments .*/
-            ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ConfigureCarveout, static_cast<u64>(which), static_cast<u64>(address), static_cast<u64>(size) } };
-
-            /* Call into the secure monitor. */
-            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
-
-            /* Return whether the call was successful. */
-            return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
-        }
-
-        bool ShowErrorImpl(u32 color) {
-            /* Create the arguments .*/
-            ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ShowError, color } };
-
-            /* Call into the secure monitor. */
-            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
-
-            /* Return whether the call was successful. */
-            return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
-        }
-
-        void CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args) {
-            /* Call into the secure monitor. */
-            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_User>(args->r);
-        }
-
     }

     /* SMC functionality needed for init. */
     namespace init {

         void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
-            /* Ensure we successfully get the config. */
-            MESOSPHERE_INIT_ABORT_UNLESS(TryGetConfigImpl(out, num_qwords, config_item));
+            ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GetConfig, static_cast<u32>(config_item) } };
+
+            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
+            MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
+
+            for (size_t i = 0; i < num_qwords && i < 7; i++) {
+                out[i] = args.r[1 + i];
+            }
         }

         void GenerateRandomBytes(void *dst, size_t size) {
-            /* Check that the size is valid. */
-            MESOSPHERE_INIT_ABORT_UNLESS(0 < size && size <= GenerateRandomBytesSizeMax);
-
-            /* Ensure we successfully generate the random bytes. */
-            MESOSPHERE_INIT_ABORT_UNLESS(GenerateRandomBytesImpl(dst, size));
+            /* Call SmcGenerateRandomBytes() */
+            ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GenerateRandomBytes, size } };
+            MESOSPHERE_INIT_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.r[0]));
+
+            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
+            MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
+
+            /* Copy output. */
+            std::memcpy(dst, std::addressof(args.r[1]), size);
         }

-        void ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value) {
-            /* Ensure we successfully access the register. */
-            MESOSPHERE_INIT_ABORT_UNLESS(ReadWriteRegisterImpl(out, address, mask, value));
+        bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value) {
+            ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ReadWriteRegister, address, mask, value } };
+
+            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, false>(args.r);
+            MESOSPHERE_INIT_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
+
+            *out = args.r[1];
+            return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
         }

     }

     bool TryGetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
-        /* Disable interrupts. */
-        KScopedInterruptDisable di;
-
-        /* Get the config. */
-        return TryGetConfigImpl(out, num_qwords, config_item);
+        ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GetConfig, static_cast<u32>(config_item) } };
+
+        ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
+        if (AMS_UNLIKELY(static_cast<SmcResult>(args.r[0]) != SmcResult::Success)) {
+            return false;
+        }
+
+        for (size_t i = 0; i < num_qwords && i < 7; i++) {
+            out[i] = args.r[1 + i];
+        }
+
+        return true;
     }

     void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
-        /* Ensure we successfully get the config. */
         MESOSPHERE_ABORT_UNLESS(TryGetConfig(out, num_qwords, config_item));
     }

     bool SetConfig(ConfigItem config_item, u64 value) {
-        /* Disable interrupts. */
-        KScopedInterruptDisable di;
-
-        /* Set the config. */
-        return SetConfigImpl(config_item, value);
+        ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_SetConfig, static_cast<u32>(config_item), 0, value } };
+
+        ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
+
+        return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
     }

     bool ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
-        /* Disable interrupts. */
-        KScopedInterruptDisable di;
-
-        /* Access the register. */
-        return ReadWriteRegisterImpl(out, address, mask, value);
+        ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ReadWriteRegister, address, mask, value } };
+
+        ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
+
+        *out = static_cast<u32>(args.r[1]);
+        return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
     }

     void ConfigureCarveout(size_t which, uintptr_t address, size_t size) {
-        /* Disable interrupts. */
-        KScopedInterruptDisable di;
-
-        /* Ensure that we successfully configure the carveout. */
-        MESOSPHERE_ABORT_UNLESS(ConfigureCarveoutImpl(which, address, size));
+        ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ConfigureCarveout, static_cast<u64>(which), static_cast<u64>(address), static_cast<u64>(size) } };
+
+        ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
+
+        MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
     }

     void GenerateRandomBytes(void *dst, size_t size) {
-        /* Check that the size is valid. */
-        MESOSPHERE_ABORT_UNLESS(0 < size && size <= GenerateRandomBytesSizeMax);
-
-        /* Disable interrupts. */
-        KScopedInterruptDisable di;
-
-        /* Acquire the exclusive right to generate random bytes. */
-        KScopedSpinLock lk(g_generate_random_lock);
-
-        /* Ensure we successfully generate the random bytes. */
-        MESOSPHERE_ABORT_UNLESS(GenerateRandomBytesImpl(dst, size));
+        /* Setup for call. */
+        ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GenerateRandomBytes, size } };
+        MESOSPHERE_ABORT_UNLESS(size <= sizeof(args) - sizeof(args.r[0]));
+
+        /* Make call. */
+        {
+            KScopedInterruptDisable intr_disable;
+            KScopedSpinLock lk(g_generate_random_lock);
+
+            ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
+        }
+        MESOSPHERE_ABORT_UNLESS((static_cast<SmcResult>(args.r[0]) == SmcResult::Success));
+
+        /* Copy output. */
+        std::memcpy(dst, std::addressof(args.r[1]), size);
     }

-    void ShowError(u32 color) {
-        /* Disable interrupts. */
-        KScopedInterruptDisable di;
-
-        /* Ensure we successfully show the error. */
-        MESOSPHERE_ABORT_UNLESS(ShowErrorImpl(color));
+    void NORETURN Panic(u32 color) {
+        ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_Panic, color } };
+
+        ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor, true>(args.r);
+
+        AMS_INFINITE_LOOP();
     }

     void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
-        /* Disable interrupts. */
-        KScopedInterruptDisable di;
-
-        /* Perform the call. */
-        CallSecureMonitorFromUserImpl(args);
+        ::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_User, true>(args->r);
     }

 }
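Note: every wrapper on both sides of this hunk follows the same register convention: r[0] carries the function ID in and the SmcResult back out, with any payload returned in r[1..7]. A toy wrapper written against that convention (MockSmc stands in for the real secure monitor; all names here are illustrative):

#include <cassert>
#include <cstddef>
#include <cstdint>

enum class SmcResult : std::uint64_t { Success = 0 };

/* Stand-in secure monitor: reports success and echoes a fake config value. */
void MockSmc(std::uint64_t r[8]) {
    r[1] = 0xCAFE;
    r[0] = static_cast<std::uint64_t>(SmcResult::Success);
}

bool TryGetConfig(std::uint64_t *out, std::size_t num_qwords, std::uint32_t config_item) {
    std::uint64_t r[8] = { 0xC3000004 /* FunctionId_GetConfig */, config_item };
    MockSmc(r);
    if (static_cast<SmcResult>(r[0]) != SmcResult::Success) {
        return false;
    }
    /* Outputs live in r[1..7]; never copy more than seven qwords. */
    for (std::size_t i = 0; i < num_qwords && i < 7; ++i) {
        out[i] = r[1 + i];
    }
    return true;
}

int main() {
    std::uint64_t value = 0;
    assert(TryGetConfig(&value, 1, 1) && value == 0xCAFE);
    return 0;
}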

View File

@@ -111,7 +111,7 @@ namespace ams::kern::board::nintendo::nx::smc {
     bool SetConfig(ConfigItem config_item, u64 value);

-    void ShowError(u32 color);
+    void NORETURN Panic(u32 color);

     void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
@@ -119,7 +119,7 @@ namespace ams::kern::board::nintendo::nx::smc {
         void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item);
         void GenerateRandomBytes(void *dst, size_t size);
-        void ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value);
+        bool ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value);
     }

View File

@@ -136,7 +136,7 @@ namespace ams::kern {
         {
             /* Allocate the previously unreserved pages. */
             KPageGroup unreserve_pg(Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer());
-            MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(unreserve_pg), unreserved_size / PageSize, 1, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
+            MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(unreserve_pg), unreserved_size / PageSize, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));

             /* Add the previously reserved pages. */
             if (src_pool == dst_pool && binary_pages != 0) {
@@ -173,7 +173,7 @@
             /* If the pool is the same, we need to use the workaround page group. */
             if (src_pool == dst_pool) {
                 /* Allocate a new, usable group for the process. */
-                MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(workaround_pg), static_cast<size_t>(params.code_num_pages), 1, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
+                MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(workaround_pg), static_cast<size_t>(params.code_num_pages), KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));

                 /* Copy data from the working page group to the usable one. */
                 auto work_it = pg.begin();

View File

@@ -184,11 +184,6 @@ namespace ams::kern {
                 case RegionType::NoMapping:
                     break;
                 case RegionType::KernelTraceBuffer:
-                    /* NOTE: This does not match official, but is used to make pre-processing hbl capabilities in userland unnecessary. */
-                    /* If ktrace isn't enabled, allow ktrace to succeed without mapping anything. */
-                    if constexpr (!ams::kern::IsKTraceEnabled) {
-                        break;
-                    }
                 case RegionType::OnMemoryBootImage:
                 case RegionType::DTB:
                     R_TRY(f(MemoryRegions[static_cast<u32>(type)], perm));

View File

@@ -107,6 +107,7 @@ namespace ams::kern {
             R_UNLESS(cur_sessions < max, svc::ResultOutOfSessions());
             new_sessions = cur_sessions + 1;
         } while (!m_num_sessions.CompareExchangeWeak<std::memory_order_relaxed>(cur_sessions, new_sessions));
+
     }

     /* Atomically update the peak session tracking. */
@@ -181,6 +182,7 @@ namespace ams::kern {
             R_UNLESS(cur_sessions < max, svc::ResultOutOfSessions());
             new_sessions = cur_sessions + 1;
         } while (!m_num_sessions.CompareExchangeWeak<std::memory_order_relaxed>(cur_sessions, new_sessions));
+
     }

     /* Atomically update the peak session tracking. */
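Note: the loop in both hunks is the standard bounded-increment pattern over a weak compare-exchange: read the count, fail if the limit is reached, otherwise retry the increment until it lands. A free-standing equivalent using std::atomic (the kernel uses its own atomic wrapper, but the logic is the same):

#include <atomic>
#include <cassert>

/* Atomically increment a session count, refusing once `max` is reached. */
bool TryOpenSession(std::atomic<int> &num_sessions, int max) {
    int cur = num_sessions.load(std::memory_order_relaxed);
    int next;
    do {
        if (cur >= max) {
            return false; /* maps to svc::ResultOutOfSessions() in the kernel */
        }
        next = cur + 1;
        /* On failure, compare_exchange_weak reloads cur, so the limit is re-checked. */
    } while (!num_sessions.compare_exchange_weak(cur, next, std::memory_order_relaxed));
    return true;
}

int main() {
    std::atomic<int> sessions{0};
    assert(TryOpenSession(sessions, 1));
    assert(!TryOpenSession(sessions, 1)); /* at the limit */
    return 0;
}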

View File

@@ -79,7 +79,29 @@ namespace ams::kern {
         /* Create a page group representing the segment. */
         KPageGroup segment_pg(Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer());
-        MESOSPHERE_R_ABORT_UNLESS(pg.CopyRangeTo(segment_pg, seg_offset, util::AlignUp(seg_size, PageSize)));
+        if (size_t remaining_size = util::AlignUp(seg_size, PageSize); remaining_size != 0) {
+            /* Find the pages whose data corresponds to the segment. */
+            size_t cur_offset = 0;
+            for (auto it = pg.begin(); it != pg.end() && remaining_size > 0; ++it) {
+                /* Get the current size. */
+                const size_t cur_size = it->GetSize();
+
+                /* Determine if the offset is in range. */
+                const size_t rel_diff = seg_offset - cur_offset;
+                const bool is_before = cur_offset <= seg_offset;
+                cur_offset += cur_size;
+                if (is_before && seg_offset < cur_offset) {
+                    /* It is, so add the block. */
+                    const size_t block_size = std::min<size_t>(cur_size - rel_diff, remaining_size);
+                    MESOSPHERE_R_ABORT_UNLESS(segment_pg.AddBlock(it->GetAddress() + rel_diff, block_size / PageSize));
+
+                    /* Advance. */
+                    cur_offset = seg_offset + block_size;
+                    remaining_size -= block_size;
+                    seg_offset += block_size;
+                }
+            }
+        }

         /* Setup the new page group's memory so that we can load the segment. */
         {
@@ -204,9 +226,6 @@ namespace ams::kern {
         const uintptr_t map_end = map_start + map_size;
         MESOSPHERE_ABORT_UNLESS(start_address == 0);

-        /* Default fields in parameter to zero. */
-        *out = {};
-
         /* Set fields in parameter. */
         out->code_address = map_start + start_address;
         out->code_num_pages = util::AlignUp(end_address - start_address, PageSize) / PageSize;
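Note: the inlined replacement for CopyRangeTo walks the page group's (address, size) blocks with a running offset to carve out just the sub-range backing the segment. The same walk in isolation, over a plain vector of blocks (types and values are illustrative):

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

struct Block { std::size_t address; std::size_t size; };

/* Collect the pieces of `blocks` that back [range_offset, range_offset + range_size). */
std::vector<Block> CopyRange(const std::vector<Block> &blocks, std::size_t range_offset, std::size_t range_size) {
    std::vector<Block> out;
    std::size_t cur_offset = 0, remaining = range_size;
    for (const auto &b : blocks) {
        if (remaining == 0) {
            break;
        }
        const std::size_t rel_diff  = range_offset - cur_offset;
        const bool        is_before = cur_offset <= range_offset;
        cur_offset += b.size;
        if (is_before && range_offset < cur_offset) {
            /* This block overlaps the range; take only the overlapping piece. */
            const std::size_t piece = std::min(b.size - rel_diff, remaining);
            out.push_back({ b.address + rel_diff, piece });
            cur_offset    = range_offset + piece;
            remaining    -= piece;
            range_offset += piece;
        }
    }
    assert(remaining == 0); /* mirrors the MESOSPHERE_ABORT_UNLESS */
    return out;
}

int main() {
    const std::vector<Block> pg = { { 0x1000, 0x2000 }, { 0x8000, 0x1000 } };
    const auto r = CopyRange(pg, 0x1800, 0x1000);
    assert(r.size() == 2 && r[0].address == 0x2800 && r[0].size == 0x800 && r[1].size == 0x800);
    return 0;
}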

View File

@@ -225,7 +225,7 @@ namespace ams::kern {
         return allocated_block;
     }

-    Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random, s32 min_heap_index) {
+    Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random) {
         /* Choose a heap based on our page size request. */
         const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
         R_UNLESS(0 <= heap_index, svc::ResultOutOfMemory());
@@ -241,7 +241,7 @@
         };

         /* Keep allocating until we've allocated all our pages. */
-        for (s32 index = heap_index; index >= min_heap_index && num_pages > 0; index--) {
+        for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {
             const size_t pages_per_alloc = KPageHeap::GetBlockNumPages(index);
             for (Impl *cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr; cur_manager = this->GetNextManager(cur_manager, dir)) {
                 while (num_pages >= pages_per_alloc) {
@@ -274,7 +274,7 @@
         R_SUCCEED();
     }

-    Result KMemoryManager::AllocateAndOpen(KPageGroup *out, size_t num_pages, size_t align_pages, u32 option) {
+    Result KMemoryManager::AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option) {
         MESOSPHERE_ASSERT(out != nullptr);
         MESOSPHERE_ASSERT(out->GetNumPages() == 0);
@@ -285,11 +285,8 @@
         const auto [pool, dir] = DecodeOption(option);
         KScopedLightLock lk(m_pool_locks[pool]);

-        /* Choose a heap based on our alignment size request. */
-        const s32 heap_index = KPageHeap::GetAlignedBlockIndex(align_pages, align_pages);
-
         /* Allocate the page group. */
-        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, m_has_optimized_process[pool], true, heap_index));
+        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, m_has_optimized_process[pool], true));

         /* Open the first reference to the pages. */
         for (const auto &block : *out) {
@@ -329,11 +326,8 @@
         const bool has_optimized = m_has_optimized_process[pool];
         const bool is_optimized  = m_optimized_process_ids[pool] == process_id;

-        /* Always use the minimum alignment size. */
-        const s32 heap_index = 0;
-
         /* Allocate the page group. */
-        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, false, heap_index));
+        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, false));

         /* Set whether we should optimize. */
         optimized = has_optimized && is_optimized;
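Note: with or without min_heap_index, AllocatePageGroupImpl is a greedy largest-block-first allocator: it starts at the block index sized to the request and walks down to smaller block sizes until every page is placed. A toy version over power-of-two block sizes (the index math is illustrative, not KPageHeap's):

#include <cassert>
#include <cstddef>
#include <vector>

/* Index i holds blocks of (1 << i) pages; pick the largest index whose
   block size does not exceed the request. */
constexpr int GetBlockIndex(std::size_t num_pages) {
    int index = -1;
    for (std::size_t size = 1; size <= num_pages; size <<= 1) {
        ++index;
    }
    return index;
}

/* Greedily cover num_pages with block sizes from the top index down to 0. */
std::vector<std::size_t> AllocatePageGroup(std::size_t num_pages) {
    std::vector<std::size_t> blocks;
    for (int index = GetBlockIndex(num_pages); index >= 0 && num_pages > 0; --index) {
        const std::size_t pages_per_alloc = std::size_t{1} << index;
        while (num_pages >= pages_per_alloc) {
            blocks.push_back(pages_per_alloc); /* one block "allocated" */
            num_pages -= pages_per_alloc;
        }
    }
    assert(num_pages == 0);
    return blocks;
}

int main() {
    const auto b = AllocatePageGroup(13); /* 8 + 4 + 1 */
    assert(b.size() == 3 && b[0] == 8 && b[1] == 4 && b[2] == 1);
    return 0;
}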

View File

@@ -84,58 +84,6 @@ namespace ams::kern {
         R_SUCCEED();
     }

-    Result KPageGroup::CopyRangeTo(KPageGroup &out, size_t range_offset, size_t range_size) const {
-        /* Get the previous last block for the group. */
-        KBlockInfo * const out_last = out.m_last_block;
-        const auto out_last_addr = out_last != nullptr ? out_last->GetAddress() : Null<KPhysicalAddress>;
-        const auto out_last_np   = out_last != nullptr ? out_last->GetNumPages() : 0;
-
-        /* Ensure we cleanup the group on failure. */
-        ON_RESULT_FAILURE {
-            KBlockInfo *cur = out_last != nullptr ? out_last->GetNext() : out.m_first_block;
-            while (cur != nullptr) {
-                KBlockInfo *next = cur->GetNext();
-                out.m_manager->Free(cur);
-                cur = next;
-            }
-
-            if (out_last != nullptr) {
-                out_last->Initialize(out_last_addr, out_last_np);
-                out_last->SetNext(nullptr);
-            } else {
-                out.m_first_block = nullptr;
-            }
-            out.m_last_block = out_last;
-        };
-
-        /* Find the pages within the requested range. */
-        size_t cur_offset = 0, remaining_size = range_size;
-        for (auto it = this->begin(); it != this->end() && remaining_size > 0; ++it) {
-            /* Get the current size. */
-            const size_t cur_size = it->GetSize();
-
-            /* Determine if the offset is in range. */
-            const size_t rel_diff = range_offset - cur_offset;
-            const bool is_before = cur_offset <= range_offset;
-            cur_offset += cur_size;
-            if (is_before && range_offset < cur_offset) {
-                /* It is, so add the block. */
-                const size_t block_size = std::min<size_t>(cur_size - rel_diff, remaining_size);
-                R_TRY(out.AddBlock(it->GetAddress() + rel_diff, block_size / PageSize));
-
-                /* Advance. */
-                cur_offset = range_offset + block_size;
-                remaining_size -= block_size;
-                range_offset += block_size;
-            }
-        }
-
-        /* Check that we successfully copied the range. */
-        MESOSPHERE_ABORT_UNLESS(remaining_size == 0);
-
-        R_SUCCEED();
-    }
-
     void KPageGroup::Open() const {
         auto &mm = Kernel::GetMemoryManager();

View File

@@ -97,12 +97,15 @@ namespace ams::kern {
             m_enable_aslr = true;
             m_enable_device_address_space_merge = false;

-            for (auto i = 0; i < RegionType_Count; ++i) {
-                m_region_starts[i] = 0;
-                m_region_ends[i]   = 0;
-            }
-
+            m_heap_region_start = 0;
+            m_heap_region_end = 0;
             m_current_heap_end = 0;
+            m_alias_region_start = 0;
+            m_alias_region_end = 0;
+            m_stack_region_start = 0;
+            m_stack_region_end = 0;
+            m_kernel_map_region_start = 0;
+            m_kernel_map_region_end = 0;
             m_alias_code_region_start = 0;
             m_alias_code_region_end = 0;
             m_code_region_start = 0;

@@ -112,7 +115,6 @@ namespace ams::kern {
             m_mapped_unsafe_physical_memory = 0;
             m_mapped_insecure_memory = 0;
             m_mapped_ipc_server_memory = 0;
-            m_alias_region_extra_size = 0;

             m_memory_block_slab_manager = Kernel::GetSystemSystemResource().GetMemoryBlockSlabManagerPointer();
             m_block_info_manager        = Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer();

@@ -133,7 +135,7 @@ namespace ams::kern {
             R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, m_memory_block_slab_manager));
         }

-        Result KPageTableBase::InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
+        Result KPageTableBase::InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
             /* Validate the region. */
             MESOSPHERE_ABORT_UNLESS(start <= code_address);
             MESOSPHERE_ABORT_UNLESS(code_address < code_address + code_size);

@@ -147,16 +149,13 @@ namespace ams::kern {
                 return KAddressSpaceInfo::GetAddressSpaceSize(m_address_space_width, type);
             };

-            /* Default to zero alias region extra size. */
-            m_alias_region_extra_size = 0;
-
             /* Set our width and heap/alias sizes. */
-            m_address_space_width = GetAddressSpaceWidth(flags);
+            m_address_space_width = GetAddressSpaceWidth(as_type);
             size_t alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Alias);
             size_t heap_region_size  = GetSpaceSize(KAddressSpaceInfo::Type_Heap);

             /* Adjust heap/alias size if we don't have an alias region. */
-            if ((flags & ams::svc::CreateProcessFlag_AddressSpaceMask) == ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias) {
+            if ((as_type & ams::svc::CreateProcessFlag_AddressSpaceMask) == ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias) {
                 heap_region_size += alias_region_size;
                 alias_region_size = 0;
             }
@@ -166,57 +165,35 @@ namespace ams::kern {
             KProcessAddress process_code_end;
             size_t stack_region_size;
             size_t kernel_map_region_size;
-            KProcessAddress before_process_code_start, after_process_code_start;
-            size_t before_process_code_size, after_process_code_size;
             if (m_address_space_width == 39) {
+                alias_region_size      = GetSpaceSize(KAddressSpaceInfo::Type_Alias);
+                heap_region_size       = GetSpaceSize(KAddressSpaceInfo::Type_Heap);
                 stack_region_size      = GetSpaceSize(KAddressSpaceInfo::Type_Stack);
                 kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type_MapSmall);
                 m_code_region_start       = GetSpaceStart(KAddressSpaceInfo::Type_Map39Bit);
                 m_code_region_end         = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_Map39Bit);
                 m_alias_code_region_start = m_code_region_start;
                 m_alias_code_region_end   = m_code_region_end;
                 process_code_start = util::AlignDown(GetInteger(code_address), RegionAlignment);
                 process_code_end   = util::AlignUp(GetInteger(code_address) + code_size, RegionAlignment);
-
-                before_process_code_start = m_code_region_start;
-                before_process_code_size  = process_code_start - before_process_code_start;
-                after_process_code_start  = process_code_end;
-                after_process_code_size   = m_code_region_end - process_code_end;
-
-                /* If we have a 39-bit address space and should, enable extra size to the alias region. */
-                if (flags & ams::svc::CreateProcessFlag_EnableAliasRegionExtraSize) {
-                    /* Extra size is 1/8th of the address space. */
-                    m_alias_region_extra_size = (static_cast<size_t>(1) << m_address_space_width) / 8;
-
-                    alias_region_size += m_alias_region_extra_size;
-                }
             } else {
                 stack_region_size      = 0;
                 kernel_map_region_size = 0;
                 m_code_region_start       = GetSpaceStart(KAddressSpaceInfo::Type_MapSmall);
                 m_code_region_end         = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_MapSmall);
+                m_stack_region_start      = m_code_region_start;
                 m_alias_code_region_start = m_code_region_start;
                 m_alias_code_region_end   = GetSpaceStart(KAddressSpaceInfo::Type_MapLarge) + GetSpaceSize(KAddressSpaceInfo::Type_MapLarge);
-                m_region_starts[RegionType_Stack]     = m_code_region_start;
-                m_region_ends[RegionType_Stack]       = m_code_region_end;
-                m_region_starts[RegionType_KernelMap] = m_code_region_start;
-                m_region_ends[RegionType_KernelMap]   = m_code_region_end;
+                m_stack_region_end        = m_code_region_end;
+                m_kernel_map_region_start = m_code_region_start;
+                m_kernel_map_region_end   = m_code_region_end;
                 process_code_start = m_code_region_start;
                 process_code_end   = m_code_region_end;
-
-                before_process_code_start = m_code_region_start;
-                before_process_code_size  = 0;
-                after_process_code_start  = GetSpaceStart(KAddressSpaceInfo::Type_MapLarge);
-                after_process_code_size   = GetSpaceSize(KAddressSpaceInfo::Type_MapLarge);
             }

             /* Set other basic fields. */
-            m_enable_aslr = (flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
-            m_enable_device_address_space_merge = (flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
+            m_enable_aslr = enable_aslr;
+            m_enable_device_address_space_merge = enable_das_merge;
             m_address_space_start = start;
             m_address_space_end   = end;
             m_is_kernel           = false;
@@ -224,285 +201,100 @@ namespace ams::kern {
             m_block_info_manager = system_resource->GetBlockInfoManagerPointer();
             m_resource_limit     = resource_limit;

-            /* Set up our undetermined regions. */
-            {
-                /* Declare helper structure for layout process. */
-                struct RegionLayoutInfo {
-                    size_t size;
-                    RegionType type;
-                    s32 alloc_index; /* 0 for before process code, 1 for after process code */
-                };
-
-                /* Create region layout info array, and add regions to it. */
-                RegionLayoutInfo region_layouts[RegionType_Count] = {};
-                size_t num_regions = 0;
-
-                if (kernel_map_region_size > 0) { region_layouts[num_regions++] = { .size = kernel_map_region_size, .type = RegionType_KernelMap, .alloc_index = 0, }; }
-                if (stack_region_size > 0)      { region_layouts[num_regions++] = { .size = stack_region_size,      .type = RegionType_Stack,     .alloc_index = 0, }; }
-
-                region_layouts[num_regions++] = { .size = alias_region_size, .type = RegionType_Alias, .alloc_index = 0, };
-                region_layouts[num_regions++] = { .size = heap_region_size,  .type = RegionType_Heap,  .alloc_index = 0, };
-
-                /* Selection-sort the regions by size largest-to-smallest. */
-                for (size_t i = 0; i < num_regions - 1; ++i) {
-                    for (size_t j = i + 1; j < num_regions; ++j) {
-                        if (region_layouts[i].size < region_layouts[j].size) {
-                            std::swap(region_layouts[i], region_layouts[j]);
-                        }
-                    }
-                }
-
-                /* Layout the regions. */
-                constexpr auto AllocIndexCount = 2;
-                KProcessAddress alloc_starts[AllocIndexCount] = { before_process_code_start, after_process_code_start };
-                size_t alloc_sizes[AllocIndexCount] = { before_process_code_size, after_process_code_size };
-                size_t alloc_counts[AllocIndexCount] = {};
-                for (size_t i = 0; i < num_regions; ++i) {
-                    /* Get reference to the current region. */
-                    auto &cur_region = region_layouts[i];
-
-                    /* Determine where the current region should go. */
-                    cur_region.alloc_index = alloc_sizes[1] >= alloc_sizes[0] ? 1 : 0;
-                    ++alloc_counts[cur_region.alloc_index];
-
-                    /* Check that the current region can fit. */
-                    R_UNLESS(alloc_sizes[cur_region.alloc_index] >= cur_region.size, svc::ResultOutOfMemory());
-
-                    /* Update our remaining size tracking. */
-                    alloc_sizes[cur_region.alloc_index] -= cur_region.size;
-                }
-
-                /* Selection sort the regions to coalesce them by alloc index. */
-                for (size_t i = 0; i < num_regions - 1; ++i) {
-                    for (size_t j = i + 1; j < num_regions; ++j) {
-                        if (region_layouts[i].alloc_index > region_layouts[j].alloc_index) {
-                            std::swap(region_layouts[i], region_layouts[j]);
-                        }
-                    }
-                }
-
-                /* Layout the regions for each alloc index. */
-                for (auto cur_alloc_index = 0; cur_alloc_index < AllocIndexCount; ++cur_alloc_index) {
-                    /* If there are no regions to place, continue. */
-                    const size_t cur_alloc_count = alloc_counts[cur_alloc_index];
-                    if (cur_alloc_count == 0) {
-                        continue;
-                    }
-
-                    /* Determine the starting region index for the current alloc index. */
-                    size_t cur_region_index = 0;
-                    for (size_t i = 0; i < num_regions; ++i) {
-                        if (region_layouts[i].alloc_index == cur_alloc_index) {
-                            cur_region_index = i;
-                            break;
-                        }
-                    }
-
-                    /* If aslr is enabled, randomize the current region order. Otherwise, sort by type. */
-                    if (m_enable_aslr) {
-                        for (size_t i = 0; i < cur_alloc_count - 1; ++i) {
-                            std::swap(region_layouts[cur_region_index + i], region_layouts[cur_region_index + KSystemControl::GenerateRandomRange(i, cur_alloc_count - 1)]);
-                        }
-                    } else {
-                        for (size_t i = 0; i < cur_alloc_count - 1; ++i) {
-                            for (size_t j = i + 1; j < cur_alloc_count; ++j) {
-                                if (region_layouts[cur_region_index + i].type > region_layouts[cur_region_index + j].type) {
-                                    std::swap(region_layouts[cur_region_index + i], region_layouts[cur_region_index + j]);
-                                }
-                            }
-                        }
-                    }
-
-                    /* Determine aslr offsets for the current space. */
-                    size_t aslr_offsets[RegionType_Count] = {};
-                    if (m_enable_aslr) {
-                        /* Generate the aslr offsets. */
-                        for (size_t i = 0; i < cur_alloc_count; ++i) {
-                            aslr_offsets[i] = KSystemControl::GenerateRandomRange(0, alloc_sizes[cur_alloc_index] / RegionAlignment) * RegionAlignment;
-                        }
-
-                        /* Sort the aslr offsets. */
-                        for (size_t i = 0; i < cur_alloc_count - 1; ++i) {
-                            for (size_t j = i + 1; j < cur_alloc_count; ++j) {
-                                if (aslr_offsets[i] > aslr_offsets[j]) {
-                                    std::swap(aslr_offsets[i], aslr_offsets[j]);
-                                }
-                            }
-                        }
-                    }
-
-                    /* Calculate final region positions. */
-                    KProcessAddress prev_region_end = alloc_starts[cur_alloc_index];
-                    size_t prev_aslr_offset = 0;
-                    for (size_t i = 0; i < cur_alloc_count; ++i) {
-                        /* Get the current region. */
-                        auto &cur_region = region_layouts[cur_region_index + i];
-
-                        /* Set the current region start/end. */
-                        m_region_starts[cur_region.type] = (aslr_offsets[i] - prev_aslr_offset) + GetInteger(prev_region_end);
-                        m_region_ends[cur_region.type]   = m_region_starts[cur_region.type] + cur_region.size;
-
-                        /* Update tracking variables. */
-                        prev_region_end  = m_region_ends[cur_region.type];
-                        prev_aslr_offset = aslr_offsets[i];
-                    }
-                }
-
-                /* Declare helpers to check that regions are inside our address space. */
-                const KProcessAddress process_code_last = process_code_end - 1;
-                auto IsInAddressSpace = [&](KProcessAddress addr) ALWAYS_INLINE_LAMBDA { return m_address_space_start <= addr && addr <= m_address_space_end; };
-
-                /* Ensure that the KernelMap region is valid. */
-                for (size_t k = 0; k < num_regions; ++k) {
-                    if (const auto &kmap_region = region_layouts[k]; kmap_region.type == RegionType_KernelMap) {
-                        /* If there's no kmap region, we have nothing to check. */
-                        if (kmap_region.size == 0) {
-                            break;
-                        }
-
-                        /* Check that the kmap region is within our address space. */
-                        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_starts[RegionType_KernelMap]));
-                        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_ends[RegionType_KernelMap]));
-
-                        /* Check for overlap with process code. */
-                        const KProcessAddress kmap_start = m_region_starts[RegionType_KernelMap];
-                        const KProcessAddress kmap_last  = m_region_ends[RegionType_KernelMap] - 1;
-                        MESOSPHERE_ABORT_UNLESS(kernel_map_region_size == 0 || kmap_last < process_code_start || process_code_last < kmap_start);
-
-                        /* Check for overlap with stack. */
-                        for (size_t s = 0; s < num_regions; ++s) {
-                            if (const auto &stack_region = region_layouts[s]; stack_region.type == RegionType_Stack) {
-                                if (stack_region.size != 0) {
-                                    const KProcessAddress stack_start = m_region_starts[RegionType_Stack];
-                                    const KProcessAddress stack_last  = m_region_ends[RegionType_Stack] - 1;
-                                    MESOSPHERE_ABORT_UNLESS((kernel_map_region_size == 0 && stack_region_size == 0) || kmap_last < stack_start || stack_last < kmap_start);
-                                }
-                                break;
-                            }
-                        }
-
-                        /* Check for overlap with alias. */
-                        for (size_t a = 0; a < num_regions; ++a) {
-                            if (const auto &alias_region = region_layouts[a]; alias_region.type == RegionType_Alias) {
-                                if (alias_region.size != 0) {
-                                    const KProcessAddress alias_start = m_region_starts[RegionType_Alias];
-                                    const KProcessAddress alias_last  = m_region_ends[RegionType_Alias] - 1;
-                                    MESOSPHERE_ABORT_UNLESS(kmap_last < alias_start || alias_last < kmap_start);
-                                }
-                                break;
-                            }
-                        }
-
-                        /* Check for overlap with heap. */
-                        for (size_t h = 0; h < num_regions; ++h) {
-                            if (const auto &heap_region = region_layouts[h]; heap_region.type == RegionType_Heap) {
-                                if (heap_region.size != 0) {
-                                    const KProcessAddress heap_start = m_region_starts[RegionType_Heap];
-                                    const KProcessAddress heap_last  = m_region_ends[RegionType_Heap] - 1;
-                                    MESOSPHERE_ABORT_UNLESS(kmap_last < heap_start || heap_last < kmap_start);
-                                }
-                                break;
-                            }
-                        }
-                    }
-                }
-
-                /* Check that the Stack region is valid. */
-                for (size_t s = 0; s < num_regions; ++s) {
-                    if (const auto &stack_region = region_layouts[s]; stack_region.type == RegionType_Stack) {
-                        /* If there's no stack region, we have nothing to check. */
-                        if (stack_region.size == 0) {
-                            break;
-                        }
-
-                        /* Check that the stack region is within our address space. */
-                        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_starts[RegionType_Stack]));
-                        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_ends[RegionType_Stack]));
-
-                        /* Check for overlap with process code. */
-                        const KProcessAddress stack_start = m_region_starts[RegionType_Stack];
-                        const KProcessAddress stack_last  = m_region_ends[RegionType_Stack] - 1;
-                        MESOSPHERE_ABORT_UNLESS(stack_region_size == 0 || stack_last < process_code_start || process_code_last < stack_start);
-
-                        /* Check for overlap with alias. */
-                        for (size_t a = 0; a < num_regions; ++a) {
-                            if (const auto &alias_region = region_layouts[a]; alias_region.type == RegionType_Alias) {
-                                if (alias_region.size != 0) {
-                                    const KProcessAddress alias_start = m_region_starts[RegionType_Alias];
-                                    const KProcessAddress alias_last  = m_region_ends[RegionType_Alias] - 1;
-                                    MESOSPHERE_ABORT_UNLESS(stack_last < alias_start || alias_last < stack_start);
-                                }
-                                break;
-                            }
-                        }
-
-                        /* Check for overlap with heap. */
-                        for (size_t h = 0; h < num_regions; ++h) {
-                            if (const auto &heap_region = region_layouts[h]; heap_region.type == RegionType_Heap) {
-                                if (heap_region.size != 0) {
-                                    const KProcessAddress heap_start = m_region_starts[RegionType_Heap];
-                                    const KProcessAddress heap_last  = m_region_ends[RegionType_Heap] - 1;
-                                    MESOSPHERE_ABORT_UNLESS(stack_last < heap_start || heap_last < stack_start);
-                                }
-                                break;
-                            }
-                        }
-                    }
-                }
-
-                /* Check that the Alias region is valid. */
-                for (size_t a = 0; a < num_regions; ++a) {
-                    if (const auto &alias_region = region_layouts[a]; alias_region.type == RegionType_Alias) {
-                        /* If there's no alias region, we have nothing to check. */
-                        if (alias_region.size == 0) {
-                            break;
-                        }
-
-                        /* Check that the alias region is within our address space. */
-                        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_starts[RegionType_Alias]));
-                        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_ends[RegionType_Alias]));
-
-                        /* Check for overlap with process code. */
-                        const KProcessAddress alias_start = m_region_starts[RegionType_Alias];
-                        const KProcessAddress alias_last  = m_region_ends[RegionType_Alias] - 1;
-                        MESOSPHERE_ABORT_UNLESS(alias_last < process_code_start || process_code_last < alias_start);
-
-                        /* Check for overlap with heap. */
-                        for (size_t h = 0; h < num_regions; ++h) {
-                            if (const auto &heap_region = region_layouts[h]; heap_region.type == RegionType_Heap) {
-                                if (heap_region.size != 0) {
-                                    const KProcessAddress heap_start = m_region_starts[RegionType_Heap];
-                                    const KProcessAddress heap_last  = m_region_ends[RegionType_Heap] - 1;
-                                    MESOSPHERE_ABORT_UNLESS(alias_last < heap_start || heap_last < alias_start);
-                                }
-                                break;
-                            }
-                        }
-                    }
-                }
-
-                /* Check that the Heap region is valid. */
-                for (size_t h = 0; h < num_regions; ++h) {
-                    if (const auto &heap_region = region_layouts[h]; heap_region.type == RegionType_Heap) {
-                        /* If there's no heap region, we have nothing to check. */
-                        if (heap_region.size == 0) {
-                            break;
-                        }
-
-                        /* Check that the heap region is within our address space. */
-                        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_starts[RegionType_Heap]));
-                        MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_ends[RegionType_Heap]));
-
-                        /* Check for overlap with process code. */
-                        const KProcessAddress heap_start = m_region_starts[RegionType_Heap];
-                        const KProcessAddress heap_last  = m_region_ends[RegionType_Heap] - 1;
-                        MESOSPHERE_ABORT_UNLESS(heap_last < process_code_start || process_code_last < heap_start);
-                    }
-                }
-            }
+            /* Determine the region we can place our undetermineds in. */
+            KProcessAddress alloc_start;
+            size_t alloc_size;
+            if ((GetInteger(process_code_start) - GetInteger(m_code_region_start)) >= (GetInteger(end) - GetInteger(process_code_end))) {
+                alloc_start = m_code_region_start;
+                alloc_size  = GetInteger(process_code_start) - GetInteger(m_code_region_start);
+            } else {
+                alloc_start = process_code_end;
+                alloc_size  = GetInteger(end) - GetInteger(process_code_end);
+            }
+            const size_t needed_size = (alias_region_size + heap_region_size + stack_region_size + kernel_map_region_size);
+            R_UNLESS(alloc_size >= needed_size, svc::ResultOutOfMemory());
+
+            const size_t remaining_size = alloc_size - needed_size;
+
+            /* Determine random placements for each region. */
+            size_t alias_rnd = 0, heap_rnd = 0, stack_rnd = 0, kmap_rnd = 0;
+            if (enable_aslr) {
+                alias_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
+                heap_rnd  = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
+                stack_rnd = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
+                kmap_rnd  = KSystemControl::GenerateRandomRange(0, remaining_size / RegionAlignment) * RegionAlignment;
+            }
+
+            /* Setup heap and alias regions. */
+            m_alias_region_start = alloc_start + alias_rnd;
+            m_alias_region_end   = m_alias_region_start + alias_region_size;
+            m_heap_region_start  = alloc_start + heap_rnd;
+            m_heap_region_end    = m_heap_region_start + heap_region_size;
+
+            if (alias_rnd <= heap_rnd) {
+                m_heap_region_start += alias_region_size;
+                m_heap_region_end   += alias_region_size;
+            } else {
+                m_alias_region_start += heap_region_size;
+                m_alias_region_end   += heap_region_size;
+            }
+
+            /* Setup stack region. */
+            if (stack_region_size) {
+                m_stack_region_start = alloc_start + stack_rnd;
+                m_stack_region_end   = m_stack_region_start + stack_region_size;
+
+                if (alias_rnd < stack_rnd) {
+                    m_stack_region_start += alias_region_size;
+                    m_stack_region_end   += alias_region_size;
+                } else {
+                    m_alias_region_start += stack_region_size;
+                    m_alias_region_end   += stack_region_size;
+                }
+
+                if (heap_rnd < stack_rnd) {
+                    m_stack_region_start += heap_region_size;
+                    m_stack_region_end   += heap_region_size;
+                } else {
+                    m_heap_region_start += stack_region_size;
+                    m_heap_region_end   += stack_region_size;
+                }
+            }
+
+            /* Setup kernel map region. */
+            if (kernel_map_region_size) {
+                m_kernel_map_region_start = alloc_start + kmap_rnd;
+                m_kernel_map_region_end   = m_kernel_map_region_start + kernel_map_region_size;
+
+                if (alias_rnd < kmap_rnd) {
+                    m_kernel_map_region_start += alias_region_size;
+                    m_kernel_map_region_end   += alias_region_size;
+                } else {
+                    m_alias_region_start += kernel_map_region_size;
+                    m_alias_region_end   += kernel_map_region_size;
+                }
+
+                if (heap_rnd < kmap_rnd) {
+                    m_kernel_map_region_start += heap_region_size;
+                    m_kernel_map_region_end   += heap_region_size;
+                } else {
+                    m_heap_region_start += kernel_map_region_size;
+                    m_heap_region_end   += kernel_map_region_size;
+                }
+
+                if (stack_region_size) {
+                    if (stack_rnd < kmap_rnd) {
+                        m_kernel_map_region_start += stack_region_size;
+                        m_kernel_map_region_end   += stack_region_size;
+                    } else {
+                        m_stack_region_start += kernel_map_region_size;
+                        m_stack_region_end   += kernel_map_region_size;
+                    }
+                }
+            }

             /* Set heap and fill members. */
-            m_current_heap_end = m_region_starts[RegionType_Heap];
+            m_current_heap_end = m_heap_region_start;
             m_max_heap_size = 0;
             m_mapped_physical_memory_size = 0;
             m_mapped_unsafe_physical_memory = 0;
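
Both sides of this hunk rely on the same underlying trick: draw one random ASLR offset per region, then shift the regions so they never collide. The left side's variant (sort the offsets ascending, then place regions in order, separated by the deltas between consecutive offsets) can be sketched in isolation as follows. All names here are illustrative, and free_space is assumed to already be the capacity left after subtracting the region sizes:

#include <algorithm>
#include <cstdint>
#include <random>
#include <vector>

struct Region { uint64_t start; uint64_t size; };

/* Toy model of offset-sorted placement: one random aligned offset per region,
 * sorted ascending; region i is shifted right by the delta between offset i
 * and offset i-1, so regions stay in order and never overlap. */
std::vector<Region> PlaceRegions(uint64_t base, uint64_t free_space, const std::vector<uint64_t> &sizes, uint64_t align, std::mt19937_64 &rng) {
    std::uniform_int_distribution<uint64_t> dist(0, free_space / align); /* inclusive bounds */
    std::vector<uint64_t> offsets(sizes.size());
    for (auto &o : offsets) { o = dist(rng) * align; }
    std::sort(offsets.begin(), offsets.end());

    std::vector<Region> out;
    uint64_t prev_end = base, prev_off = 0;
    for (size_t i = 0; i < sizes.size(); ++i) {
        const uint64_t start = prev_end + (offsets[i] - prev_off);
        out.push_back({ start, sizes[i] });
        prev_end = start + sizes[i];
        prev_off = offsets[i];
    }
    return out;
}

Because the offsets are sorted, each region starts at or after the previous region's end, so disjointness holds by construction; the right side's variant instead places each region independently and then nudges regions past one another by comparing their raw offsets pairwise.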
@@ -517,6 +309,32 @@ namespace ams::kern {

             /* Set allocation option. */
             m_allocate_option = KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction_FromBack : KMemoryManager::Direction_FromFront);

+            /* Ensure that we regions inside our address space. */
+            auto IsInAddressSpace = [&](KProcessAddress addr) ALWAYS_INLINE_LAMBDA { return m_address_space_start <= addr && addr <= m_address_space_end; };
+            MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_alias_region_start));
+            MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_alias_region_end));
+            MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_heap_region_start));
+            MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_heap_region_end));
+            MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_stack_region_start));
+            MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_stack_region_end));
+            MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_kernel_map_region_start));
+            MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_kernel_map_region_end));
+
+            /* Ensure that we selected regions that don't overlap. */
+            const KProcessAddress alias_start = m_alias_region_start;
+            const KProcessAddress alias_last  = m_alias_region_end - 1;
+            const KProcessAddress heap_start  = m_heap_region_start;
+            const KProcessAddress heap_last   = m_heap_region_end - 1;
+            const KProcessAddress stack_start = m_stack_region_start;
+            const KProcessAddress stack_last  = m_stack_region_end - 1;
+            const KProcessAddress kmap_start  = m_kernel_map_region_start;
+            const KProcessAddress kmap_last   = m_kernel_map_region_end - 1;
+            MESOSPHERE_ABORT_UNLESS(alias_last < heap_start  || heap_last  < alias_start);
+            MESOSPHERE_ABORT_UNLESS(alias_last < stack_start || stack_last < alias_start);
+            MESOSPHERE_ABORT_UNLESS(alias_last < kmap_start  || kmap_last  < alias_start);
+            MESOSPHERE_ABORT_UNLESS(heap_last  < stack_start || stack_last < heap_start);
+            MESOSPHERE_ABORT_UNLESS(heap_last  < kmap_start  || kmap_last  < heap_start);
+
             /* Initialize our implementation. */
             m_impl.InitializeForProcess(table, GetInteger(start), GetInteger(end));
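
Every overlap assertion above is an instance of the same inclusive-interval predicate, which is worth spelling out once; this is a generic illustration, not a kernel helper:

#include <cstdint>

/* Two inclusive ranges [a, a_last] and [b, b_last] are disjoint
 * iff one ends strictly before the other begins. */
constexpr bool Disjoint(uint64_t a, uint64_t a_last, uint64_t b, uint64_t b_last) {
    return a_last < b || b_last < a;
}

static_assert( Disjoint(0x1000, 0x1FFF, 0x2000, 0x2FFF)); /* adjacent ranges do not overlap */
static_assert(!Disjoint(0x1000, 0x2000, 0x2000, 0x2FFF)); /* sharing one byte counts as overlap */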
@@ -556,16 +374,16 @@ namespace ams::kern {
                 case ams::svc::MemoryState_Kernel:
                     return m_address_space_start;
                 case ams::svc::MemoryState_Normal:
-                    return m_region_starts[RegionType_Heap];
+                    return m_heap_region_start;
                 case ams::svc::MemoryState_Ipc:
                 case ams::svc::MemoryState_NonSecureIpc:
                 case ams::svc::MemoryState_NonDeviceIpc:
-                    return m_region_starts[RegionType_Alias];
+                    return m_alias_region_start;
                 case ams::svc::MemoryState_Stack:
-                    return m_region_starts[RegionType_Stack];
+                    return m_stack_region_start;
                 case ams::svc::MemoryState_Static:
                 case ams::svc::MemoryState_ThreadLocal:
-                    return m_region_starts[RegionType_KernelMap];
+                    return m_kernel_map_region_start;
                 case ams::svc::MemoryState_Io:
                 case ams::svc::MemoryState_Shared:
                 case ams::svc::MemoryState_AliasCode:

@@ -591,16 +409,16 @@ namespace ams::kern {
                 case ams::svc::MemoryState_Kernel:
                     return m_address_space_end - m_address_space_start;
                 case ams::svc::MemoryState_Normal:
-                    return m_region_ends[RegionType_Heap] - m_region_starts[RegionType_Heap];
+                    return m_heap_region_end - m_heap_region_start;
                 case ams::svc::MemoryState_Ipc:
                 case ams::svc::MemoryState_NonSecureIpc:
                 case ams::svc::MemoryState_NonDeviceIpc:
-                    return m_region_ends[RegionType_Alias] - m_region_starts[RegionType_Alias];
+                    return m_alias_region_end - m_alias_region_start;
                 case ams::svc::MemoryState_Stack:
-                    return m_region_ends[RegionType_Stack] - m_region_starts[RegionType_Stack];
+                    return m_stack_region_end - m_stack_region_start;
                 case ams::svc::MemoryState_Static:
                 case ams::svc::MemoryState_ThreadLocal:
-                    return m_region_ends[RegionType_KernelMap] - m_region_starts[RegionType_KernelMap];
+                    return m_kernel_map_region_end - m_kernel_map_region_start;
                 case ams::svc::MemoryState_Io:
                 case ams::svc::MemoryState_Shared:
                 case ams::svc::MemoryState_AliasCode:

@@ -628,8 +446,8 @@ namespace ams::kern {
             const size_t region_size = this->GetRegionSize(state);

             const bool is_in_region = region_start <= addr && addr < end && last <= region_start + region_size - 1;
-            const bool is_in_heap   = !(end <= m_region_starts[RegionType_Heap] || m_region_ends[RegionType_Heap] <= addr || m_region_starts[RegionType_Heap] == m_region_ends[RegionType_Heap]);
-            const bool is_in_alias  = !(end <= m_region_starts[RegionType_Alias] || m_region_ends[RegionType_Alias] <= addr || m_region_starts[RegionType_Alias] == m_region_ends[RegionType_Alias]);
+            const bool is_in_heap   = !(end <= m_heap_region_start || m_heap_region_end <= addr || m_heap_region_start == m_heap_region_end);
+            const bool is_in_alias  = !(end <= m_alias_region_start || m_alias_region_end <= addr || m_alias_region_start == m_alias_region_end);

             switch (state) {
                 case ams::svc::MemoryState_Free:
                 case ams::svc::MemoryState_Kernel:
@@ -916,7 +734,7 @@ namespace ams::kern {
             /* Begin traversal. */
             TraversalContext context;
-            TraversalEntry   cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0, .attr = 0 };
+            TraversalEntry   cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0 };
             bool             cur_valid = false;
             TraversalEntry   next_entry;
             bool             next_valid;
@@ -1266,7 +1084,7 @@ namespace ams::kern {
             /* Allocate pages for the insecure memory. */
             KPageGroup pg(m_block_info_manager);
-            R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), size / PageSize, 1, KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction_FromFront)));
+            R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), size / PageSize, KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction_FromFront)));

             /* Close the opened pages when we're done with them. */
             /* If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed automatically. */
@@ -1419,14 +1237,14 @@ namespace ams::kern {
             return this->GetSize(KMemoryState_AliasCodeData);
         }

-        Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties &properties) {
+        Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, KMemoryPermission perm) {
             MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());

             /* Create a page group to hold the pages we allocate. */
             KPageGroup pg(m_block_info_manager);

             /* Allocate the pages. */
-            R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, 1, m_allocate_option));
+            R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, m_allocate_option));

             /* Ensure that the page group is closed when we're done working with it. */
             ON_SCOPE_EXIT { pg.Close(); };

@@ -1437,6 +1255,7 @@ namespace ams::kern {
             }

             /* Map the pages. */
+            const KPageProperties properties = { perm, false, false, DisableMergeAttribute_None };
             R_RETURN(this->Operate(page_list, address, num_pages, pg, properties, OperationType_MapGroup, false));
         }
@@ -1687,12 +1506,11 @@ namespace ams::kern {
             /* Begin a traversal. */
             TraversalContext context;
-            TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0, .attr = 0 };
+            TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0 };
             R_UNLESS(impl.BeginTraversal(std::addressof(cur_entry), std::addressof(context), address), svc::ResultInvalidCurrentMemory());

             /* Traverse until we have enough size or we aren't contiguous any more. */
             const KPhysicalAddress phys_address = cur_entry.phys_addr;
-            const u8 entry_attr = cur_entry.attr;
             size_t contig_size;
             for (contig_size = cur_entry.block_size - (GetInteger(phys_address) & (cur_entry.block_size - 1)); contig_size < size; contig_size += cur_entry.block_size) {
                 if (!impl.ContinueTraversal(std::addressof(cur_entry), std::addressof(context))) {

@@ -1701,9 +1519,6 @@ namespace ams::kern {
                 if (cur_entry.phys_addr != phys_address + contig_size) {
                     break;
                 }
-
-                if (cur_entry.attr != entry_attr) {
-                    break;
-                }
             }

             /* Take the minimum size for our region. */

@@ -1717,7 +1532,7 @@ namespace ams::kern {
             }

             /* The memory is contiguous, so set the output range. */
-            out->Set(phys_address, size, is_heap, attr);
+            out->Set(phys_address, size, is_heap);
             R_SUCCEED();
         }
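
The traversal loop above accumulates block sizes only while each new entry begins exactly where the accumulated range ends (the left side additionally requires matching attributes). A self-contained sketch of that contiguity check; the Extent type is a hypothetical stand-in for the traversal entries:

#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

struct Extent { uint64_t phys; size_t size; };

/* Returns the merged extent if every entry starts exactly where the previous
 * one ended, mirroring the break-on-discontiguity logic in the loop above. */
std::optional<Extent> MergeContiguous(const std::vector<Extent> &entries) {
    if (entries.empty()) {
        return std::nullopt;
    }
    Extent out = entries.front();
    for (size_t i = 1; i < entries.size(); ++i) {
        if (entries[i].phys != out.phys + out.size) {
            return std::nullopt; /* discontiguous: give up, as the loop's break does */
        }
        out.size += entries[i].size;
    }
    return out;
}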
@@ -1877,17 +1692,17 @@ namespace ams::kern {
             KScopedLightLock lk(m_general_lock);

             /* Validate that setting heap size is possible at all. */
             R_UNLESS(!m_is_kernel, svc::ResultOutOfMemory());
-            R_UNLESS(size <= static_cast<size_t>(m_region_ends[RegionType_Heap] - m_region_starts[RegionType_Heap]), svc::ResultOutOfMemory());
+            R_UNLESS(size <= static_cast<size_t>(m_heap_region_end - m_heap_region_start), svc::ResultOutOfMemory());
             R_UNLESS(size <= m_max_heap_size, svc::ResultOutOfMemory());

-            if (size < static_cast<size_t>(m_current_heap_end - m_region_starts[RegionType_Heap])) {
+            if (size < static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
                 /* The size being requested is less than the current size, so we need to free the end of the heap. */

                 /* Validate memory state. */
                 size_t num_allocator_blocks;
                 R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
-                                             m_region_starts[RegionType_Heap] + size, (m_current_heap_end - m_region_starts[RegionType_Heap]) - size,
+                                             m_heap_region_start + size, (m_current_heap_end - m_heap_region_start) - size,
                                              KMemoryState_All, KMemoryState_Normal,
                                              KMemoryPermission_All, KMemoryPermission_UserReadWrite,
                                              KMemoryAttribute_All, KMemoryAttribute_None));

@@ -1901,30 +1716,30 @@ namespace ams::kern {
                 KScopedPageTableUpdater updater(this);

                 /* Unmap the end of the heap. */
-                const size_t num_pages = ((m_current_heap_end - m_region_starts[RegionType_Heap]) - size) / PageSize;
+                const size_t num_pages = ((m_current_heap_end - m_heap_region_start) - size) / PageSize;
                 const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
-                R_TRY(this->Operate(updater.GetPageList(), m_region_starts[RegionType_Heap] + size, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
+                R_TRY(this->Operate(updater.GetPageList(), m_heap_region_start + size, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));

                 /* Release the memory from the resource limit. */
                 m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, num_pages * PageSize);

                 /* Apply the memory block update. */
-                m_memory_block_manager.Update(std::addressof(allocator), m_region_starts[RegionType_Heap] + size, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, size == 0 ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None);
+                m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, size == 0 ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None);

                 /* Update the current heap end. */
-                m_current_heap_end = m_region_starts[RegionType_Heap] + size;
+                m_current_heap_end = m_heap_region_start + size;

                 /* Set the output. */
-                *out = m_region_starts[RegionType_Heap];
+                *out = m_heap_region_start;
                 R_SUCCEED();
-            } else if (size == static_cast<size_t>(m_current_heap_end - m_region_starts[RegionType_Heap])) {
+            } else if (size == static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
                 /* The size requested is exactly the current size. */
-                *out = m_region_starts[RegionType_Heap];
+                *out = m_heap_region_start;
                 R_SUCCEED();
             } else {
                 /* We have to allocate memory. Determine how much to allocate and where while the table is locked. */
                 cur_address     = m_current_heap_end;
-                allocation_size = size - (m_current_heap_end - m_region_starts[RegionType_Heap]);
+                allocation_size = size - (m_current_heap_end - m_heap_region_start);
             }
         }

@@ -1934,7 +1749,7 @@ namespace ams::kern {

             /* Allocate pages for the heap extension. */
             KPageGroup pg(m_block_info_manager);
-            R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize, 1, m_allocate_option));
+            R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize, m_allocate_option));

             /* Close the opened pages when we're done with them. */
             /* If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed automatically. */

@@ -1967,20 +1782,20 @@ namespace ams::kern {

             /* Map the pages. */
             const size_t num_pages = allocation_size / PageSize;
-            const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, (m_current_heap_end == m_region_starts[RegionType_Heap]) ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
+            const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, (m_current_heap_end == m_heap_region_start) ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
             R_TRY(this->Operate(updater.GetPageList(), m_current_heap_end, num_pages, pg, map_properties, OperationType_MapGroup, false));

             /* We succeeded, so commit our memory reservation. */
             memory_reservation.Commit();

             /* Apply the memory block update. */
-            m_memory_block_manager.Update(std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, m_region_starts[RegionType_Heap] == m_current_heap_end ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);
+            m_memory_block_manager.Update(std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, m_heap_region_start == m_current_heap_end ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);

             /* Update the current heap end. */
-            m_current_heap_end = m_region_starts[RegionType_Heap] + size;
+            m_current_heap_end = m_heap_region_start + size;

             /* Set the output. */
-            *out = m_region_starts[RegionType_Heap];
+            *out = m_heap_region_start;
             R_SUCCEED();
         }
     }
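
SetHeapSize splits into three cases against the currently committed heap: shrink (unmap the tail and release it from the resource limit), exact match (return immediately), or grow (reserve, allocate, and map the delta). A toy classifier for that decision, with illustrative names only:

#include <cstdint>

enum class HeapOp { Shrink, None, Grow };

/* Compare the requested size against the bytes currently committed
 * between the heap region start and the current heap end. */
HeapOp ClassifyHeapRequest(uint64_t requested, uint64_t heap_start, uint64_t current_end) {
    const uint64_t current = current_end - heap_start;
    if (requested < current)  { return HeapOp::Shrink; } /* unmap the tail */
    if (requested == current) { return HeapOp::None; }   /* nothing to do */
    return HeapOp::Grow;                                 /* map the delta */
}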
@@ -2112,8 +1927,8 @@ namespace ams::kern {
             const KPhysicalAddress last = phys_addr + size - 1;

             /* Get region extents. */
-            const KProcessAddress region_start     = m_region_starts[RegionType_KernelMap];
-            const size_t          region_size      = m_region_ends[RegionType_KernelMap] - m_region_starts[RegionType_KernelMap];
+            const KProcessAddress region_start     = m_kernel_map_region_start;
+            const size_t          region_size      = m_kernel_map_region_end - m_kernel_map_region_start;
             const size_t          region_num_pages = region_size / PageSize;

             MESOSPHERE_ASSERT(this->CanContain(region_start, region_size, state));
@@ -2422,11 +2237,11 @@ namespace ams::kern {
             KScopedPageTableUpdater updater(this);

             /* Perform mapping operation. */
-            const KPageProperties properties = { perm, false, false, DisableMergeAttribute_DisableHead };
             if (is_pa_valid) {
+                const KPageProperties properties = { perm, false, false, DisableMergeAttribute_DisableHead };
                 R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, OperationType_Map, false));
             } else {
-                R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, properties));
+                R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, perm));
             }

             /* Update the blocks. */

@@ -2458,8 +2273,8 @@ namespace ams::kern {
             KScopedPageTableUpdater updater(this);

             /* Map the pages. */
-            const KPageProperties properties = { perm, false, false, DisableMergeAttribute_DisableHead };
-            R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, properties));
+            R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, perm));

             /* Update the blocks. */
             m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
@@ -2998,7 +2812,7 @@ namespace ams::kern {
                 MESOSPHERE_ABORT_UNLESS(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), address));

                 /* Determine the current read size. */
-                const size_t cur_size = std::min<size_t>(last_address - address + 1, PageSize - (GetInteger(address) & (PageSize - 1)));
+                const size_t cur_size = std::min<size_t>(last_address - address + 1, util::AlignDown(GetInteger(address) + PageSize, PageSize) - GetInteger(address));

                 /* Read. */
                 R_TRY(dst_page_table.ReadIoMemoryImpl(dst, phys_addr, cur_size, state));

@@ -3034,7 +2848,7 @@ namespace ams::kern {
                 MESOSPHERE_ABORT_UNLESS(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), address));

                 /* Determine the current read size. */
-                const size_t cur_size = std::min<size_t>(last_address - address + 1, PageSize - (GetInteger(address) & (PageSize - 1)));
+                const size_t cur_size = std::min<size_t>(last_address - address + 1, util::AlignDown(GetInteger(address) + PageSize, PageSize) - GetInteger(address));

                 /* Read. */
                 R_TRY(dst_page_table.WriteIoMemoryImpl(phys_addr, src, cur_size, state));
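
The two expressions swapped in these hunks both compute the bytes remaining in the current page, so the copy loop never crosses a page boundary: one masks the low bits, the other aligns up to the next boundary. They agree for every address, as the compile-time checks below illustrate (PageSize is assumed to be 0x1000 here):

#include <cstdint>

constexpr uint64_t PageSize = 0x1000;

constexpr uint64_t BytesToPageEndMask(uint64_t addr)  { return PageSize - (addr & (PageSize - 1)); }
constexpr uint64_t BytesToPageEndAlign(uint64_t addr) { return ((addr + PageSize) & ~(PageSize - 1)) - addr; }

static_assert(BytesToPageEndMask(0x1234) == BytesToPageEndAlign(0x1234)); /* 0xDCC both ways */
static_assert(BytesToPageEndMask(0x1000) == BytesToPageEndAlign(0x1000)); /* a full page at a boundary */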
@@ -3810,7 +3624,7 @@ namespace ams::kern {
             R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());

             /* Get the source permission. */
-            const auto src_perm = static_cast<KMemoryPermission>((test_perm == KMemoryPermission_UserReadWrite) ? (KMemoryPermission_KernelReadWrite | KMemoryPermission_NotMapped) : KMemoryPermission_UserRead);
+            const auto src_perm = static_cast<KMemoryPermission>((test_perm == KMemoryPermission_UserReadWrite) ? KMemoryPermission_KernelReadWrite | KMemoryPermission_NotMapped : KMemoryPermission_UserRead);

             /* Get aligned extents. */
             const KProcessAddress aligned_src_start = util::AlignDown(GetInteger(address), PageSize);

@@ -3906,8 +3720,8 @@ namespace ams::kern {
             MESOSPHERE_ASSERT(src_page_table.IsLockedByCurrentThread());

             /* Check that we can theoretically map. */
-            const KProcessAddress region_start = m_region_starts[RegionType_Alias];
-            const size_t          region_size  = m_region_ends[RegionType_Alias] - m_region_starts[RegionType_Alias];
+            const KProcessAddress region_start = m_alias_region_start;
+            const size_t          region_size  = m_alias_region_end - m_alias_region_start;
             R_UNLESS(size < region_size, svc::ResultOutOfAddressSpace());

             /* Get aligned source extents. */
@@ -4139,7 +3953,7 @@ namespace ams::kern {
             const size_t src_map_size = src_map_end - src_map_start;

             /* Ensure that we clean up appropriately if we fail after this. */
-            const auto src_perm = static_cast<KMemoryPermission>((test_perm == KMemoryPermission_UserReadWrite) ? (KMemoryPermission_KernelReadWrite | KMemoryPermission_NotMapped) : KMemoryPermission_UserRead);
+            const auto src_perm = static_cast<KMemoryPermission>((test_perm == KMemoryPermission_UserReadWrite) ? KMemoryPermission_KernelReadWrite | KMemoryPermission_NotMapped : KMemoryPermission_UserRead);
             ON_RESULT_FAILURE {
                 if (src_map_end > src_map_start) {
                     src_page_table.CleanupForIpcClientOnServerSetupFailure(updater.GetPageList(), src_map_start, src_map_size, src_perm);

@@ -4674,7 +4488,7 @@ namespace ams::kern {
                     }
                 }

-                /* Map the pages. */
+                /* Map the papges. */
                 R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages, cur_pg, map_properties, OperationType_MapFirstGroup, false));
             }
         }
@@ -4848,7 +4662,7 @@ namespace ams::kern {

             /* Allocate the new memory. */
             const size_t num_pages = size / PageSize;
-            R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, 1, KMemoryManager::EncodeOption(KMemoryManager::Pool_Unsafe, KMemoryManager::Direction_FromFront)));
+            R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, KMemoryManager::EncodeOption(KMemoryManager::Pool_Unsafe, KMemoryManager::Direction_FromFront)));

             /* Close the page group when we're done with it. */
             ON_SCOPE_EXIT { pg.Close(); };


@@ -298,8 +298,10 @@ namespace ams::kern {

             /* Setup page table. */
             {
-                const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0;
-                R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit));
+                const auto as_type          = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
+                const bool enable_aslr      = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
+                const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
+                R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit));
             }
             ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };

@@ -377,8 +379,10 @@ namespace ams::kern {

             /* Setup page table. */
             {
-                const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0;
-                R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, code_size, m_system_resource, res_limit));
+                const auto as_type          = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
+                const bool enable_aslr      = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
+                const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
+                R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, code_size, m_system_resource, res_limit));
             }
             ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };
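
Both call sites decode the same packed params.flags word before handing it to the page table. A hedged sketch of that decoding, with placeholder bit positions (the real ams::svc flag values differ):

#include <cstdint>

/* Hypothetical flag layout, for illustration only. */
enum CreateProcessFlag : uint32_t {
    CreateProcessFlag_EnableAslr                     = (1u << 2),
    CreateProcessFlag_DisableDeviceAddressSpaceMerge = (1u << 3),
    CreateProcessFlag_AddressSpaceMask               = (7u << 5),
};

struct PageTableConfig {
    uint32_t as_type;
    bool enable_aslr;
    bool enable_das_merge;
    bool from_back;
};

PageTableConfig DecodeFlags(uint32_t flags) {
    PageTableConfig cfg;
    cfg.as_type          = flags & CreateProcessFlag_AddressSpaceMask;
    cfg.enable_aslr      = (flags & CreateProcessFlag_EnableAslr) != 0;
    cfg.enable_das_merge = (flags & CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
    cfg.from_back        = !cfg.enable_aslr; /* without ASLR, allocate from the back of the pool */
    return cfg;
}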


@@ -36,7 +36,7 @@ namespace ams::kern {

             /* Cleanup the session list. */
             while (true) {
-                /* Get the last session in the list. */
+                /* Get the last session in the list */
                 KServerSession *session = nullptr;
                 {
                     KScopedSchedulerLock sl;

@@ -56,7 +56,7 @@ namespace ams::kern {

             /* Cleanup the light session list. */
             while (true) {
-                /* Get the last session in the list. */
+                /* Get the last session in the list */
                 KLightServerSession *session = nullptr;
                 {
                     KScopedSchedulerLock sl;


@@ -650,7 +650,7 @@ namespace ams::kern {
                 const auto src_state = src_user ? KMemoryState_FlagReferenceCounted : KMemoryState_FlagLinearMapped;

                 /* Determine the source permission. User buffer should be unmapped + read, TLS should be user readable. */
-                const KMemoryPermission src_perm = static_cast<KMemoryPermission>(src_user ? (KMemoryPermission_NotMapped | KMemoryPermission_KernelRead) : KMemoryPermission_UserRead);
+                const KMemoryPermission src_perm = static_cast<KMemoryPermission>(src_user ? KMemoryPermission_NotMapped | KMemoryPermission_KernelRead : KMemoryPermission_UserRead);

                 /* Perform the fast part of the copy. */
                 R_TRY(src_page_table.CopyMemoryFromLinearToKernel(reinterpret_cast<uintptr_t>(dst_msg_ptr) + offset_words, fast_size, src_message_buffer + offset_words,

@@ -753,7 +753,7 @@ namespace ams::kern {
                 /* Perform the pointer data copy. */
                 const bool dst_heap  = dst_user && dst_recv_list.IsToMessageBuffer();
                 const auto dst_state = dst_heap ? KMemoryState_FlagReferenceCounted : KMemoryState_FlagLinearMapped;
-                const KMemoryPermission dst_perm = static_cast<KMemoryPermission>(dst_heap ? (KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite) : KMemoryPermission_UserReadWrite);
+                const KMemoryPermission dst_perm = static_cast<KMemoryPermission>(dst_heap ? KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite : KMemoryPermission_UserReadWrite);
                 R_TRY(dst_page_table.CopyMemoryFromUserToLinear(recv_pointer, recv_size,
                                                                 dst_state, dst_state,
                                                                 dst_perm,

@@ -911,7 +911,7 @@ namespace ams::kern {
                 const auto dst_state = dst_user ? KMemoryState_FlagReferenceCounted : KMemoryState_FlagLinearMapped;

                 /* Determine the dst permission. User buffer should be unmapped + read, TLS should be user readable. */
-                const KMemoryPermission dst_perm = static_cast<KMemoryPermission>(dst_user ? (KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite) : KMemoryPermission_UserReadWrite);
+                const KMemoryPermission dst_perm = static_cast<KMemoryPermission>(dst_user ? KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite : KMemoryPermission_UserReadWrite);

                 /* Perform the fast part of the copy. */
                 R_TRY(dst_page_table.CopyMemoryFromKernelToLinear(dst_message_buffer + offset_words, fast_size,


@@ -37,7 +37,7 @@ namespace ams::kern {
             R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());

             /* Allocate the memory. */
-            R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(m_page_group), num_pages, 1, owner->GetAllocateOption()));
+            R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(m_page_group), num_pages, owner->GetAllocateOption()));

             /* Commit our reservation. */
             memory_reservation.Commit();


@@ -39,18 +39,17 @@ namespace ams::kern {
        KPhysicalAddress KSystemControlBase::Init::GetKernelPhysicalBaseAddress(KPhysicalAddress base_address) {
            const size_t real_dram_size     = KSystemControl::Init::GetRealMemorySize();
            const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
-           if (intended_dram_size * 2 <= real_dram_size) {
+           if (intended_dram_size * 2 < real_dram_size) {
                return base_address;
            } else {
                return base_address + ((real_dram_size - intended_dram_size) / 2);
            }
        }

-       void KSystemControlBase::Init::GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out, KPhysicalAddress kern_base_address) {
+       void KSystemControlBase::Init::GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out) {
            *out = {
                .address = GetInteger(KSystemControl::Init::GetKernelPhysicalBaseAddress(ams::kern::MainMemoryAddress)) + KSystemControl::Init::GetIntendedMemorySize() - InitialProcessBinarySizeMax,
                ._08     = 0,
-               .kern_address = GetInteger(kern_base_address),
            };
        }
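
A quick worked example of the base-address arithmetic in this hunk, under assumed DRAM sizes: with 6 GiB real and 4 GiB intended, intended * 2 (8 GiB) exceeds real, so the base is shifted up by (6 - 4) / 2 = 1 GiB; the <= vs < change only matters in the edge case where real DRAM is exactly twice the intended size:

#include <cstdint>

/* inclusive_boundary selects between the two sides of this diff:
 * '<=' on one side versus '<' on the other. */
constexpr uint64_t AdjustKernelBase(uint64_t base, uint64_t real_dram, uint64_t intended_dram, bool inclusive_boundary) {
    const bool plenty = inclusive_boundary ? (intended_dram * 2 <= real_dram)
                                           : (intended_dram * 2 <  real_dram);
    return plenty ? base : base + (real_dram - intended_dram) / 2;
}

static_assert(AdjustKernelBase(0x80000000, 6ull << 30, 4ull << 30, true) == 0x80000000 + (1ull << 30));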
@@ -78,7 +77,7 @@ namespace ams::kern {

        void KSystemControlBase::Init::CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
            #if defined(ATMOSPHERE_ARCH_ARM64)
-           MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<0>(core_id, entrypoint, arg)) == 0);
+           MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<0, false>(core_id, entrypoint, arg)) == 0);
            #else
            AMS_INFINITE_LOOP();
            #endif


@@ -106,9 +106,6 @@ namespace ams::kern::svc {
                         *out = 0;
                     }
                     break;
-                case ams::svc::InfoType_AliasRegionExtraSize:
-                    *out = process->GetPageTable().GetAliasRegionExtraSize();
-                    break;
                 MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
             }

@@ -137,7 +134,6 @@ namespace ams::kern::svc {
             case ams::svc::InfoType_UsedNonSystemMemorySize:
             case ams::svc::InfoType_IsApplication:
             case ams::svc::InfoType_FreeThreadCount:
-            case ams::svc::InfoType_AliasRegionExtraSize:
                 {
                     /* These info types don't support non-zero subtypes. */
                     R_UNLESS(info_subtype == 0, svc::ResultInvalidCombination());

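Note: InfoType_AliasRegionExtraSize (value 28) exists only on one side of this compare, so code that must run on both kernels has to tolerate the query failing. A hypothetical fallback sketch; GetInfoExtraSize is a stand-in for the svc::GetInfo call, not a real API:

    #include <cstdint>

    /* Stand-in: pretend we are on a kernel without the info type. */
    static bool GetInfoExtraSize(std::uint64_t *out) { *out = 0; return false; }

    std::uint64_t GetAliasRegionExtraSizeOrZero() {
        std::uint64_t extra = 0;
        if (!GetInfoExtraSize(&extra)) {
            extra = 0;  /* older kernel: no extra region is appended */
        }
        return extra;
    }
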
View File

@@ -143,7 +143,7 @@ namespace ams::kern::svc {
    /* Get the process page table. */
    auto &page_table = GetCurrentProcess().GetPageTable();
-   /* Lock the message buffer. */
+   /* Lock the mesage buffer. */
    R_TRY(page_table.LockForIpcUserBuffer(nullptr, message, buffer_size));
    {
@@ -186,7 +186,7 @@
    /* Commit our reservation. */
    event_reservation.Commit();
-   /* At end of scope, kill the standing event references. */
+   /* At end of scope, kill the standing references to the sub events. */
    ON_SCOPE_EXIT {
        event->GetReadableEvent().Close();
        event->Close();
@@ -215,7 +215,7 @@
    /* Get the process page table. */
    auto &page_table = GetCurrentProcess().GetPageTable();
-   /* Lock the message buffer. */
+   /* Lock the mesage buffer. */
    R_TRY(page_table.LockForIpcUserBuffer(nullptr, message, buffer_size));
    /* Ensure that if we fail and aren't terminating that we unlock the user buffer. */
@@ -242,7 +242,7 @@
    /* Get the process page table. */
    auto &page_table = GetCurrentProcess().GetPageTable();
-   /* Lock the message buffer, getting its physical address. */
+   /* Lock the mesage buffer, getting its physical address. */
    KPhysicalAddress message_paddr;
    R_TRY(page_table.LockForIpcUserBuffer(std::addressof(message_paddr), message, buffer_size));

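Note: the hunks above all follow the same lock-then-guarantee-unlock shape around the message buffer. A minimal sketch of that shape, assuming nothing from the real codebase; ScopeExit stands in for ON_SCOPE_EXIT, and LockBuffer/UnlockBuffer are hypothetical:

    #include <cstddef>

    template<typename F>
    struct ScopeExit {
        F f;
        ~ScopeExit() { f(); }
    };
    template<typename F> ScopeExit(F) -> ScopeExit<F>;

    static bool LockBuffer(void *, std::size_t)   { return true; }  /* pin pages */
    static void UnlockBuffer(void *, std::size_t) { }               /* unpin pages */

    bool SendMessage(void *message, std::size_t size) {
        if (!LockBuffer(message, size)) { return false; }
        ScopeExit unlock{[&] { UnlockBuffer(message, size); }};
        /* ... copy the message while the buffer is pinned ... */
        return true;
    }
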
View File

@@ -96,7 +96,7 @@ namespace ams::kern::svc {
    /* Add the client to the handle table. */
    R_TRY(handle_table.Add(out_client, std::addressof(port->GetClientPort())));
-   /* Ensure that we maintain a clean handle state on exit. */
+   /* Ensure that we maintaing a clean handle state on exit. */
    ON_RESULT_FAILURE { handle_table.Remove(*out_client); };
    /* Add the server to the handle table. */

View File

@@ -162,18 +162,6 @@ namespace ams::kern::svc {
    /* Check that the number of extra resource pages is >= 0. */
    R_UNLESS(params.system_resource_num_pages >= 0, svc::ResultInvalidSize());
-   /* Validate that the alias region extra size is allowed, if enabled. */
-   if (params.flags & ams::svc::CreateProcessFlag_EnableAliasRegionExtraSize) {
-       /* Check that we have a 64-bit address space. */
-       R_UNLESS((params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask) == ams::svc::CreateProcessFlag_AddressSpace64Bit, svc::ResultInvalidState());
-       /* Check that the system resource page count is non-zero. */
-       R_UNLESS(params.system_resource_num_pages > 0, svc::ResultInvalidState());
-       /* Check that debug mode is enabled. */
-       R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultInvalidState());
-   }
    /* Convert to sizes. */
    const size_t code_num_pages = params.code_num_pages;
    const size_t system_resource_num_pages = params.system_resource_num_pages;

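Note: the removed block gates CreateProcessFlag_EnableAliasRegionExtraSize behind three conditions. A standalone restatement of that predicate (the flag encodings here are placeholders; the real values live in svc_types.hpp):

    #include <cstdint>

    constexpr std::uint32_t Flag_AddressSpace64Bit          = 2u << 1;   /* placeholder */
    constexpr std::uint32_t Flag_AddressSpaceMask           = 7u << 1;   /* placeholder */
    constexpr std::uint32_t Flag_EnableAliasRegionExtraSize = 1u << 13;

    constexpr bool IsAliasRegionExtraSizeAllowed(std::uint32_t flags, std::int64_t system_resource_num_pages, bool debug_mode) {
        if ((flags & Flag_EnableAliasRegionExtraSize) == 0) {
            return true;  /* feature not requested: nothing to validate */
        }
        /* 64-bit address space, a non-zero system resource, and debug mode are all required. */
        return (flags & Flag_AddressSpaceMask) == Flag_AddressSpace64Bit
            && system_resource_num_pages > 0
            && debug_mode;
    }

    static_assert(IsAliasRegionExtraSizeAllowed(0, 0, false));
    static_assert(!IsAliasRegionExtraSizeAllowed(Flag_EnableAliasRegionExtraSize, 0, true));
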
File diff suppressed because it is too large

View File

@@ -34,6 +34,7 @@ namespace ams::erpt {
    enum CategoryId {
        AMS_ERPT_FOREACH_CATEGORY(GENERATE_ENUM)
+       CategoryId_Count,
    };
    #undef GENERATE_ENUM
@@ -42,6 +43,7 @@
    enum FieldId {
        AMS_ERPT_FOREACH_FIELD(GENERATE_ENUM)
+       FieldId_Count,
    };
    #undef GENERATE_ENUM

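Note: the re-added _Count enumerators rely on the X-macro expansion producing a dense 0..N-1 enum, so appending one more enumerator yields the element count. A toy sketch of the idiom (illustrative list, not the real erpt tables):

    #define FOREACH_COLOR(HANDLER) HANDLER(Red) HANDLER(Green) HANDLER(Blue)

    #define GENERATE_ENUM(NAME) ColorId_##NAME,
    enum ColorId { FOREACH_COLOR(GENERATE_ENUM) ColorId_Count };
    #undef GENERATE_ENUM

    static_assert(ColorId_Count == 3);
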
View File

@@ -58,88 +58,34 @@ namespace ams::erpt::srv {
    };
    #undef STRINGIZE_HANDLER
-   #define GET_FIELD_CATEGORY(FIELD, ID, CATEGORY, TYPE, FLAG) CategoryId_##CATEGORY,
-   constexpr inline const CategoryId FieldIndexToCategoryMap[] = {
+   #define GET_FIELD_CATEGORY(FIELD, ID, CATEGORY, TYPE, FLAG) [FieldId_##FIELD] = CategoryId_##CATEGORY,
+   constexpr inline const CategoryId FieldToCategoryMap[] = {
        AMS_ERPT_FOREACH_FIELD(GET_FIELD_CATEGORY)
    };
    #undef GET_FIELD_CATEGORY
-   #define GET_FIELD_TYPE(FIELD, ID, CATEGORY, TYPE, FLAG) TYPE,
-   constexpr inline const FieldType FieldIndexToTypeMap[] = {
+   #define GET_FIELD_TYPE(FIELD, ID, CATEGORY, TYPE, FLAG) [FieldId_##FIELD] = TYPE,
+   constexpr inline const FieldType FieldToTypeMap[] = {
        AMS_ERPT_FOREACH_FIELD(GET_FIELD_TYPE)
    };
    #undef GET_FIELD_TYPE
-   #define GET_FIELD_FLAG(FIELD, ID, CATEGORY, TYPE, FLAG) FLAG,
-   constexpr inline const FieldFlag FieldIndexToFlagMap[] = {
+   #define GET_FIELD_FLAG(FIELD, ID, CATEGORY, TYPE, FLAG) [FieldId_##FIELD] = FLAG,
+   constexpr inline const FieldFlag FieldToFlagMap[] = {
        AMS_ERPT_FOREACH_FIELD(GET_FIELD_FLAG)
    };
    #undef GET_FIELD_FLAG
-   #define GET_FIELD_ID(FIELD, ...) FieldId_##FIELD,
-   constexpr inline const FieldId FieldIndexToFieldIdMap[] = {
-       AMS_ERPT_FOREACH_FIELD(GET_FIELD_ID)
-   };
-   #undef GET_FIELD_ID
-   #define GET_CATEGORY_ID(CATEGORY, ...) CategoryId_##CATEGORY,
-   constexpr inline const CategoryId CategoryIndexToCategoryIdMap[] = {
-       AMS_ERPT_FOREACH_CATEGORY(GET_CATEGORY_ID)
-   };
-   #undef GET_CATEGORY_ID
-   constexpr util::optional<size_t> FindFieldIndex(FieldId id) {
-       if (std::is_constant_evaluated()) {
-           for (size_t i = 0; i < util::size(FieldIndexToFieldIdMap); ++i) {
-               if (FieldIndexToFieldIdMap[i] == id) {
-                   return i;
-               }
-           }
-           return util::nullopt;
-       } else {
-           if (const auto it = std::lower_bound(std::begin(FieldIndexToFieldIdMap), std::end(FieldIndexToFieldIdMap), id); it != std::end(FieldIndexToFieldIdMap) && *it == id) {
-               return std::distance(FieldIndexToFieldIdMap, it);
-           } else {
-               return util::nullopt;
-           }
-       }
-   }
-   constexpr util::optional<size_t> FindCategoryIndex(CategoryId id) {
-       if (std::is_constant_evaluated()) {
-           for (size_t i = 0; i < util::size(CategoryIndexToCategoryIdMap); ++i) {
-               if (CategoryIndexToCategoryIdMap[i] == id) {
-                   return i;
-               }
-           }
-           return util::nullopt;
-       } else {
-           if (const auto it = std::lower_bound(std::begin(CategoryIndexToCategoryIdMap), std::end(CategoryIndexToCategoryIdMap), id); it != std::end(CategoryIndexToCategoryIdMap) && *it == id) {
-               return std::distance(CategoryIndexToCategoryIdMap, it);
-           } else {
-               return util::nullopt;
-           }
-       }
-   }
-   constexpr inline CategoryId ConvertFieldToCategory(FieldId id) {
-       const auto index = FindFieldIndex(id);
-       AMS_ASSERT(index.has_value());
-       return FieldIndexToCategoryMap[index.value()];
-   }
-   constexpr inline FieldType ConvertFieldToType(FieldId id) {
-       const auto index = FindFieldIndex(id);
-       AMS_ASSERT(index.has_value());
-       return FieldIndexToTypeMap[index.value()];
-   }
-   constexpr inline FieldFlag ConvertFieldToFlag(FieldId id) {
-       const auto index = FindFieldIndex(id);
-       AMS_ASSERT(index.has_value());
-       return FieldIndexToFlagMap[index.value()];
-   }
+   inline CategoryId ConvertFieldToCategory(FieldId id) {
+       return FieldToCategoryMap[id];
+   }
+   inline FieldType ConvertFieldToType(FieldId id) {
+       return FieldToTypeMap[id];
+   }
+   inline FieldFlag ConvertFieldToFlag(FieldId id) {
+       return FieldToFlagMap[id];
+   }
    constexpr inline ReportFlagSet MakeNoReportFlags() {

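Note: the removed side of this hunk copes with sparse field ids by keeping the tables positional and mapping id to index through a sorted id array. A C++20 sketch of that lookup, using std::optional in place of util::optional (toy ids, not the erpt tables):

    #include <algorithm>
    #include <cstddef>
    #include <iterator>
    #include <optional>

    constexpr int FieldIds[] = { 1, 4, 9, 16 };  /* must stay sorted ascending */

    constexpr std::optional<std::size_t> FindFieldIndex(int id) {
        const auto it = std::lower_bound(std::begin(FieldIds), std::end(FieldIds), id);
        if (it != std::end(FieldIds) && *it == id) {
            return static_cast<std::size_t>(it - std::begin(FieldIds));
        }
        return std::nullopt;
    }

    static_assert(*FindFieldIndex(9) == 2);
    static_assert(!FindFieldIndex(5).has_value());

The added side's [FieldId_##FIELD] designators instead index the tables directly, keeping O(1) lookup at the cost of dense table slots (array designators in C++ are a compiler extension).
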
View File

@@ -81,8 +81,6 @@ namespace ams::hos {
        Version_16_0_3 = ::ams::TargetFirmware_16_0_3,
        Version_16_1_0 = ::ams::TargetFirmware_16_1_0,
        Version_17_0_0 = ::ams::TargetFirmware_17_0_0,
-       Version_17_0_1 = ::ams::TargetFirmware_17_0_1,
-       Version_18_0_0 = ::ams::TargetFirmware_18_0_0,
        Version_Current = ::ams::TargetFirmware_Current,

View File

@@ -53,8 +53,7 @@ namespace ams::settings {
        /* 4.0.0+ */
        Language_SimplifiedChinese,
        Language_TraditionalChinese,
-       /* 10.1.0+ */
-       Language_PortugueseBr,
        Language_Count,
    };
@@ -93,8 +92,6 @@
        /* 4.0.0+ */
        AMS_MATCH_LANGUAGE(SimplifiedChinese, "zh-Hans")
        AMS_MATCH_LANGUAGE(TraditionalChinese, "zh-Hant")
-       /* 10.1.0+ */
-       AMS_MATCH_LANGUAGE(PortugueseBr, "pt-BR")
        #undef AMS_MATCH_LANGUAGE
        else { static_assert(Lang != Language_Japanese); }
    }
@@ -119,8 +116,6 @@
        /* 4.0.0+ */
        EncodeLanguage<Language_SimplifiedChinese>(),
        EncodeLanguage<Language_TraditionalChinese>(),
-       /* 10.1.0+ */
-       EncodeLanguage<Language_PortugueseBr>(),
        };
        return EncodedLanguages[language];
    }
@@ -161,11 +156,7 @@
    }
    constexpr inline bool IsValidLanguageCodeDeprecated(const LanguageCode &lc) {
-       return impl::IsValidLanguageCode(lc, std::make_index_sequence<Language_Count - 3>{});
-   }
-   constexpr inline bool IsValidLanguageCodeDeprecated2(const LanguageCode &lc) {
-       return impl::IsValidLanguageCode(lc, std::make_index_sequence<Language_Count - 1>{});
+       return impl::IsValidLanguageCode(lc, std::make_index_sequence<Language_Count - 2>{});
    }
    constexpr inline bool IsValidLanguageCode(const LanguageCode &lc) {

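Note: the deprecated validators above work by checking a prefix of the encoded-language table, so each "Deprecated" revision just shrinks the index_sequence. A toy sketch of the idiom (made-up encodings, not real language codes):

    #include <cstddef>
    #include <utility>

    constexpr unsigned long long Encoded[] = { 0x6161, 0x6262, 0x6363 };

    template<std::size_t... Is>
    constexpr bool IsValidImpl(unsigned long long v, std::index_sequence<Is...>) {
        return ((v == Encoded[Is]) || ...);
    }

    /* Deprecated check: only the first two entries existed back then. */
    constexpr bool IsValidDeprecated(unsigned long long v) {
        return IsValidImpl(v, std::make_index_sequence<2>{});
    }

    static_assert(IsValidDeprecated(0x6161) && !IsValidDeprecated(0x6363));
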
View File

@@ -70,7 +70,6 @@ namespace ams::spl::impl {
    Result ModularExponentiateWithDrmDeviceCertKey(void *out, size_t out_size, const void *base, size_t base_size, const void *mod, size_t mod_size);
    Result PrepareEsArchiveKey(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation);
    Result LoadPreparedAesKey(s32 keyslot, const AccessKey &access_key);
-   Result PrepareEsUnknown2Key(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation);
    /* FS */
    Result DecryptAndStoreGcKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option);

View File

@@ -28,7 +28,6 @@
    AMS_SF_METHOD_INFO(C, H, 28, Result, DecryptAndStoreDrmDeviceCertKey, (const sf::InPointerBuffer &src, spl::AccessKey access_key, spl::KeySource key_source), (src, access_key, key_source), hos::Version_5_0_0) \
    AMS_SF_METHOD_INFO(C, H, 29, Result, ModularExponentiateWithDrmDeviceCertKey, (const sf::OutPointerBuffer &out, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod), (out, base, mod), hos::Version_5_0_0) \
    AMS_SF_METHOD_INFO(C, H, 31, Result, PrepareEsArchiveKey, (sf::Out<spl::AccessKey> out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation), (out_access_key, base, mod, label_digest, generation), hos::Version_6_0_0) \
-   AMS_SF_METHOD_INFO(C, H, 32, Result, LoadPreparedAesKey, (s32 keyslot, spl::AccessKey access_key), (keyslot, access_key), hos::Version_6_0_0) \
-   AMS_SF_METHOD_INFO(C, H, 33, Result, PrepareEsUnknown2Key, (sf::Out<spl::AccessKey> out_access_key, const sf::InPointerBuffer &base, const sf::InPointerBuffer &mod, const sf::InPointerBuffer &label_digest, u32 generation), (out_access_key, base, mod, label_digest, generation), hos::Version_18_0_0)
+   AMS_SF_METHOD_INFO(C, H, 32, Result, LoadPreparedAesKey, (s32 keyslot, spl::AccessKey access_key), (keyslot, access_key), hos::Version_6_0_0)
    AMS_SF_DEFINE_INTERFACE_WITH_BASE(ams::spl::impl, IEsInterface, ::ams::spl::impl::IDeviceUniqueDataInterface, AMS_SPL_I_ES_INTERFACE_INTERFACE_INFO, 0x346D5001)

View File

@@ -69,13 +69,13 @@ namespace ams::erpt::srv {
    auto guard = SCOPE_GUARD { m_ctx.field_count = 0; };
    R_UNLESS(m_ctx.field_count <= FieldsPerContext, erpt::ResultInvalidArgument());
-   R_UNLESS(FindCategoryIndex(m_ctx.category).has_value(), erpt::ResultInvalidArgument());
+   R_UNLESS(0 <= m_ctx.category && m_ctx.category < CategoryId_Count, erpt::ResultInvalidArgument());
    for (u32 i = 0; i < m_ctx.field_count; i++) {
        m_ctx.fields[i] = ctx_ptr->fields[i];
-       R_UNLESS(FindFieldIndex(m_ctx.fields[i].id).has_value(), erpt::ResultInvalidArgument());
+       R_UNLESS(0 <= m_ctx.fields[i].id && m_ctx.fields[i].id < FieldId_Count, erpt::ResultInvalidArgument());
        R_UNLESS(0 <= m_ctx.fields[i].type && m_ctx.fields[i].type < FieldType_Count, erpt::ResultInvalidArgument());
        R_UNLESS(m_ctx.fields[i].type == ConvertFieldToType(m_ctx.fields[i].id), erpt::ResultFieldTypeMismatch());

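Note: the two validation styles in this hunk accept different things: a dense enum can be range-checked, while the sparse-id scheme must test membership. A toy contrast (illustrative ids):

    constexpr int Ids[] = { 2, 5, 7 };  /* sparse ids actually defined */
    constexpr int Count = 8;            /* dense bound used by the range check */

    constexpr bool IsValidDense(int id)  { return 0 <= id && id < Count; }
    constexpr bool IsValidSparse(int id) {
        for (int v : Ids) { if (v == id) { return true; } }
        return false;
    }

    /* 3 is inside the dense range but is not a defined id. */
    static_assert(IsValidDense(3) && !IsValidSparse(3));
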
View File

@@ -62,10 +62,7 @@ namespace ams::erpt::srv {
    static Result AddId(Report *report, FieldId field_id) {
        static_assert(MaxFieldStringSize < ElementSize_256);
-       const auto index = FindFieldIndex(field_id);
-       AMS_ASSERT(index.has_value());
-       R_TRY(AddStringValue(report, FieldString[index.value()], strnlen(FieldString[index.value()], MaxFieldStringSize)));
+       R_TRY(AddStringValue(report, FieldString[field_id], strnlen(FieldString[field_id], MaxFieldStringSize)));
        R_SUCCEED();
    }

View File

@@ -105,8 +105,8 @@ namespace ams::erpt::srv {
    g_sf_allocator.Attach(g_heap_handle);
-   for (const auto category_id : CategoryIndexToCategoryIdMap) {
-       Context *ctx = new Context(category_id);
+   for (auto i = 0; i < CategoryId_Count; i++) {
+       Context *ctx = new Context(static_cast<CategoryId>(i));
        AMS_ABORT_UNLESS(ctx != nullptr);
    }

View File

@@ -277,7 +277,7 @@ namespace ams::erpt::srv {
    void SaveSyslogReportIfRequired(const ContextEntry *ctx, const ReportId &report_id) {
        bool needs_save_syslog = true;
        for (u32 i = 0; i < ctx->field_count; i++) {
-           static_assert(FieldIndexToTypeMap[*FindFieldIndex(FieldId_HasSyslogFlag)] == FieldType_Bool);
+           static_assert(FieldToTypeMap[FieldId_HasSyslogFlag] == FieldType_Bool);
            if (ctx->fields[i].id == FieldId_HasSyslogFlag && !ctx->fields[i].value_bool) {
                needs_save_syslog = false;
                break;

View File

@@ -500,7 +500,7 @@ namespace ams::fs {
    R_UNLESS((mode & fs::OpenMode_All) == fs::OpenMode_Read, fs::ResultInvalidOpenMode());
-   RomFileTable::FileInfo file_info{};
+   RomFileTable::FileInfo file_info;
    R_TRY(this->GetFileInfo(std::addressof(file_info), path.GetString()));
    auto file = std::make_unique<RomFsFile>(this, m_entry_size + file_info.offset.Get(), m_entry_size + file_info.offset.Get() + file_info.size.Get());

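Note: the one-character change above (the dropped "{}") switches FileInfo from value-initialization to default-initialization. A minimal illustration:

    struct FileInfo { unsigned long long offset; unsigned long long size; };

    void Sketch() {
        FileInfo a{};  /* value-initialized: offset == 0 and size == 0 */
        FileInfo b;    /* automatic storage: members are indeterminate until written */
        (void)a;
        (void)b;       /* b must be fully written (e.g. by GetFileInfo) before use */
    }

Both are safe here because GetFileInfo fills the struct before any read, but the braced form is the more defensive default.
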
View File

@@ -21,7 +21,7 @@ namespace ams::fs::impl {
    #define ADD_ENUM_CASE(v) case v: return #v
    template<> const char *IdString::ToString<pkg1::KeyGeneration>(pkg1::KeyGeneration id) {
-       static_assert(pkg1::KeyGeneration_Current == pkg1::KeyGeneration_18_0_0);
+       static_assert(pkg1::KeyGeneration_Current == pkg1::KeyGeneration_17_0_0);
        switch (id) {
            using enum pkg1::KeyGeneration;
            case KeyGeneration_1_0_0: return "1.0.0-2.3.0";
@@ -40,8 +40,7 @@
            case KeyGeneration_14_0_0: return "14.0.0-14.1.2";
            case KeyGeneration_15_0_0: return "15.0.0-15.0.1";
            case KeyGeneration_16_0_0: return "16.0.0-16.0.3";
-           case KeyGeneration_17_0_0: return "17.0.0-17.0.1";
-           case KeyGeneration_18_0_0: return "18.0.0-";
+           case KeyGeneration_17_0_0: return "17.0.0-";
            default: return "Unknown";
        }
    }

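Note: the static_assert on KeyGeneration_Current is the maintenance hook in this switch: bumping the enum without updating the strings breaks the build instead of silently returning "Unknown". A toy version of the idiom:

    enum KeyGeneration { KeyGeneration_A, KeyGeneration_B, KeyGeneration_Count, KeyGeneration_Current = KeyGeneration_Count - 1 };

    constexpr const char *ToString(KeyGeneration g) {
        static_assert(KeyGeneration_Current == KeyGeneration_B);  /* forces a revisit when a generation is added */
        switch (g) {
            case KeyGeneration_A: return "A";
            case KeyGeneration_B: return "B";
            default:              return "Unknown";
        }
    }
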
View File

@@ -34,8 +34,8 @@ namespace ams::fssrv {
    void ProgramRegistryImpl::Initialize(ProgramRegistryServiceImpl *service) {
        /* Check pre-conditions. */
-       AMS_ASSERT(service != nullptr);
+       AMS_ASSERT(g_impl != nullptr);
        AMS_ASSERT(g_impl == nullptr);
        /* Set the global service. */
        g_impl = service;

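Note: only one of the two assertion pairs above is satisfiable: asserting g_impl != nullptr immediately before asserting g_impl == nullptr can never both hold, whereas checking the incoming pointer does what the comment says. A standalone restatement of the coherent pair:

    #include <cassert>

    struct Service {};
    static Service *g_impl = nullptr;

    void Initialize(Service *service) {
        assert(service != nullptr);  /* caller must pass a real service */
        assert(g_impl == nullptr);   /* must not already be initialized */
        g_impl = service;
    }
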
View File

@@ -288,7 +288,7 @@ namespace ams::htc::server::rpc {
    /* Get a task. */
    Task *task;
    u32 task_id{};
-   PacketCategory category{};
+   PacketCategory category;
    do {
        /* Dequeue a task. */
        R_TRY(m_task_queue.Take(std::addressof(task_id), std::addressof(category)));

View File

@@ -39,7 +39,7 @@ namespace ams::htcs::impl {
    void HtcsManager::Socket(s32 *out_err, s32 *out_desc, bool enable_disconnection_emulation) {
        /* Invoke our implementation. */
-       s32 err = -1, desc = -1;
+       s32 err, desc;
        const Result result = m_impl->CreateSocket(std::addressof(err), std::addressof(desc), enable_disconnection_emulation);
        /* Set output. */
@@ -71,7 +71,7 @@
    void HtcsManager::Connect(s32 *out_err, s32 *out_res, const SockAddrHtcs &address, s32 desc) {
        /* Invoke our implementation. */
-       s32 err = -1;
+       s32 err;
        const Result result = m_impl->Connect(std::addressof(err), desc, address);
        /* Set output. */
@@ -90,7 +90,7 @@
    void HtcsManager::Bind(s32 *out_err, s32 *out_res, const SockAddrHtcs &address, s32 desc) {
        /* Invoke our implementation. */
-       s32 err = -1;
+       s32 err;
        const Result result = m_impl->Bind(std::addressof(err), desc, address);
        /* Set output. */
@@ -109,7 +109,7 @@
    void HtcsManager::Listen(s32 *out_err, s32 *out_res, s32 backlog_count, s32 desc) {
        /* Invoke our implementation. */
-       s32 err = -1;
+       s32 err;
        const Result result = m_impl->Listen(std::addressof(err), desc, backlog_count);
        /* Set output. */
@@ -128,8 +128,8 @@
    void HtcsManager::Recv(s32 *out_err, s64 *out_size, char *buffer, size_t size, s32 flags, s32 desc) {
        /* Invoke our implementation. */
-       s32 err = -1;
-       s64 recv_size = -1;
+       s32 err;
+       s64 recv_size;
        const Result result = m_impl->Receive(std::addressof(err), std::addressof(recv_size), buffer, size, desc, flags);
        /* Set output. */
@@ -148,8 +148,8 @@
    void HtcsManager::Send(s32 *out_err, s64 *out_size, const char *buffer, size_t size, s32 flags, s32 desc) {
        /* Invoke our implementation. */
-       s32 err = -1;
-       s64 send_size = -1;
+       s32 err;
+       s64 send_size;
        const Result result = m_impl->Send(std::addressof(err), std::addressof(send_size), buffer, size, desc, flags);
        /* Set output. */
@@ -168,7 +168,7 @@
    void HtcsManager::Shutdown(s32 *out_err, s32 *out_res, s32 how, s32 desc) {
        /* Invoke our implementation. */
-       s32 err = -1;
+       s32 err;
        const Result result = m_impl->Shutdown(std::addressof(err), desc, how);
        /* Set output. */
@@ -191,7 +191,7 @@
    void HtcsManager::Fcntl(s32 *out_err, s32 *out_res, s32 command, s32 value, s32 desc) {
        /* Invoke our implementation. */
-       s32 err = -1, res = -1;
+       s32 err, res;
        const Result result = m_impl->Fcntl(std::addressof(err), std::addressof(res), desc, command, value);
        /* Set output. */
@@ -210,7 +210,7 @@
    void HtcsManager::AcceptResults(s32 *out_err, s32 *out_desc, SockAddrHtcs *out_address, u32 task_id, s32 desc) {
        /* Invoke our implementation. */
-       s32 err = -1;
+       s32 err;
        const Result result = m_impl->AcceptResults(std::addressof(err), out_desc, out_address, task_id, desc);
        /* Set output. */
@@ -233,8 +233,8 @@
    void HtcsManager::RecvResults(s32 *out_err, s64 *out_size, char *buffer, s64 buffer_size, u32 task_id, s32 desc) {
        /* Invoke our implementation. */
-       s32 err = -1;
-       s64 size = -1;
+       s32 err;
+       s64 size;
        const Result result = m_impl->RecvResults(std::addressof(err), std::addressof(size), buffer, buffer_size, task_id, desc);
        /* Set output. */
@@ -265,8 +265,8 @@
    void HtcsManager::SendResults(s32 *out_err, s64 *out_size, u32 task_id, s32 desc) {
        /* Invoke our implementation. */
-       s32 err = -1;
-       s64 size = -1;
+       s32 err;
+       s64 size;
        const Result result = m_impl->SendResults(std::addressof(err), std::addressof(size), task_id, desc);
        /* Set output. */
@@ -293,7 +293,7 @@
    Result HtcsManager::ContinueSend(s64 *out_size, const char *buffer, s64 buffer_size, u32 task_id, s32 desc) {
        /* Invoke our implementation. */
-       s64 size = -1;
+       s64 size;
        R_TRY_CATCH(m_impl->ContinueSend(std::addressof(size), buffer, buffer_size, task_id, desc)) {
            R_CONVERT(htclow::ResultInvalidChannelState, tma::ResultUnknown())
            R_CONVERT(htc::ResultTaskCancelled, tma::ResultUnknown())
@@ -306,8 +306,8 @@
    void HtcsManager::EndSend(s32 *out_err, s64 *out_size, u32 task_id, s32 desc) {
        /* Invoke our implementation. */
-       s32 err = -1;
-       s64 size = -1;
+       s32 err;
+       s64 size;
        const Result result = m_impl->EndSend(std::addressof(err), std::addressof(size), task_id, desc);
        /* Set output. */
@@ -334,8 +334,8 @@
    void HtcsManager::EndRecv(s32 *out_err, s64 *out_size, char *buffer, s64 buffer_size, u32 task_id, s32 desc) {
        /* Invoke our implementation. */
-       s32 err = -1;
-       s64 size = -1;
+       s32 err;
+       s64 size;
        const Result result = m_impl->EndRecv(std::addressof(err), std::addressof(size), buffer, buffer_size, task_id, desc);
        /* Set output. */
@@ -367,8 +367,8 @@
    Result HtcsManager::EndSelect(s32 *out_err, s32 *out_count, Span<int> read_handles, Span<int> write_handles, Span<int> exception_handles, u32 task_id) {
        /* Invoke our implementation. */
-       s32 err = -1;
-       bool empty = false;
+       s32 err;
+       bool empty;
        const Result result = m_impl->EndSelect(std::addressof(err), std::addressof(empty), read_handles, write_handles, exception_handles, task_id);
        /* Set output. */

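Note: the removed side of every hunk in this file seeds its locals with a -1 sentinel, so the out-parameters hold a defined error value even if the implementation call fails before writing them. A minimal sketch of the pattern; TryGetValue stands in for the m_impl-> calls:

    #include <cstdint>

    static bool TryGetValue(std::int32_t *out) { *out = 42; return true; }

    void Wrapper(std::int32_t *out_err) {
        std::int32_t err = -1;  /* sentinel survives a failed call untouched */
        const bool ok = TryGetValue(&err);
        *out_err = ok ? err : -1;
    }
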
View File

@@ -21,12 +21,11 @@ namespace ams::os::impl {
    class VammManagerHorizonImpl {
        public:
            static void GetReservedRegionImpl(uintptr_t *out_start, uintptr_t *out_size) {
-               u64 start, size, extra_size;
+               u64 start, size;
                R_ABORT_UNLESS(svc::GetInfo(std::addressof(start), svc::InfoType_AliasRegionAddress, svc::PseudoHandle::CurrentProcess, 0));
                R_ABORT_UNLESS(svc::GetInfo(std::addressof(size), svc::InfoType_AliasRegionSize, svc::PseudoHandle::CurrentProcess, 0));
-               R_ABORT_UNLESS(svc::GetInfo(std::addressof(extra_size), svc::InfoType_AliasRegionExtraSize, svc::PseudoHandle::CurrentProcess, 0));
                *out_start = start;
-               *out_size = size - extra_size;
+               *out_size = size;
            }
            static Result AllocatePhysicalMemoryImpl(uintptr_t address, size_t size) {

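Note: with the extra-size query gone, the reserved region reverts to the full alias size; the removed subtraction excluded the 18.x "extra" tail from what callers may reserve. Worked numbers (illustrative sizes only):

    #include <cstdint>

    constexpr std::uint64_t GiB = 1ull << 30;
    /* An 8 GiB alias region carrying a 2 GiB extra tail leaves 6 GiB reservable. */
    static_assert((8 * GiB) - (2 * GiB) == 6 * GiB);
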
View File

@@ -167,7 +167,6 @@ namespace ams::patcher {
    /* Apply patch. */
    if (patch_offset + rle_size > mapped_size) {
-       AMS_ABORT_UNLESS(patch_offset <= mapped_size);
        rle_size = mapped_size - patch_offset;
    }
    std::memset(mapped_module + patch_offset, buffer[0], rle_size);
@@ -191,7 +190,6 @@
    /* Apply patch. */
    u32 read_size = patch_size;
    if (patch_offset + read_size > mapped_size) {
-       AMS_ABORT_UNLESS(patch_offset <= mapped_size);
        read_size = mapped_size - patch_offset;
    }
    {

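Note: the clamp above computes mapped_size - patch_offset, which underflows if patch_offset is already past the end of the mapped image; the removed AMS_ABORT_UNLESS is what ruled that out. A standalone sketch of the guarded clamp (a return stands in for the abort):

    #include <cstdint>
    #include <cstring>

    void ApplyRlePatch(std::uint8_t *mapped, std::uint32_t mapped_size,
                       std::uint32_t patch_offset, std::uint32_t rle_size, std::uint8_t value) {
        if (patch_offset + rle_size > mapped_size) {
            if (patch_offset > mapped_size) { return; }  /* guard against underflow below */
            rle_size = mapped_size - patch_offset;       /* truncate to the mapped image */
        }
        std::memset(mapped + patch_offset, value, rle_size);
    }
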
View File

@@ -893,10 +893,6 @@ namespace ams::spl::impl {
        R_RETURN(PrepareEsDeviceUniqueKey(out_access_key, base, base_size, mod, mod_size, label_digest, label_digest_size, smc::EsDeviceUniqueKeyType::ArchiveKey, generation));
    }
-   Result PrepareEsUnknown2Key(AccessKey *out_access_key, const void *base, size_t base_size, const void *mod, size_t mod_size, const void *label_digest, size_t label_digest_size, u32 generation) {
-       R_RETURN(PrepareEsDeviceUniqueKey(out_access_key, base, base_size, mod, mod_size, label_digest, label_digest_size, smc::EsDeviceUniqueKeyType::Unknown2, generation));
-   }
    /* FS */
    Result DecryptAndStoreGcKey(const void *src, size_t src_size, const AccessKey &access_key, const KeySource &key_source, u32 option) {
        R_RETURN(DecryptAndStoreDeviceUniqueKey(src, src_size, access_key, key_source, option));

View File

@@ -16,11 +16,11 @@
 #pragma once
 #define ATMOSPHERE_RELEASE_VERSION_MAJOR 1
-#define ATMOSPHERE_RELEASE_VERSION_MINOR 7
-#define ATMOSPHERE_RELEASE_VERSION_MICRO 0
+#define ATMOSPHERE_RELEASE_VERSION_MINOR 6
+#define ATMOSPHERE_RELEASE_VERSION_MICRO 2
 #define ATMOSPHERE_RELEASE_VERSION ATMOSPHERE_RELEASE_VERSION_MAJOR, ATMOSPHERE_RELEASE_VERSION_MINOR, ATMOSPHERE_RELEASE_VERSION_MICRO
-#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MAJOR 18
+#define ATMOSPHERE_SUPPORTED_HOS_VERSION_MAJOR 17
 #define ATMOSPHERE_SUPPORTED_HOS_VERSION_MINOR 0
 #define ATMOSPHERE_SUPPORTED_HOS_VERSION_MICRO 0

View File

@@ -79,10 +79,8 @@
 #define ATMOSPHERE_TARGET_FIRMWARE_16_0_3 ATMOSPHERE_TARGET_FIRMWARE(16, 0, 3)
 #define ATMOSPHERE_TARGET_FIRMWARE_16_1_0 ATMOSPHERE_TARGET_FIRMWARE(16, 1, 0)
 #define ATMOSPHERE_TARGET_FIRMWARE_17_0_0 ATMOSPHERE_TARGET_FIRMWARE(17, 0, 0)
-#define ATMOSPHERE_TARGET_FIRMWARE_17_0_1 ATMOSPHERE_TARGET_FIRMWARE(17, 0, 1)
-#define ATMOSPHERE_TARGET_FIRMWARE_18_0_0 ATMOSPHERE_TARGET_FIRMWARE(18, 0, 0)
-#define ATMOSPHERE_TARGET_FIRMWARE_CURRENT ATMOSPHERE_TARGET_FIRMWARE_18_0_0
+#define ATMOSPHERE_TARGET_FIRMWARE_CURRENT ATMOSPHERE_TARGET_FIRMWARE_17_0_0
 #define ATMOSPHERE_TARGET_FIRMWARE_MIN ATMOSPHERE_TARGET_FIRMWARE(0, 0, 0)
 #define ATMOSPHERE_TARGET_FIRMWARE_MAX ATMOSPHERE_TARGET_FIRMWARE_CURRENT
@@ -154,8 +152,6 @@ namespace ams {
    TargetFirmware_16_0_3 = ATMOSPHERE_TARGET_FIRMWARE_16_0_3,
    TargetFirmware_16_1_0 = ATMOSPHERE_TARGET_FIRMWARE_16_1_0,
    TargetFirmware_17_0_0 = ATMOSPHERE_TARGET_FIRMWARE_17_0_0,
-   TargetFirmware_17_0_1 = ATMOSPHERE_TARGET_FIRMWARE_17_0_1,
-   TargetFirmware_18_0_0 = ATMOSPHERE_TARGET_FIRMWARE_18_0_0,
    TargetFirmware_Current = ATMOSPHERE_TARGET_FIRMWARE_CURRENT,

View File

@@ -190,7 +190,6 @@ namespace ams::svc {
        InfoType_ThreadTickCount = 25,
        InfoType_IsSvcPermitted = 26,
        InfoType_IoRegionHint = 27,
-       InfoType_AliasRegionExtraSize = 28,
        InfoType_MesosphereMeta = 65000,
        InfoType_MesosphereCurrentProcess = 65001,
@@ -437,19 +436,15 @@
        /* 11.x+ DisableDeviceAddressSpaceMerge. */
        CreateProcessFlag_DisableDeviceAddressSpaceMerge = (1 << 12),
-       /* 18.x EnableAliasRegionExtraSize. */
-       CreateProcessFlag_EnableAliasRegionExtraSize = (1 << 13),
        /* Mask of all flags. */
        CreateProcessFlag_All = CreateProcessFlag_Is64Bit |
                                CreateProcessFlag_AddressSpaceMask |
                                CreateProcessFlag_EnableDebug |
                                CreateProcessFlag_EnableAslr |
                                CreateProcessFlag_IsApplication |
                                CreateProcessFlag_PoolPartitionMask |
                                CreateProcessFlag_OptimizeMemoryAllocation |
-                               CreateProcessFlag_DisableDeviceAddressSpaceMerge |
-                               CreateProcessFlag_EnableAliasRegionExtraSize,
+                               CreateProcessFlag_DisableDeviceAddressSpaceMerge,
    };
    /* Debug types. */

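Note: CreateProcessFlag_All exists so the kernel can reject requests that set undefined bits with a single mask test. A toy sketch of that validation (placeholder bit positions):

    #include <cstdint>

    enum : std::uint32_t {
        Flag_A   = 1u << 0,
        Flag_B   = 1u << 12,
        Flag_C   = 1u << 13,  /* the 18.x-only EnableAliasRegionExtraSize analogue */
        Flag_All = Flag_A | Flag_B | Flag_C,
    };

    constexpr bool AreFlagsValid(std::uint32_t flags) { return (flags & ~Flag_All) == 0; }

    static_assert(AreFlagsValid(Flag_A | Flag_C));
    static_assert(!AreFlagsValid(1u << 20));
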
View File

@@ -57,8 +57,8 @@ namespace ams::svc {
    /* This is the highest SVC version supported by Atmosphere, to be updated on new kernel releases. */
    /* NOTE: Official kernel versions have SVC major = SDK major + 4, SVC minor = SDK minor. */
-   constexpr inline u32 SupportedKernelMajorVersion = ConvertToSvcMajorVersion(18);
-   constexpr inline u32 SupportedKernelMinorVersion = ConvertToSvcMinorVersion( 3);
+   constexpr inline u32 SupportedKernelMajorVersion = ConvertToSvcMajorVersion(17);
+   constexpr inline u32 SupportedKernelMinorVersion = ConvertToSvcMinorVersion( 5);
    constexpr inline u32 SupportedKernelVersion      = EncodeKernelVersion(SupportedKernelMajorVersion, SupportedKernelMinorVersion);
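
Note: per the NOTE in this hunk, the supported kernel version tracks the SDK version with a +4 offset on the major number. A sketch of that mapping, assuming the helpers apply exactly the offset the NOTE describes; the packing in EncodeKernelVersion is illustrative, not the real field layout:

    constexpr unsigned ConvertToSvcMajorVersion(unsigned sdk_major) { return sdk_major + 4; }
    constexpr unsigned ConvertToSvcMinorVersion(unsigned sdk_minor) { return sdk_minor; }
    constexpr unsigned EncodeKernelVersion(unsigned major, unsigned minor) { return (major << 4) | minor; }

    static_assert(EncodeKernelVersion(ConvertToSvcMajorVersion(17), ConvertToSvcMinorVersion(5)) == ((21u << 4) | 5u));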