Compare commits

No commits in common. "8a9eb85e055776720abeec5517909a32aed9a32f" and "159f8d384b453d310a39c189d22196eea2267b22" have entirely different histories.

133 changed files with 953 additions and 2481 deletions

@@ -84,7 +84,7 @@ dist-no-debug: package3 $(CURRENT_DIRECTORY)/$(ATMOSPHERE_OUT_DIR)
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000034
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000036
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000037
#mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000003c
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000003c
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000042
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000420
mkdir -p $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000b240
@@ -98,7 +98,7 @@ dist-no-debug: package3 $(CURRENT_DIRECTORY)/$(ATMOSPHERE_OUT_DIR)
cp stratosphere/fatal/$(ATMOSPHERE_OUT_DIR)/fatal.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000034/exefs.nsp
cp stratosphere/creport/$(ATMOSPHERE_OUT_DIR)/creport.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000036/exefs.nsp
cp stratosphere/ro/$(ATMOSPHERE_OUT_DIR)/ro.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000037/exefs.nsp
#cp stratosphere/jpegdec/$(ATMOSPHERE_OUT_DIR)/jpegdec.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000003c/exefs.nsp
cp stratosphere/jpegdec/$(ATMOSPHERE_OUT_DIR)/jpegdec.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000003c/exefs.nsp
cp stratosphere/pgl/$(ATMOSPHERE_OUT_DIR)/pgl.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000042/exefs.nsp
cp stratosphere/LogManager/$(ATMOSPHERE_OUT_DIR)/LogManager.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/0100000000000420/exefs.nsp
cp stratosphere/htc/$(ATMOSPHERE_OUT_DIR)/htc.nsp $(DIST_DIR)/stratosphere_romfs/atmosphere/contents/010000000000b240/exefs.nsp

@@ -1,20 +1,4 @@
# Changelog
## 1.6.0
+ Basic support was added for 17.0.0.
+ The console should boot and atmosphère should be fully functional. However, not all modules have been fully updated to reflect the latest changes.
+ There shouldn't be anything user visible resulting from this, but it will be addressed in a soon-to-come atmosphère update.
+ `exosphère` was updated to reflect the latest official secure monitor behavior.
+ `mesosphère` was updated to reflect the latest official kernel behavior.
+ `ncm` was updated to reflect the latest official behavior.
+ `erpt` was partially updated to support the latest official behavior.
+ Atmosphere's gdbstub now supports waiting to attach to a specific program id on launch (as opposed to any application).
+ The monitor command for this is `monitor wait <hex program id>`, where program id can optionally have an `0x` prefix.
+ Support was added to `haze` for editing files in-place and performing 64-bit transfers (files larger than 4 GB).
+ `bpc.mitm` was enabled on Mariko units, and now triggers pmic-based shutdowns/reboots (thanks @CTCaer).
+ This should cause the console to no longer wake ~15 seconds after shutdown on Mariko.
+ A number of minor issues were fixed and improvements were made, including:
+ A workaround was added for a change in 17.0.0 that would cause consoles which had previously re-built their SYSTEM partition to brick on update-to-17.0.0.
+ General system stability improvements to enhance the user's experience.
## 1.5.5
+ Support was added for 16.1.0.
+ General system stability improvements to enhance the user's experience.
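
(As an illustration of the gdbstub item under 1.6.0 above: once a gdb client is attached to Atmosphere's gdbstub, the new monitor command is issued as, for example, `monitor wait 0x0100000000010000`, or equivalently `monitor wait 0100000000010000` without the prefix. The program id shown here is an arbitrary placeholder, not one taken from this comparison.)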

emummc/.gitrepo vendored

@@ -6,7 +6,7 @@
[subrepo]
remote = https://github.com/m4xw/emummc
branch = develop
commit = 9513a5412057b1f1bc44ed8e717c57c726763a88
parent = e4d08ae0c5342cdb0875d164522a63ec9d233052
commit = 30205111ee375bef96f0f76cb6a3130a2f0fc85c
parent = 81e9154a52a976f85317bddd0131426599d26a62
method = merge
cmdver = 0.4.1

emummc/README.md vendored

@@ -2,7 +2,7 @@
*A SDMMC driver replacement for Nintendo's Filesystem Services, by **m4xw***
### Supported Horizon Versions
**1.0.0 - 17.0.0**
**1.0.0 - 16.0.3**
## Features
* Arbitrary SDMMC backend selection

@@ -67,8 +67,6 @@
#include "offsets/1600_exfat.h"
#include "offsets/1603.h"
#include "offsets/1603_exfat.h"
#include "offsets/1700.h"
#include "offsets/1700_exfat.h"
#include "../utils/fatal.h"
#define GET_OFFSET_STRUCT_NAME(vers) g_offsets##vers
@@ -147,8 +145,6 @@ DEFINE_OFFSET_STRUCT(_1600);
DEFINE_OFFSET_STRUCT(_1600_EXFAT);
DEFINE_OFFSET_STRUCT(_1603);
DEFINE_OFFSET_STRUCT(_1603_EXFAT);
DEFINE_OFFSET_STRUCT(_1700);
DEFINE_OFFSET_STRUCT(_1700_EXFAT);
const fs_offsets_t *get_fs_offsets(enum FS_VER version) {
switch (version) {
@@ -254,10 +250,6 @@ const fs_offsets_t *get_fs_offsets(enum FS_VER version) {
return &(GET_OFFSET_STRUCT_NAME(_1603));
case FS_VER_16_0_3_EXFAT:
return &(GET_OFFSET_STRUCT_NAME(_1603_EXFAT));
case FS_VER_17_0_0:
return &(GET_OFFSET_STRUCT_NAME(_1700));
case FS_VER_17_0_0_EXFAT:
return &(GET_OFFSET_STRUCT_NAME(_1700_EXFAT));
default:
fatal_abort(Fatal_UnknownVersion);
}

@@ -98,9 +98,6 @@ enum FS_VER
FS_VER_16_0_3,
FS_VER_16_0_3_EXFAT,
FS_VER_17_0_0,
FS_VER_17_0_0_EXFAT,
FS_VER_MAX,
};

@@ -1,59 +0,0 @@
/*
* Copyright (c) 2019 m4xw <m4x@m4xw.net>
* Copyright (c) 2019 Atmosphere-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __FS_1700_H__
#define __FS_1700_H__
// Accessor vtable getters
#define FS_OFFSET_1700_SDMMC_ACCESSOR_GC 0x18AD00
#define FS_OFFSET_1700_SDMMC_ACCESSOR_SD 0x18C9D0
#define FS_OFFSET_1700_SDMMC_ACCESSOR_NAND 0x18B1D0
// Hooks
#define FS_OFFSET_1700_SDMMC_WRAPPER_READ 0x186BC0
#define FS_OFFSET_1700_SDMMC_WRAPPER_WRITE 0x186C20
#define FS_OFFSET_1700_RTLD 0x29D10
#define FS_OFFSET_1700_RTLD_DESTINATION ((uintptr_t)(INT64_C(-0x3C)))
#define FS_OFFSET_1700_CLKRST_SET_MIN_V_CLK_RATE 0x1A7B60
// Misc funcs
#define FS_OFFSET_1700_LOCK_MUTEX 0x17FEA0
#define FS_OFFSET_1700_UNLOCK_MUTEX 0x17FEF0
#define FS_OFFSET_1700_SDMMC_WRAPPER_CONTROLLER_OPEN 0x186B80
#define FS_OFFSET_1700_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x186BA0
// Misc Data
#define FS_OFFSET_1700_SD_MUTEX 0xFCE3F0
#define FS_OFFSET_1700_NAND_MUTEX 0xFC9B78
#define FS_OFFSET_1700_ACTIVE_PARTITION 0xFC9BB8
#define FS_OFFSET_1700_SDMMC_DAS_HANDLE 0xFAF840
// NOPs
#define FS_OFFSET_1700_SD_DAS_INIT 0x28C64
// Nintendo Paths
#define FS_OFFSET_1700_NINTENDO_PATHS \
{ \
{.opcode_reg = 3, .adrp_offset = 0x00068068, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 3, .adrp_offset = 0x0007510C, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 4, .adrp_offset = 0x0007BEAC, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 4, .adrp_offset = 0x0008F674, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 0, .adrp_offset = 0, .add_rel_offset = 0}, \
}
#endif // __FS_1700_H__

@@ -1,59 +0,0 @@
/*
* Copyright (c) 2019 m4xw <m4x@m4xw.net>
* Copyright (c) 2019 Atmosphere-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __FS_1700_EXFAT_H__
#define __FS_1700_EXFAT_H__
// Accessor vtable getters
#define FS_OFFSET_1700_EXFAT_SDMMC_ACCESSOR_GC 0x195B60
#define FS_OFFSET_1700_EXFAT_SDMMC_ACCESSOR_SD 0x197830
#define FS_OFFSET_1700_EXFAT_SDMMC_ACCESSOR_NAND 0x196030
// Hooks
#define FS_OFFSET_1700_EXFAT_SDMMC_WRAPPER_READ 0x191A20
#define FS_OFFSET_1700_EXFAT_SDMMC_WRAPPER_WRITE 0x191A80
#define FS_OFFSET_1700_EXFAT_RTLD 0x29D10
#define FS_OFFSET_1700_EXFAT_RTLD_DESTINATION ((uintptr_t)(INT64_C(-0x3C)))
#define FS_OFFSET_1700_EXFAT_CLKRST_SET_MIN_V_CLK_RATE 0x1B29C0
// Misc funcs
#define FS_OFFSET_1700_EXFAT_LOCK_MUTEX 0x18AD00
#define FS_OFFSET_1700_EXFAT_UNLOCK_MUTEX 0x18AD50
#define FS_OFFSET_1700_EXFAT_SDMMC_WRAPPER_CONTROLLER_OPEN 0x1919E0
#define FS_OFFSET_1700_EXFAT_SDMMC_WRAPPER_CONTROLLER_CLOSE 0x191A00
// Misc Data
#define FS_OFFSET_1700_EXFAT_SD_MUTEX 0xFE03F0
#define FS_OFFSET_1700_EXFAT_NAND_MUTEX 0xFDBB78
#define FS_OFFSET_1700_EXFAT_ACTIVE_PARTITION 0xFDBBB8
#define FS_OFFSET_1700_EXFAT_SDMMC_DAS_HANDLE 0xFBC840
// NOPs
#define FS_OFFSET_1700_EXFAT_SD_DAS_INIT 0x28C64
// Nintendo Paths
#define FS_OFFSET_1700_EXFAT_NINTENDO_PATHS \
{ \
{.opcode_reg = 3, .adrp_offset = 0x00068068, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 3, .adrp_offset = 0x0007510C, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 4, .adrp_offset = 0x0007BEAC, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 4, .adrp_offset = 0x0008F674, .add_rel_offset = 0x00000004}, \
{.opcode_reg = 0, .adrp_offset = 0, .add_rel_offset = 0}, \
}
#endif // __FS_1700_EXFAT_H__

@@ -85,10 +85,10 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
/* We can get away with only including latest because exosphere supports newer-than-expected master key in engine. */
/* TODO: Update on next change of keys. */
/* Mariko Development Master Kek Source. */
.byte 0x43, 0xDB, 0x9D, 0x88, 0xDB, 0x38, 0xE9, 0xBF, 0x3D, 0xD7, 0x83, 0x39, 0xEF, 0xB1, 0x4F, 0xA7
.byte 0x3A, 0x9C, 0xF0, 0x39, 0x70, 0x23, 0xF6, 0xAF, 0x71, 0x44, 0x60, 0xF4, 0x6D, 0xED, 0xA1, 0xD6
/* Mariko Production Master Kek Source. */
.byte 0x8D, 0xEE, 0x9E, 0x11, 0x36, 0x3A, 0x9B, 0x0A, 0x6A, 0xC7, 0xBB, 0xE9, 0xD1, 0x03, 0xF7, 0x80
.byte 0xA5, 0xEC, 0x16, 0x39, 0x1A, 0x30, 0x16, 0x08, 0x2E, 0xCF, 0x09, 0x6F, 0x5E, 0x7C, 0xEE, 0xA9
/* Development Master Key Vectors. */
.byte 0x46, 0x22, 0xB4, 0x51, 0x9A, 0x7E, 0xA7, 0x7F, 0x62, 0xA1, 0x1F, 0x8F, 0xC5, 0x3A, 0xDB, 0xFE /* Zeroes encrypted with Master Key 00. */
@ -107,7 +107,6 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0x08, 0xE0, 0xF4, 0xBE, 0xAA, 0x6E, 0x5A, 0xC3, 0xA6, 0xBC, 0xFE, 0xB9, 0xE2, 0xA3, 0x24, 0x12 /* Master key 0C encrypted with Master key 0D. */
.byte 0xD6, 0x80, 0x98, 0xC0, 0xFA, 0xC7, 0x13, 0xCB, 0x93, 0xD2, 0x0B, 0x82, 0x4C, 0xA1, 0x7B, 0x8D /* Master key 0D encrypted with Master key 0E. */
.byte 0x78, 0x66, 0x19, 0xBD, 0x86, 0xE7, 0xC1, 0x09, 0x9B, 0x6F, 0x92, 0xB2, 0x58, 0x7D, 0xCF, 0x26 /* Master key 0E encrypted with Master key 0F. */
.byte 0x39, 0x1E, 0x7E, 0xF8, 0x7E, 0x73, 0xEA, 0x6F, 0xAF, 0x00, 0x3A, 0xB4, 0xAA, 0xB8, 0xB7, 0x59 /* Master key 0F encrypted with Master key 10. */
/* Production Master Key Vectors. */
.byte 0x0C, 0xF0, 0x59, 0xAC, 0x85, 0xF6, 0x26, 0x65, 0xE1, 0xE9, 0x19, 0x55, 0xE6, 0xF2, 0x67, 0x3D /* Zeroes encrypted with Master Key 00. */
@@ -126,7 +125,6 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0x83, 0x67, 0xAF, 0x01, 0xCF, 0x93, 0xA1, 0xAB, 0x80, 0x45, 0xF7, 0x3F, 0x72, 0xFD, 0x3B, 0x38 /* Master key 0C encrypted with Master key 0D. */
.byte 0xB1, 0x81, 0xA6, 0x0D, 0x72, 0xC7, 0xEE, 0x15, 0x21, 0xF3, 0xC0, 0xB5, 0x6B, 0x61, 0x6D, 0xE7 /* Master key 0D encrypted with Master key 0E. */
.byte 0xAF, 0x11, 0x4C, 0x67, 0x17, 0x7A, 0x52, 0x43, 0xF7, 0x70, 0x2F, 0xC7, 0xEF, 0x81, 0x72, 0x16 /* Master key 0E encrypted with Master key 0F. */
.byte 0x25, 0x12, 0x8B, 0xCB, 0xB5, 0x46, 0xA1, 0xF8, 0xE0, 0x52, 0x15, 0xB7, 0x0B, 0x57, 0x00, 0xBD /* Master key 0F encrypted with Master key 10. */
/* Device Master Key Source Sources. */
.byte 0x8B, 0x4E, 0x1C, 0x22, 0x42, 0x07, 0xC8, 0x73, 0x56, 0x94, 0x08, 0x8B, 0xCC, 0x47, 0x0F, 0x5D /* 4.0.0 Device Master Key Source Source. */
@@ -142,7 +140,6 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0x5B, 0x94, 0x63, 0xF7, 0xAD, 0x96, 0x1B, 0xA6, 0x23, 0x30, 0x06, 0x4D, 0x01, 0xE4, 0xCE, 0x1D /* 14.0.0 Device Master Key Source Source. */
.byte 0x5E, 0xC9, 0xC5, 0x0A, 0xD0, 0x5F, 0x8B, 0x7B, 0xA7, 0x39, 0xEA, 0xBC, 0x60, 0x0F, 0x74, 0xE6 /* 15.0.0 Device Master Key Source Source. */
.byte 0xEA, 0x90, 0x6E, 0xA8, 0xAE, 0x92, 0x99, 0x64, 0x36, 0xC1, 0xF3, 0x1C, 0xC6, 0x32, 0x83, 0x8C /* 16.0.0 Device Master Key Source Source. */
.byte 0xDA, 0xB9, 0xD6, 0x77, 0x52, 0x2D, 0x1F, 0x78, 0x73, 0xC9, 0x98, 0x5B, 0x06, 0xFE, 0xA0, 0x52 /* 17.0.0 Device Master Key Source Source. */
/* Development Device Master Kek Sources. */
.byte 0xD6, 0xBD, 0x9F, 0xC6, 0x18, 0x09, 0xE1, 0x96, 0x20, 0x39, 0x60, 0xD2, 0x89, 0x83, 0x31, 0x34 /* 4.0.0 Device Master Kek Source. */
@@ -158,7 +155,6 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0xCE, 0x14, 0x74, 0x66, 0x98, 0xA8, 0x6D, 0x7D, 0xBD, 0x54, 0x91, 0x68, 0x5F, 0x1D, 0x0E, 0xEA /* 14.0.0 Device Master Kek Source. */
.byte 0xAE, 0x05, 0x48, 0x65, 0xAB, 0x17, 0x9D, 0x3D, 0x51, 0xB7, 0x56, 0xBD, 0x9B, 0x0B, 0x5B, 0x6E /* 15.0.0 Device Master Kek Source. */
.byte 0xFF, 0xF6, 0x4B, 0x0F, 0xFF, 0x0D, 0xC0, 0x4F, 0x56, 0x8A, 0x40, 0x74, 0x67, 0xC5, 0xFE, 0x9F /* 16.0.0 Device Master Kek Source. */
.byte 0x4E, 0xCE, 0x7B, 0x2A, 0xEA, 0x2E, 0x3D, 0x16, 0xD5, 0x2A, 0xDE, 0xF6, 0xF8, 0x6A, 0x7D, 0x43 /* 17.0.0 Device Master Kek Source. */
/* Production Device Master Kek Sources. */
.byte 0x88, 0x62, 0x34, 0x6E, 0xFA, 0xF7, 0xD8, 0x3F, 0xE1, 0x30, 0x39, 0x50, 0xF0, 0xB7, 0x5D, 0x5D /* 4.0.0 Device Master Kek Source. */
@@ -174,4 +170,3 @@ _ZN3ams6secmon4boot15VolatileKeyDataE:
.byte 0x67, 0xD5, 0xD6, 0x0C, 0x08, 0xF5, 0xA3, 0x11, 0xBD, 0x6D, 0x5A, 0xEB, 0x96, 0x24, 0xB0, 0xD2 /* 14.0.0 Device Master Kek Source. */
.byte 0x7C, 0x30, 0xED, 0x8B, 0x39, 0x25, 0x2C, 0x08, 0x8F, 0x48, 0xDC, 0x28, 0xE6, 0x1A, 0x6B, 0x49 /* 15.0.0 Device Master Kek Source. */
.byte 0xF0, 0xF3, 0xFF, 0x52, 0x75, 0x2F, 0xBA, 0x4D, 0x09, 0x72, 0x30, 0x89, 0xA9, 0xDF, 0xFE, 0x1F /* 16.0.0 Device Master Kek Source. */
.byte 0x21, 0xD6, 0x35, 0xF1, 0x0F, 0x7A, 0xF0, 0x5D, 0xDF, 0x79, 0x1C, 0x7A, 0xE4, 0x32, 0x82, 0x9E /* 17.0.0 Device Master Kek Source. */

@@ -94,7 +94,7 @@ namespace ams::secmon::boot {
}
/* Check that the key generation is one that we can use. */
static_assert(pkg1::KeyGeneration_Count == 17);
static_assert(pkg1::KeyGeneration_Count == 16);
if (key_generation >= pkg1::KeyGeneration_Count) {
return false;
}

@@ -70,15 +70,6 @@ namespace ams::secmon {
}
void PerformUserRebootByPmic() {
/* Ensure that i2c-5 is usable for communicating with the pmic. */
clkrst::EnableI2c5Clock();
i2c::Initialize(i2c::Port_5);
/* Reboot. */
pmic::ShutdownSystem(true);
}
void PerformUserRebootToRcm() {
/* Configure the bootrom to boot to rcm. */
reg::Write(PMC + APBDEV_PMC_SCRATCH0, 0x2);
@@ -109,20 +100,11 @@ }
}
void PerformUserShutDown() {
if (fuse::GetSocType() == fuse::SocType_Mariko) {
/* Ensure that i2c-5 is usable for communicating with the pmic. */
clkrst::EnableI2c5Clock();
i2c::Initialize(i2c::Port_5);
/* On Mariko shutdown via pmic. */
pmic::ShutdownSystem(false);
} else /* if (fuse::GetSocType() == fuse::SocType_Erista) */ {
/* Load our reboot stub to iram. */
LoadRebootStub(RebootStubAction_ShutDown);
/* Reboot. */
PerformPmcReboot();
}
}
}

@@ -23,13 +23,11 @@ namespace ams::secmon {
UserRebootType_ToRcm = 1,
UserRebootType_ToPayload = 2,
UserRebootType_ToFatalError = 3,
UserRebootType_ByPmic = 4,
};
void PerformUserRebootToRcm();
void PerformUserRebootToPayload();
void PerformUserRebootToFatalError();
void PerformUserRebootByPmic();
void PerformUserShutDown();
}

@@ -161,7 +161,6 @@ namespace ams::secmon::smc {
constexpr const u8 EsCommonKeySources[EsCommonKeyType_Count][AesKeySize] = {
[EsCommonKeyType_TitleKey] = { 0x1E, 0xDC, 0x7B, 0x3B, 0x60, 0xE6, 0xB4, 0xD8, 0x78, 0xB8, 0x17, 0x15, 0x98, 0x5E, 0x62, 0x9B },
[EsCommonKeyType_ArchiveKey] = { 0x3B, 0x78, 0xF2, 0x61, 0x0F, 0x9D, 0x5A, 0xE2, 0x7B, 0x4E, 0x45, 0xAF, 0xCB, 0x0B, 0x67, 0x4D },
[EsCommonKeyType_Unknown2] = { 0x42, 0x64, 0x0B, 0xE3, 0x5F, 0xC6, 0xBE, 0x47, 0xC7, 0xB4, 0x84, 0xC5, 0xEB, 0x63, 0xAA, 0x02 },
};
constexpr const u8 EsSealKeySource[AesKeySize] = {

@@ -22,7 +22,6 @@ namespace ams::secmon::smc {
enum EsCommonKeyType {
EsCommonKeyType_TitleKey = 0,
EsCommonKeyType_ArchiveKey = 1,
EsCommonKeyType_Unknown2 = 2,
EsCommonKeyType_Count,
};

@@ -357,9 +357,6 @@ namespace ams::secmon::smc {
case UserRebootType_ToFatalError:
PerformUserRebootToFatalError();
break;
case UserRebootType_ByPmic:
PerformUserRebootByPmic();
break;
default:
return SmcResult::InvalidArgument;
}
@@ -368,18 +365,19 @@
case UserRebootType_ToFatalError:
PerformUserRebootToFatalError();
break;
case UserRebootType_ByPmic:
PerformUserRebootByPmic();
break;
default:
return SmcResult::InvalidArgument;
}
}
break;
case ConfigItem::ExosphereNeedsShutdown:
if (soc_type == fuse::SocType_Erista) {
if (args.r[3] != 0) {
PerformUserShutDown();
}
} else /* if (soc_type == fuse::SocType_Mariko) */ {
return SmcResult::NotSupported;
}
break;
case ConfigItem::ExospherePayloadAddress:
if (g_payload_address == 0) {

@@ -32,8 +32,8 @@ namespace ams::secmon::smc {
struct PrepareEsDeviceUniqueKeyOption {
using KeyGeneration = util::BitPack32::Field<0, 6, int>;
using Type = util::BitPack32::Field<6, 2, EsCommonKeyType>;
using Reserved = util::BitPack32::Field<8, 24, u32>;
using Type = util::BitPack32::Field<6, 1, EsCommonKeyType>;
using Reserved = util::BitPack32::Field<7, 25, u32>;
};
constexpr const u8 ModularExponentiateByStorageKeyTable[] = {

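The PrepareEsDeviceUniqueKeyOption hunk above pairs with the EsCommonKeyType hunk earlier in this comparison: with a third common key type (Unknown2 = 2) present, a one-bit Type field can no longer represent every value, so Type widens to bits 6..7 and Reserved shrinks to bits 8..31. A minimal standalone sketch of that layout, using plain shifts and masks rather than Atmosphere's util::BitPack32:

#include <cstdint>

/* Sketch of the option word implied by the wider layout above:               */
/* bits 0..5 = key generation, bits 6..7 = common key type, bits 8..31 spare. */
constexpr uint32_t GetKeyGeneration(uint32_t option) { return option & 0x3Fu; }
constexpr uint32_t GetCommonKeyType(uint32_t option) { return (option >> 6) & 0x3u; }

/* A one-bit type field would truncate EsCommonKeyType_Unknown2 (= 2). */
static_assert(GetCommonKeyType((2u << 6) | 0x11u) == 2u);
static_assert(GetKeyGeneration((2u << 6) | 0x11u) == 0x11u);
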
@@ -23,17 +23,17 @@ namespace ams::nxboot {
alignas(se::AesBlockSize) constexpr inline const u8 MarikoMasterKekSource[se::AesBlockSize] = {
/* TODO: Update on next change of keys. */
0x8D, 0xEE, 0x9E, 0x11, 0x36, 0x3A, 0x9B, 0x0A, 0x6A, 0xC7, 0xBB, 0xE9, 0xD1, 0x03, 0xF7, 0x80
0xA5, 0xEC, 0x16, 0x39, 0x1A, 0x30, 0x16, 0x08, 0x2E, 0xCF, 0x09, 0x6F, 0x5E, 0x7C, 0xEE, 0xA9
};
alignas(se::AesBlockSize) constexpr inline const u8 MarikoMasterKekSourceDev[se::AesBlockSize] = {
/* TODO: Update on next change of keys. */
0x43, 0xDB, 0x9D, 0x88, 0xDB, 0x38, 0xE9, 0xBF, 0x3D, 0xD7, 0x83, 0x39, 0xEF, 0xB1, 0x4F, 0xA7
0x3A, 0x9C, 0xF0, 0x39, 0x70, 0x23, 0xF6, 0xAF, 0x71, 0x44, 0x60, 0xF4, 0x6D, 0xED, 0xA1, 0xD6
};
alignas(se::AesBlockSize) constexpr inline const u8 EristaMasterKekSource[se::AesBlockSize] = {
/* TODO: Update on next change of keys. */
0x71, 0xB9, 0xA6, 0xC0, 0xFF, 0x97, 0x6B, 0x0C, 0xB4, 0x40, 0xB9, 0xD5, 0x81, 0x5D, 0x81, 0x90
0x99, 0x22, 0x09, 0x57, 0xA7, 0xF9, 0x5E, 0x94, 0xFE, 0x78, 0x7F, 0x41, 0xD6, 0xE7, 0x56, 0xE6
};
alignas(se::AesBlockSize) constexpr inline const u8 KeyblobKeySource[se::AesBlockSize] = {
@@ -70,7 +70,6 @@ namespace ams::nxboot {
{ 0x5B, 0x94, 0x63, 0xF7, 0xAD, 0x96, 0x1B, 0xA6, 0x23, 0x30, 0x06, 0x4D, 0x01, 0xE4, 0xCE, 0x1D }, /* 14.0.0 Device Master Key Source Source. */
{ 0x5E, 0xC9, 0xC5, 0x0A, 0xD0, 0x5F, 0x8B, 0x7B, 0xA7, 0x39, 0xEA, 0xBC, 0x60, 0x0F, 0x74, 0xE6 }, /* 15.0.0 Device Master Key Source Source. */
{ 0xEA, 0x90, 0x6E, 0xA8, 0xAE, 0x92, 0x99, 0x64, 0x36, 0xC1, 0xF3, 0x1C, 0xC6, 0x32, 0x83, 0x8C }, /* 16.0.0 Device Master Key Source Source. */
{ 0xDA, 0xB9, 0xD6, 0x77, 0x52, 0x2D, 0x1F, 0x78, 0x73, 0xC9, 0x98, 0x5B, 0x06, 0xFE, 0xA0, 0x52 }, /* 17.0.0 Device Master Key Source Source. */
};
alignas(se::AesBlockSize) constexpr inline const u8 DeviceMasterKekSources[pkg1::OldDeviceMasterKeyCount][se::AesBlockSize] = {
@@ -87,7 +86,6 @@ namespace ams::nxboot {
{ 0x67, 0xD5, 0xD6, 0x0C, 0x08, 0xF5, 0xA3, 0x11, 0xBD, 0x6D, 0x5A, 0xEB, 0x96, 0x24, 0xB0, 0xD2 }, /* 14.0.0 Device Master Kek Source. */
{ 0x7C, 0x30, 0xED, 0x8B, 0x39, 0x25, 0x2C, 0x08, 0x8F, 0x48, 0xDC, 0x28, 0xE6, 0x1A, 0x6B, 0x49 }, /* 15.0.0 Device Master Kek Source. */
{ 0xF0, 0xF3, 0xFF, 0x52, 0x75, 0x2F, 0xBA, 0x4D, 0x09, 0x72, 0x30, 0x89, 0xA9, 0xDF, 0xFE, 0x1F }, /* 16.0.0 Device Master Kek Source. */
{ 0x21, 0xD6, 0x35, 0xF1, 0x0F, 0x7A, 0xF0, 0x5D, 0xDF, 0x79, 0x1C, 0x7A, 0xE4, 0x32, 0x82, 0x9E }, /* 17.0.0 Device Master Kek Source. */
};
alignas(se::AesBlockSize) constexpr inline const u8 DeviceMasterKekSourcesDev[pkg1::OldDeviceMasterKeyCount][se::AesBlockSize] = {
@@ -104,7 +102,6 @@ namespace ams::nxboot {
{ 0xCE, 0x14, 0x74, 0x66, 0x98, 0xA8, 0x6D, 0x7D, 0xBD, 0x54, 0x91, 0x68, 0x5F, 0x1D, 0x0E, 0xEA }, /* 14.0.0 Device Master Kek Source. */
{ 0xAE, 0x05, 0x48, 0x65, 0xAB, 0x17, 0x9D, 0x3D, 0x51, 0xB7, 0x56, 0xBD, 0x9B, 0x0B, 0x5B, 0x6E }, /* 15.0.0 Device Master Kek Source. */
{ 0xFF, 0xF6, 0x4B, 0x0F, 0xFF, 0x0D, 0xC0, 0x4F, 0x56, 0x8A, 0x40, 0x74, 0x67, 0xC5, 0xFE, 0x9F }, /* 16.0.0 Device Master Kek Source. */
{ 0x4E, 0xCE, 0x7B, 0x2A, 0xEA, 0x2E, 0x3D, 0x16, 0xD5, 0x2A, 0xDE, 0xF6, 0xF8, 0x6A, 0x7D, 0x43 }, /* 17.0.0 Device Master Kek Source. */
};
alignas(se::AesBlockSize) constexpr inline const u8 MasterKeySources[pkg1::KeyGeneration_Count][se::AesBlockSize] = {
@@ -124,7 +121,6 @@ namespace ams::nxboot {
{ 0x83, 0x67, 0xAF, 0x01, 0xCF, 0x93, 0xA1, 0xAB, 0x80, 0x45, 0xF7, 0x3F, 0x72, 0xFD, 0x3B, 0x38 }, /* Master key 0C encrypted with Master key 0D. */
{ 0xB1, 0x81, 0xA6, 0x0D, 0x72, 0xC7, 0xEE, 0x15, 0x21, 0xF3, 0xC0, 0xB5, 0x6B, 0x61, 0x6D, 0xE7 }, /* Master key 0D encrypted with Master key 0E. */
{ 0xAF, 0x11, 0x4C, 0x67, 0x17, 0x7A, 0x52, 0x43, 0xF7, 0x70, 0x2F, 0xC7, 0xEF, 0x81, 0x72, 0x16 }, /* Master key 0E encrypted with Master key 0F. */
{ 0x25, 0x12, 0x8B, 0xCB, 0xB5, 0x46, 0xA1, 0xF8, 0xE0, 0x52, 0x15, 0xB7, 0x0B, 0x57, 0x00, 0xBD }, /* Master key 0F encrypted with Master key 10. */
};
alignas(se::AesBlockSize) constexpr inline const u8 MasterKeySourcesDev[pkg1::KeyGeneration_Count][se::AesBlockSize] = {
@@ -144,7 +140,6 @@ namespace ams::nxboot {
{ 0x08, 0xE0, 0xF4, 0xBE, 0xAA, 0x6E, 0x5A, 0xC3, 0xA6, 0xBC, 0xFE, 0xB9, 0xE2, 0xA3, 0x24, 0x12 }, /* Master key 0C encrypted with Master key 0D. */
{ 0xD6, 0x80, 0x98, 0xC0, 0xFA, 0xC7, 0x13, 0xCB, 0x93, 0xD2, 0x0B, 0x82, 0x4C, 0xA1, 0x7B, 0x8D }, /* Master key 0D encrypted with Master key 0E. */
{ 0x78, 0x66, 0x19, 0xBD, 0x86, 0xE7, 0xC1, 0x09, 0x9B, 0x6F, 0x92, 0xB2, 0x58, 0x7D, 0xCF, 0x26 }, /* Master key 0E encrypted with Master key 0F. */
{ 0x39, 0x1E, 0x7E, 0xF8, 0x7E, 0x73, 0xEA, 0x6F, 0xAF, 0x00, 0x3A, 0xB4, 0xAA, 0xB8, 0xB7, 0x59 }, /* Master key 0F encrypted with Master key 10. */
};
alignas(se::AesBlockSize) constinit u8 MasterKeys[pkg1::OldMasterKeyCount][se::AesBlockSize] = {};

@@ -80,7 +80,7 @@ namespace ams::nxboot {
}
/* Check that the key generation is one that we can use. */
static_assert(pkg1::KeyGeneration_Count == 17);
static_assert(pkg1::KeyGeneration_Count == 16);
if (key_generation >= pkg1::KeyGeneration_Count) {
return false;
}

@@ -257,8 +257,6 @@ namespace ams::nxboot {
return ams::TargetFirmware_15_0_0;
} else if (std::memcmp(package1 + 0x10, "20230111", 8) == 0) {
return ams::TargetFirmware_16_0_0;
} else if (std::memcmp(package1 + 0x10, "20230906", 8) == 0) {
return ams::TargetFirmware_17_0_0;
}
break;
default:

@@ -24,9 +24,6 @@ namespace ams::nxboot {
namespace {
constexpr u32 MesoshereMetadataLayout0Magic = util::FourCC<'M','S','S','0'>::Code;
constexpr u32 MesoshereMetadataLayout1Magic = util::FourCC<'M','S','S','1'>::Code;
struct InitialProcessBinaryHeader {
static constexpr u32 Magic = util::FourCC<'I','N','I','1'>::Code;
@@ -168,9 +165,6 @@ namespace ams::nxboot {
FsVersion_16_0_3,
FsVersion_16_0_3_Exfat,
FsVersion_17_0_0,
FsVersion_17_0_0_Exfat,
FsVersion_Count,
};
@@ -251,24 +245,10 @@ namespace ams::nxboot {
{ 0x39, 0xEE, 0x1F, 0x1E, 0x0E, 0xA7, 0x32, 0x5D }, /* FsVersion_16_0_3 */
{ 0x62, 0xC6, 0x5E, 0xFD, 0x9A, 0xBF, 0x7C, 0x43 }, /* FsVersion_16_0_3_Exfat */
{ 0x27, 0x07, 0x3B, 0xF0, 0xA1, 0xB8, 0xCE, 0x61 }, /* FsVersion_17_0_0 */
{ 0xEE, 0x0F, 0x4B, 0xAC, 0x6D, 0x1F, 0xFC, 0x4B }, /* FsVersion_17_0_0_Exfat */
};
const InitialProcessBinaryHeader *FindInitialProcessBinary(const pkg2::Package2Header *header, const u8 *data, ams::TargetFirmware target_firmware) {
if (target_firmware >= ams::TargetFirmware_17_0_0) {
const u32 *data_32 = reinterpret_cast<const u32 *>(data);
const u32 branch_target = (data_32[0] & 0x00FFFFFF);
for (size_t i = branch_target; i < branch_target + 0x1000 / sizeof(u32); ++i) {
const u32 ini_offset = (i * sizeof(u32)) + data_32[i];
if (data_32[i + 1] == 0 && ini_offset <= header->meta.payload_sizes[0] && std::memcmp(data + ini_offset, "INI1", 4) == 0) {
return reinterpret_cast<const InitialProcessBinaryHeader *>(data + ini_offset);
}
}
return nullptr;
} else if (target_firmware >= ams::TargetFirmware_8_0_0) {
if (target_firmware >= ams::TargetFirmware_8_0_0) {
/* Try to find initial process binary. */
const u32 *data_32 = reinterpret_cast<const u32 *>(data);
for (size_t i = 0; i < 0x1000 / sizeof(u32); ++i) {
@@ -690,14 +670,6 @@ namespace ams::nxboot {
AddPatch(fs_meta, 0x191409, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x16B9A0, NogcPatch1, sizeof(NogcPatch1));
break;
case FsVersion_17_0_0:
AddPatch(fs_meta, 0x18B149, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x165200, NogcPatch1, sizeof(NogcPatch1));
break;
case FsVersion_17_0_0_Exfat:
AddPatch(fs_meta, 0x195FA9, NogcPatch0, sizeof(NogcPatch0));
AddPatch(fs_meta, 0x170060, NogcPatch1, sizeof(NogcPatch1));
break;
default:
break;
}
@@ -1039,20 +1011,7 @@ namespace ams::nxboot {
}
/* Set the embedded ini pointer. */
const u32 magic = *reinterpret_cast<const u32 *>(payload_data + 4);
if (magic == MesoshereMetadataLayout0Magic) {
std::memcpy(payload_data + 8, std::addressof(meso_size), sizeof(meso_size));
} else if (magic == MesoshereMetadataLayout1Magic) {
if (const u32 meta_offset = *reinterpret_cast<const u32 *>(payload_data + 8); meta_offset <= meso_size - sizeof(meso_size)) {
s64 relative_offset = meso_size - meta_offset;
std::memcpy(payload_data + meta_offset, std::addressof(relative_offset), sizeof(relative_offset));
} else {
ShowFatalError("Invalid mesosphere metadata layout!\n");
}
} else {
ShowFatalError("Unknown mesosphere metadata version!\n");
}
/* Get the ini pointer. */
InitialProcessBinaryHeader * const ini = reinterpret_cast<InitialProcessBinaryHeader *>(payload_data + meso_size);

@@ -6,7 +6,7 @@
[subrepo]
remote = https://github.com/Atmosphere-NX/Atmosphere-libs
branch = master
commit = 132558c33865f6a21f06caa31bdfc6b1f92bd9b2
parent = d389ef639ed2988325523f1d95f90030a3370541
commit = c3dc418a28e390bc57426016aa2c9e7e87d7a584
parent = e488b6ee478f5b3a0380e75e4b468e1e4b1d816f
method = merge
cmdver = 0.4.1

@@ -36,7 +36,6 @@ namespace ams::pkg1 {
KeyGeneration_14_0_0 = 0x0D,
KeyGeneration_15_0_0 = 0x0E,
KeyGeneration_16_0_0 = 0x0F,
KeyGeneration_17_0_0 = 0x10,
KeyGeneration_Count,

@@ -23,8 +23,8 @@ namespace ams::pkg2 {
constexpr inline int PayloadCount = 3;
constexpr inline int MinimumValidDataVersion = 0; /* We allow older package2 to load; this value is currently 0x18 in Nintendo's code. */
constexpr inline int CurrentBootloaderVersion = 0x14;
constexpr inline int MinimumValidDataVersion = 0; /* We allow older package2 to load; this value is currently 0x17 in Nintendo's code. */
constexpr inline int CurrentBootloaderVersion = 0x13;
struct Package2Meta {
using Magic = util::FourCC<'P','K','2','1'>;

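The comments in the pkg2 hunk above carry the rationale for the constants: Atmosphere keeps MinimumValidDataVersion at 0 so that older package2 images still load, while Nintendo's code requires 0x17 or 0x18 depending on which side of this comparison is used. A hypothetical check illustrating the difference (not Atmosphere's actual validation routine; the names below are assumptions):

/* Hypothetical illustration only. */
constexpr int AtmosphereMinimumValidDataVersion = 0x00;
constexpr int OfficialMinimumValidDataVersion   = 0x17; /* 0x18 on the other side of this diff */

constexpr bool IsDataVersionAccepted(int data_version, int minimum) { return data_version >= minimum; }

static_assert( IsDataVersionAccepted(0x10, AtmosphereMinimumValidDataVersion)); /* older image still loads     */
static_assert(!IsDataVersionAccepted(0x10, OfficialMinimumValidDataVersion));   /* official minimum rejects it */
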
@@ -177,7 +177,6 @@ namespace ams::fuse {
}
constexpr const TargetFirmware FuseVersionIncrementFirmwares[] = {
TargetFirmware_17_0_0,
TargetFirmware_16_0_0,
TargetFirmware_15_0_0,
TargetFirmware_13_2_1,

@@ -19,8 +19,13 @@
namespace ams::kern::init {
struct alignas(util::CeilingPowerOfTwo(INIT_ARGUMENTS_SIZE)) KInitArguments {
u64 ttbr0;
u64 ttbr1;
u64 tcr;
u64 mair;
u64 cpuactlr;
u64 cpuectlr;
u64 sctlr;
u64 sp;
u64 entrypoint;
u64 argument;
@@ -28,8 +33,13 @@ namespace ams::kern::init {
static_assert(alignof(KInitArguments) == util::CeilingPowerOfTwo(INIT_ARGUMENTS_SIZE));
static_assert(sizeof(KInitArguments) == std::max(INIT_ARGUMENTS_SIZE, util::CeilingPowerOfTwo(INIT_ARGUMENTS_SIZE)));
static_assert(AMS_OFFSETOF(KInitArguments, ttbr0) == INIT_ARGUMENTS_TTBR0);
static_assert(AMS_OFFSETOF(KInitArguments, ttbr1) == INIT_ARGUMENTS_TTBR1);
static_assert(AMS_OFFSETOF(KInitArguments, tcr) == INIT_ARGUMENTS_TCR);
static_assert(AMS_OFFSETOF(KInitArguments, mair) == INIT_ARGUMENTS_MAIR);
static_assert(AMS_OFFSETOF(KInitArguments, cpuactlr) == INIT_ARGUMENTS_CPUACTLR);
static_assert(AMS_OFFSETOF(KInitArguments, cpuectlr) == INIT_ARGUMENTS_CPUECTLR);
static_assert(AMS_OFFSETOF(KInitArguments, sctlr) == INIT_ARGUMENTS_SCTLR);
static_assert(AMS_OFFSETOF(KInitArguments, sp) == INIT_ARGUMENTS_SP);
static_assert(AMS_OFFSETOF(KInitArguments, entrypoint) == INIT_ARGUMENTS_ENTRYPOINT);
static_assert(AMS_OFFSETOF(KInitArguments, argument) == INIT_ARGUMENTS_ARGUMENT);

@@ -87,8 +87,9 @@ namespace ams::kern::arch::arm64::init {
template<IsInitialPageAllocator PageAllocator>
static ALWAYS_INLINE KPhysicalAddress AllocateNewPageTable(PageAllocator &allocator, u64 phys_to_virt_offset) {
MESOSPHERE_UNUSED(phys_to_virt_offset);
return allocator.Allocate(PageSize);
auto address = allocator.Allocate(PageSize);
ClearNewPageTable(address, phys_to_virt_offset);
return address;
}
static ALWAYS_INLINE void ClearNewPageTable(KPhysicalAddress address, u64 phys_to_virt_offset) {
@@ -882,12 +883,6 @@ namespace ams::kern::arch::arm64::init {
const size_t ind_max = ((aligned_end - aligned_start) / align) - 1;
while (true) {
if (const uintptr_t random_address = aligned_start + (KSystemControl::Init::GenerateRandomRange(0, ind_max) * align); this->TryAllocate(random_address, size)) {
/* Clear the allocated pages. */
volatile u64 *ptr = reinterpret_cast<volatile u64 *>(random_address);
for (size_t i = 0; i < size / sizeof(u64); ++i) {
ptr[i] = 0;
}
return random_address;
}
}

@@ -94,8 +94,3 @@ label_done:
ENABLE_FPU(xtmp1) \
GET_THREAD_CONTEXT_AND_RESTORE_FPCR_FPSR(ctx, xtmp1, xtmp2, wtmp1, wtmp2) \
RESTORE_FPU32_ALL_REGISTERS(ctx, xtmp1)
#define ERET_WITH_SPECULATION_BARRIER \
eret; \
dsb nsh; \
isb

@@ -246,12 +246,17 @@
#define THREAD_LOCAL_REGION_SIZE 0x200
/* ams::kern::init::KInitArguments, https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp */
#define INIT_ARGUMENTS_SIZE 0x28
#define INIT_ARGUMENTS_CPUACTLR 0x00
#define INIT_ARGUMENTS_CPUECTLR 0x08
#define INIT_ARGUMENTS_SP 0x10
#define INIT_ARGUMENTS_ENTRYPOINT 0x18
#define INIT_ARGUMENTS_ARGUMENT 0x20
#define INIT_ARGUMENTS_SIZE 0x50
#define INIT_ARGUMENTS_TTBR0 0x00
#define INIT_ARGUMENTS_TTBR1 0x08
#define INIT_ARGUMENTS_TCR 0x10
#define INIT_ARGUMENTS_MAIR 0x18
#define INIT_ARGUMENTS_CPUACTLR 0x20
#define INIT_ARGUMENTS_CPUECTLR 0x28
#define INIT_ARGUMENTS_SCTLR 0x30
#define INIT_ARGUMENTS_SP 0x38
#define INIT_ARGUMENTS_ENTRYPOINT 0x40
#define INIT_ARGUMENTS_ARGUMENT 0x48
/* ams::kern::KScheduler (::SchedulingState), https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp */
/* NOTE: Due to constraints on ldarb relative offsets, KSCHEDULER_NEEDS_SCHEDULING cannot trivially be changed, and will require assembly edits. */

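The 0x50-byte variant of the INIT_ARGUMENTS_* macros above is simply the byte layout of ten consecutive u64 fields in ams::kern::init::KInitArguments, shown earlier in this comparison. A standalone sketch that reproduces those offsets; the real struct additionally carries an alignment requirement via util::CeilingPowerOfTwo, which is omitted here:

#include <cstddef>
#include <cstdint>

/* Ten consecutive u64 fields give the 0x00..0x48 offsets and the 0x50 total */
/* size encoded by the INIT_ARGUMENTS_* macros (alignment handling omitted). */
struct InitArgumentsSketch {
    uint64_t ttbr0, ttbr1, tcr, mair, cpuactlr, cpuectlr, sctlr, sp, entrypoint, argument;
};
static_assert(offsetof(InitArgumentsSketch, tcr)      == 0x10);
static_assert(offsetof(InitArgumentsSketch, sctlr)    == 0x30);
static_assert(offsetof(InitArgumentsSketch, argument) == 0x48);
static_assert(sizeof(InitArgumentsSketch)             == 0x50);
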
@@ -372,10 +372,6 @@ namespace ams::kern::arch::arm64::cpu {
this->SetBit(19, en);
return *this;
}
constexpr ALWAYS_INLINE bool GetWxn() const {
return this->GetBits(19, 1) != 0;
}
};
/* Accessors for timer registers. */

@@ -178,7 +178,7 @@ namespace ams::kern::arch::arm64 {
}
NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);
NOINLINE Result InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);
Result Finalize();
private:
Result MapL1Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
@@ -208,8 +208,8 @@
}
}
Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
Result MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll);
Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll);
Result MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
bool MergePages(KProcessAddress virt_addr, PageLinkedList *page_list);

@@ -28,8 +28,8 @@ namespace ams::kern::arch::arm64 {
m_page_table.Activate(id);
}
Result Initialize(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
R_RETURN(m_page_table.InitializeForProcess(as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size, system_resource, resource_limit));
Result Initialize(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
R_RETURN(m_page_table.InitializeForProcess(id, as_type, enable_aslr, enable_das_merge, from_back, pool, code_address, code_size, system_resource, resource_limit));
}
void Finalize() { m_page_table.Finalize(); }
@@ -150,24 +150,20 @@ namespace ams::kern::arch::arm64 {
R_RETURN(m_page_table.InvalidateProcessDataCache(address, size));
}
Result InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size) {
R_RETURN(m_page_table.InvalidateCurrentProcessDataCache(address, size));
}
Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) {
R_RETURN(m_page_table.ReadDebugMemory(buffer, address, size));
}
Result ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size, KMemoryState state) {
R_RETURN(m_page_table.ReadDebugIoMemory(buffer, address, size, state));
Result ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size) {
R_RETURN(m_page_table.ReadDebugIoMemory(buffer, address, size));
}
Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size) {
R_RETURN(m_page_table.WriteDebugMemory(address, buffer, size));
}
Result WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size, KMemoryState state) {
R_RETURN(m_page_table.WriteDebugIoMemory(address, buffer, size, state));
Result WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size) {
R_RETURN(m_page_table.WriteDebugIoMemory(address, buffer, size));
}
Result LockForMapDeviceAddressSpace(bool *out_is_io, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned, bool check_heap) {
@@ -300,7 +296,6 @@ namespace ams::kern::arch::arm64 {
bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { return m_page_table.IsInUnsafeAliasRegion(addr, size); }
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return m_page_table.CanContain(addr, size, state); }
bool CanContain(KProcessAddress addr, size_t size, ams::svc::MemoryState state) const { return m_page_table.CanContain(addr, size, state); }
KProcessAddress GetAddressSpaceStart() const { return m_page_table.GetAddressSpaceStart(); }
KProcessAddress GetHeapRegionStart() const { return m_page_table.GetHeapRegionStart(); }

@@ -23,8 +23,9 @@ namespace ams::kern::arch::arm64 {
class KSupervisorPageTable {
private:
KPageTable m_page_table;
u64 m_ttbr0_identity[cpu::NumCores];
public:
constexpr KSupervisorPageTable() : m_page_table(util::ConstantInitialize) { /* ... */ }
constexpr KSupervisorPageTable() : m_page_table(util::ConstantInitialize), m_ttbr0_identity() { /* ... */ }
NOINLINE void Initialize(s32 core_id);
@@ -60,6 +61,8 @@ namespace ams::kern::arch::arm64 {
return m_page_table.GetPhysicalAddress(out, address);
}
constexpr u64 GetIdentityMapTtbr0(s32 core_id) const { return m_ttbr0_identity[core_id]; }
void DumpMemoryBlocks() const {
return m_page_table.DumpMemoryBlocks();
}

@@ -114,23 +114,6 @@ namespace ams::kern::init::Elf::Elf64 {
}
};
class Relr {
private:
Xword m_info;
public:
constexpr ALWAYS_INLINE bool IsLocation() const {
return (m_info & 1) == 0;
}
constexpr ALWAYS_INLINE Xword GetLocation() const {
return m_info;
}
constexpr ALWAYS_INLINE Xword GetBitmap() const {
return m_info >> 1;
}
};
enum DynamicTag {
DT_NULL = 0,
DT_RELA = 7,
@@ -138,10 +121,6 @@
DT_REL = 17,
DT_RELENT = 19,
DT_RELRSZ = 35,
DT_RELR = 36,
DT_RELRENT = 37,
DT_RELACOUNT = 0x6ffffff9,
DT_RELCOUNT = 0x6ffffffa
};

@@ -31,22 +31,8 @@ namespace ams::kern::init {
u32 dynamic_offset;
u32 init_array_offset;
u32 init_array_end_offset;
u32 sysreg_offset;
};
static_assert(util::is_pod<KernelLayout>::value);
static_assert(sizeof(KernelLayout) == 0x34);
#if defined(ATMOSPHERE_ARCH_ARM64)
struct KernelSystemRegisters {
u64 ttbr0_el1;
u64 ttbr1_el1;
u64 tcr_el1;
u64 mair_el1;
u64 sctlr_el1;
};
#else
struct KernelSystemRegisters {
};
#endif
static_assert(sizeof(KernelLayout) == 0x30);
}

@@ -34,14 +34,8 @@ namespace ams::kern {
uintptr_t _08;
};
struct InitialProcessBinaryLayoutWithSize {
InitialProcessBinaryLayout layout;
size_t size;
};
KPhysicalAddress GetInitialProcessBinaryPhysicalAddress();
size_t GetInitialProcessBinarySize();
void SetInitialProcessBinaryPhysicalAddress(KPhysicalAddress phys_addr, size_t size);
void SetInitialProcessBinaryPhysicalAddress(KPhysicalAddress phys_addr);
u64 GetInitialProcessIdMin();
u64 GetInitialProcessIdMax();

@@ -133,7 +133,7 @@ namespace ams::kern {
}
Result MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const;
void Load(const KPageGroup &pg, KVirtualAddress data) const;
Result Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params, KProcessAddress src) const;
Result SetMemoryPermissions(KProcessPageTable &page_table, const ams::svc::CreateProcessParameter &params) const;
};

@@ -44,7 +44,6 @@ namespace ams::kern {
KMemoryState_FlagCanChangeAttribute = (1 << 24),
KMemoryState_FlagCanCodeMemory = (1 << 25),
KMemoryState_FlagLinearMapped = (1 << 26),
KMemoryState_FlagCanPermissionLock = (1 << 27),
KMemoryState_FlagsData = KMemoryState_FlagCanReprotect | KMemoryState_FlagCanUseIpc |
KMemoryState_FlagCanUseNonDeviceIpc | KMemoryState_FlagCanUseNonSecureIpc |
@@ -67,22 +66,18 @@ namespace ams::kern {
KMemoryState_Free = ams::svc::MemoryState_Free,
KMemoryState_IoMemory = ams::svc::MemoryState_Io | KMemoryState_FlagMapped | KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap,
KMemoryState_IoRegister = ams::svc::MemoryState_Io | KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap,
KMemoryState_Static = ams::svc::MemoryState_Static | KMemoryState_FlagCanQueryPhysical,
KMemoryState_Io = ams::svc::MemoryState_Io | KMemoryState_FlagMapped | KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap,
KMemoryState_Static = ams::svc::MemoryState_Static | KMemoryState_FlagMapped | KMemoryState_FlagCanQueryPhysical,
KMemoryState_Code = ams::svc::MemoryState_Code | KMemoryState_FlagsCode | KMemoryState_FlagCanMapProcess,
KMemoryState_CodeData = ams::svc::MemoryState_CodeData | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeMemory | KMemoryState_FlagCanPermissionLock,
KMemoryState_CodeData = ams::svc::MemoryState_CodeData | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeMemory,
KMemoryState_Normal = ams::svc::MemoryState_Normal | KMemoryState_FlagsData | KMemoryState_FlagCanCodeMemory,
KMemoryState_Shared = ams::svc::MemoryState_Shared | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped,
/* KMemoryState_Alias was removed after 1.0.0. */
KMemoryState_AliasCode = ams::svc::MemoryState_AliasCode | KMemoryState_FlagsCode | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeAlias,
KMemoryState_AliasCodeData = ams::svc::MemoryState_AliasCodeData | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeAlias | KMemoryState_FlagCanCodeMemory
| KMemoryState_FlagCanPermissionLock,
KMemoryState_AliasCodeData = ams::svc::MemoryState_AliasCodeData | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeAlias | KMemoryState_FlagCanCodeMemory,
KMemoryState_Ipc = ams::svc::MemoryState_Ipc | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
| KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
@@ -90,7 +85,7 @@ namespace ams::kern {
KMemoryState_Stack = ams::svc::MemoryState_Stack | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
| KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
KMemoryState_ThreadLocal = ams::svc::MemoryState_ThreadLocal | KMemoryState_FlagLinearMapped,
KMemoryState_ThreadLocal = ams::svc::MemoryState_ThreadLocal | KMemoryState_FlagMapped | KMemoryState_FlagLinearMapped,
KMemoryState_Transfered = ams::svc::MemoryState_Transfered | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagCanChangeAttribute
| KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
@@ -109,7 +104,7 @@ namespace ams::kern {
KMemoryState_NonDeviceIpc = ams::svc::MemoryState_NonDeviceIpc | KMemoryState_FlagsMisc | KMemoryState_FlagCanUseNonDeviceIpc,
KMemoryState_Kernel = ams::svc::MemoryState_Kernel,
KMemoryState_Kernel = ams::svc::MemoryState_Kernel | KMemoryState_FlagMapped,
KMemoryState_GeneratedCode = ams::svc::MemoryState_GeneratedCode | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDebug | KMemoryState_FlagLinearMapped,
KMemoryState_CodeOut = ams::svc::MemoryState_CodeOut | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped,
@@ -117,36 +112,35 @@ namespace ams::kern {
KMemoryState_Coverage = ams::svc::MemoryState_Coverage | KMemoryState_FlagMapped,
KMemoryState_Insecure = ams::svc::MemoryState_Insecure | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped | KMemoryState_FlagCanChangeAttribute
| KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagCanQueryPhysical
| KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap
| KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
};
#if 1
static_assert(KMemoryState_Free == 0x00000000);
static_assert(KMemoryState_IoMemory == 0x00182001);
static_assert(KMemoryState_IoRegister == 0x00180001);
static_assert(KMemoryState_Static == 0x00040002);
static_assert(KMemoryState_Io == 0x00182001);
static_assert(KMemoryState_Static == 0x00042002);
static_assert(KMemoryState_Code == 0x04DC7E03);
static_assert(KMemoryState_CodeData == 0x0FFEBD04);
static_assert(KMemoryState_CodeData == 0x07FEBD04);
static_assert(KMemoryState_Normal == 0x077EBD05);
static_assert(KMemoryState_Shared == 0x04402006);
static_assert(KMemoryState_AliasCode == 0x04DD7E08);
static_assert(KMemoryState_AliasCodeData == 0x0FFFBD09);
static_assert(KMemoryState_AliasCodeData == 0x07FFBD09);
static_assert(KMemoryState_Ipc == 0x045C3C0A);
static_assert(KMemoryState_Stack == 0x045C3C0B);
static_assert(KMemoryState_ThreadLocal == 0x0400000C);
static_assert(KMemoryState_ThreadLocal == 0x0400200C);
static_assert(KMemoryState_Transfered == 0x055C3C0D);
static_assert(KMemoryState_SharedTransfered == 0x045C380E);
static_assert(KMemoryState_SharedCode == 0x0440380F);
static_assert(KMemoryState_Inaccessible == 0x00000010);
static_assert(KMemoryState_NonSecureIpc == 0x045C3811);
static_assert(KMemoryState_NonDeviceIpc == 0x044C2812);
static_assert(KMemoryState_Kernel == 0x00000013);
static_assert(KMemoryState_Kernel == 0x00002013);
static_assert(KMemoryState_GeneratedCode == 0x04402214);
static_assert(KMemoryState_CodeOut == 0x04402015);
static_assert(KMemoryState_Coverage == 0x00002016); /* TODO: Is this correct? */
static_assert(KMemoryState_Insecure == 0x055C3817);
static_assert(KMemoryState_Coverage == 0x00002016);
static_assert(KMemoryState_Insecure == 0x05583817);
#endif
enum KMemoryPermission : u8 {
@@ -189,9 +183,8 @@ namespace ams::kern {
KMemoryAttribute_IpcLocked = ams::svc::MemoryAttribute_IpcLocked,
KMemoryAttribute_DeviceShared = ams::svc::MemoryAttribute_DeviceShared,
KMemoryAttribute_Uncached = ams::svc::MemoryAttribute_Uncached,
KMemoryAttribute_PermissionLocked = ams::svc::MemoryAttribute_PermissionLocked,
KMemoryAttribute_SetMask = KMemoryAttribute_Uncached | KMemoryAttribute_PermissionLocked,
KMemoryAttribute_SetMask = KMemoryAttribute_Uncached,
};
enum KMemoryBlockDisableMergeAttribute : u8 {
@@ -265,10 +258,6 @@ namespace ams::kern {
return m_state;
}
constexpr ams::svc::MemoryState GetSvcState() const {
return static_cast<ams::svc::MemoryState>(m_state & KMemoryState_Mask);
}
constexpr KMemoryPermission GetPermission() const {
return m_permission;
}
@@ -331,10 +320,6 @@ namespace ams::kern {
return this->GetEndAddress() - 1;
}
constexpr KMemoryState GetState() const {
return m_memory_state;
}
constexpr u16 GetIpcLockCount() const {
return m_ipc_lock_count;
}
@@ -454,14 +439,6 @@ namespace ams::kern {
}
}
constexpr void UpdateAttribute(u32 mask, u32 attr) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT((mask & KMemoryAttribute_IpcLocked) == 0);
MESOSPHERE_ASSERT((mask & KMemoryAttribute_DeviceShared) == 0);
m_attribute = static_cast<KMemoryAttribute>((m_attribute & ~mask) | attr);
}
constexpr void Split(KMemoryBlock *block, KProcessAddress addr) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this->GetAddress() < addr);

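A quick way to read the differing KMemoryState static_assert constants in the hunks above is to split them into the low svc MemoryState byte plus flag bits. Taking KMemoryState_FlagLinearMapped = 1 << 26 from the enum shown above, and KMemoryState_FlagMapped = 1 << 13 (an inference consistent with the asserted KMemoryState_Kernel values 0x00002013 vs 0x00000013), the two KMemoryState_ThreadLocal constants differ by exactly the FlagMapped bit:

/* Worked check of the two ThreadLocal constants asserted above               */
/* (MemoryState_ThreadLocal = 0x0C; flag bit positions as noted in the text). */
static_assert((0x0Cu | (1u << 13) | (1u << 26)) == 0x0400200Cu); /* variant with FlagMapped    */
static_assert((0x0Cu | (1u << 26))              == 0x0400000Cu); /* variant without FlagMapped */
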
@@ -104,9 +104,7 @@ namespace ams::kern {
void Update(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr, KMemoryBlockDisableMergeAttribute clear_disable_attr);
void UpdateLock(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, MemoryBlockLockFunction lock_func, KMemoryPermission perm);
void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr, KMemoryBlockDisableMergeAttribute clear_disable_attr);
void UpdateAttribute(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, u32 mask, u32 attr);
void UpdateIfMatch(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr);
iterator FindIterator(KProcessAddress address) const {
return m_memory_block_tree.find(KMemoryBlock(util::ConstantInitialize, address, 1, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None));

@@ -212,9 +212,7 @@ namespace ams::kern {
static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() == (0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));
constexpr inline const auto KMemoryRegionType_DramKernelSecureAppletMemory = KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute(KMemoryRegionAttr_LinearMapped);
constexpr inline const auto KMemoryRegionType_DramKernelSecureUnknown = KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 1).SetAttribute(KMemoryRegionAttr_LinearMapped);
static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() == (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));
static_assert(KMemoryRegionType_DramKernelSecureUnknown.GetValue() == (0x28E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));
constexpr inline const auto KMemoryRegionType_DramReservedEarly = KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
static_assert(KMemoryRegionType_DramReservedEarly.GetValue() == (0x16 | KMemoryRegionAttr_NoUserMap));
@@ -230,55 +228,53 @@ namespace ams::kern {
constexpr inline const auto KMemoryRegionType_DramPoolPartition = KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
static_assert(KMemoryRegionType_DramPoolPartition.GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
constexpr inline const auto KMemoryRegionType_DramPoolManagement = KMemoryRegionType_DramPoolPartition.Derive(4, 0).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
/* UNUSED: .Derive(4, 1); */
/* UNUSED: .Derive(4, 2); */
constexpr inline const auto KMemoryRegionType_DramUserPool = KMemoryRegionType_DramPoolPartition.Derive(4, 3);
static_assert(KMemoryRegionType_DramPoolManagement.GetValue() == (0xE6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected));
static_assert(KMemoryRegionType_DramUserPool .GetValue() == (0x266 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
constexpr inline const auto KMemoryRegionType_DramPoolManagement = KMemoryRegionType_DramPoolPartition.DeriveTransition(0, 2).DeriveTransition().SetAttribute(KMemoryRegionAttr_CarveoutProtected);
constexpr inline const auto KMemoryRegionType_DramUserPool = KMemoryRegionType_DramPoolPartition.DeriveTransition(1, 2).DeriveTransition();
static_assert(KMemoryRegionType_DramPoolManagement.GetValue() == (0x166 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected));
static_assert(KMemoryRegionType_DramUserPool.GetValue() == (0x1A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
constexpr inline const auto KMemoryRegionType_DramApplicationPool = KMemoryRegionType_DramUserPool.Derive(4, 0);
constexpr inline const auto KMemoryRegionType_DramAppletPool = KMemoryRegionType_DramUserPool.Derive(4, 1);
constexpr inline const auto KMemoryRegionType_DramSystemNonSecurePool = KMemoryRegionType_DramUserPool.Derive(4, 2);
constexpr inline const auto KMemoryRegionType_DramSystemPool = KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
static_assert(KMemoryRegionType_DramApplicationPool .GetValue() == (0xE66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramAppletPool .GetValue() == (0x1666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramSystemNonSecurePool.GetValue() == (0x1A66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramSystemPool .GetValue() == (0x2666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected));
static_assert(KMemoryRegionType_DramApplicationPool .GetValue() == (0x7A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramAppletPool .GetValue() == (0xBA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramSystemNonSecurePool.GetValue() == (0xDA6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
static_assert(KMemoryRegionType_DramSystemPool .GetValue() == (0x13A6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected));
constexpr inline const auto KMemoryRegionType_VirtualDramHeapBase = KMemoryRegionType_Dram.DeriveSparse(1, 4, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelPtHeap = KMemoryRegionType_Dram.DeriveSparse(1, 4, 1);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelTraceBuffer = KMemoryRegionType_Dram.DeriveSparse(1, 4, 2);
constexpr inline const auto KMemoryRegionType_VirtualDramHeapBase = KMemoryRegionType_Dram.DeriveSparse(1, 3, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelPtHeap = KMemoryRegionType_Dram.DeriveSparse(1, 3, 1);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelTraceBuffer = KMemoryRegionType_Dram.DeriveSparse(1, 3, 2);
static_assert(KMemoryRegionType_VirtualDramHeapBase .GetValue() == 0x1A);
static_assert(KMemoryRegionType_VirtualDramKernelPtHeap .GetValue() == 0x2A);
static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);
/* UNUSED: .DeriveSparse(2, 2, 0); */
constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug = KMemoryRegionType_Dram.Advance(2).Derive(4, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory = KMemoryRegionType_Dram.Advance(2).Derive(4, 1);
/* UNUSED: .Derive(4, 2); */
constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureUnknown = KMemoryRegionType_Dram.Advance(2).Derive(4, 3);
static_assert(KMemoryRegionType_VirtualDramUnknownDebug .GetValue() == (0x32));
static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x52));
static_assert(KMemoryRegionType_VirtualDramKernelSecureUnknown .GetValue() == (0x92));
constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug = KMemoryRegionType_Dram.DeriveSparse(2, 2, 1);
static_assert(KMemoryRegionType_VirtualDramUnknownDebug.GetValue() == (0x52));
constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory = KMemoryRegionType_Dram.DeriveSparse(3, 1, 0);
static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x62));
constexpr inline const auto KMemoryRegionType_VirtualDramKernelInitPt = KMemoryRegionType_VirtualDramHeapBase.Derive(4, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramPoolManagement = KMemoryRegionType_VirtualDramHeapBase.Derive(4, 1);
constexpr inline const auto KMemoryRegionType_VirtualDramUserPool = KMemoryRegionType_VirtualDramHeapBase.Derive(4, 2);
/* UNUSED: .Derive(4, 3); */
static_assert(KMemoryRegionType_VirtualDramKernelInitPt .GetValue() == 0x31A);
static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x51A);
static_assert(KMemoryRegionType_VirtualDramUserPool .GetValue() == 0x61A);
constexpr inline const auto KMemoryRegionType_VirtualDramKernelInitPt = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramPoolManagement = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 1);
constexpr inline const auto KMemoryRegionType_VirtualDramUserPool = KMemoryRegionType_VirtualDramHeapBase.Derive(3, 2);
static_assert(KMemoryRegionType_VirtualDramKernelInitPt .GetValue() == 0x19A);
static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x29A);
static_assert(KMemoryRegionType_VirtualDramUserPool .GetValue() == 0x31A);
constexpr inline const auto KMemoryRegionType_VirtualDramApplicationPool = KMemoryRegionType_VirtualDramUserPool.Derive(4, 0);
constexpr inline const auto KMemoryRegionType_VirtualDramAppletPool = KMemoryRegionType_VirtualDramUserPool.Derive(4, 1);
constexpr inline const auto KMemoryRegionType_VirtualDramSystemNonSecurePool = KMemoryRegionType_VirtualDramUserPool.Derive(4, 2);
constexpr inline const auto KMemoryRegionType_VirtualDramSystemPool = KMemoryRegionType_VirtualDramUserPool.Derive(4, 3);
static_assert(KMemoryRegionType_VirtualDramApplicationPool .GetValue() == 0x361A);
static_assert(KMemoryRegionType_VirtualDramAppletPool .GetValue() == 0x561A);
static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x661A);
static_assert(KMemoryRegionType_VirtualDramSystemPool .GetValue() == 0x961A);
/* NOTE: For an unknown reason, the pools are derived out-of-order here. */
/* It's worth eventually trying to understand why Nintendo made this choice. */
/* UNUSED: .Derive(6, 0); */
/* UNUSED: .Derive(6, 1); */
constexpr inline const auto KMemoryRegionType_VirtualDramAppletPool = KMemoryRegionType_VirtualDramUserPool.Derive(6, 2);
constexpr inline const auto KMemoryRegionType_VirtualDramApplicationPool = KMemoryRegionType_VirtualDramUserPool.Derive(6, 3);
constexpr inline const auto KMemoryRegionType_VirtualDramSystemNonSecurePool = KMemoryRegionType_VirtualDramUserPool.Derive(6, 4);
constexpr inline const auto KMemoryRegionType_VirtualDramSystemPool = KMemoryRegionType_VirtualDramUserPool.Derive(6, 5);
static_assert(KMemoryRegionType_VirtualDramAppletPool .GetValue() == 0x1B1A);
static_assert(KMemoryRegionType_VirtualDramApplicationPool .GetValue() == 0x271A);
static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x2B1A);
static_assert(KMemoryRegionType_VirtualDramSystemPool .GetValue() == 0x331A);
constexpr inline const auto KMemoryRegionType_ArchDeviceBase = KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly();
constexpr inline const auto KMemoryRegionType_BoardDeviceBase = KMemoryRegionType_Kernel.DeriveTransition(0, 2).SetDenseOnly();
@ -332,14 +328,12 @@ namespace ams::kern {
static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);
constexpr ALWAYS_INLINE KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelTraceBuffer;
} else if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelPtHeap;
} else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;
} else if (KMemoryRegionType_DramKernelSecureUnknown.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelSecureUnknown;
} else if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
return KMemoryRegionType_VirtualDramKernelTraceBuffer;
} else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
return KMemoryRegionType_VirtualDramUnknownDebug;
} else {

View File

@ -158,16 +158,8 @@ namespace ams::kern {
private:
const KPageGroup *m_pg;
public:
explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup *gp, bool not_first = true) : m_pg(gp) {
if (m_pg) {
if (not_first) {
m_pg->Open();
} else {
m_pg->OpenFirst();
}
}
}
explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup &gp, bool not_first = true) : KScopedPageGroup(std::addressof(gp), not_first) { /* ... */ }
explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup *gp) : m_pg(gp) { if (m_pg) { m_pg->Open(); } }
explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup &gp) : KScopedPageGroup(std::addressof(gp)) { /* ... */ }
ALWAYS_INLINE ~KScopedPageGroup() { if (m_pg) { m_pg->Close(); } }
ALWAYS_INLINE void CancelClose() {
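As a review aid, the scoped open/close pattern above can be restated as a minimal standalone C++ sketch; RefCounted and ScopedRef are hypothetical stand-ins, not kernel types, and model the variant that distinguishes Open from OpenFirst and allows cancelling the close once an operation succeeds.

#include <cstdio>

class RefCounted {
    private:
        int m_refs = 0;
    public:
        void OpenFirst() { m_refs = 1; }
        void Open()      { ++m_refs; }
        void Close()     { if (--m_refs == 0) { std::printf("released\n"); } }
        int  GetRefs() const { return m_refs; }
};

class ScopedRef {
    private:
        RefCounted *m_obj;
    public:
        explicit ScopedRef(RefCounted *obj, bool not_first = true) : m_obj(obj) {
            if (m_obj != nullptr) {
                if (not_first) { m_obj->Open(); } else { m_obj->OpenFirst(); }
            }
        }
        ~ScopedRef() { if (m_obj != nullptr) { m_obj->Close(); } }
        void CancelClose() { m_obj = nullptr; }
};

int main() {
    RefCounted group;
    {
        ScopedRef scoped(&group, /* not_first = */ false);
        /* ...the mapping operation succeeds, so keep the reference the group now holds... */
        scoped.CancelClose();
    }
    std::printf("refs after scope: %d\n", group.GetRefs()); /* prints 1, the close was cancelled */
    return 0;
}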

View File

@ -88,8 +88,8 @@ namespace ams::kern {
enum OperationType {
OperationType_Map = 0,
OperationType_MapGroup = 1,
OperationType_MapFirstGroup = 2,
OperationType_MapFirst = 1,
OperationType_MapGroup = 2,
OperationType_Unmap = 3,
OperationType_ChangePermissions = 4,
OperationType_ChangePermissionsAndRefresh = 5,
@ -241,20 +241,16 @@ namespace ams::kern {
bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
/* Even though Unsafe physical memory is KMemoryState_Normal, it must be mapped inside the alias code region. */
return this->CanContain(addr, size, ams::svc::MemoryState_AliasCode);
return this->CanContain(addr, size, KMemoryState_AliasCode);
}
ALWAYS_INLINE KScopedLightLock AcquireDeviceMapLock() {
return KScopedLightLock(m_device_map_lock);
}
KProcessAddress GetRegionAddress(ams::svc::MemoryState state) const;
size_t GetRegionSize(ams::svc::MemoryState state) const;
bool CanContain(KProcessAddress addr, size_t size, ams::svc::MemoryState state) const;
ALWAYS_INLINE KProcessAddress GetRegionAddress(KMemoryState state) const { return this->GetRegionAddress(static_cast<ams::svc::MemoryState>(state & KMemoryState_Mask)); }
ALWAYS_INLINE size_t GetRegionSize(KMemoryState state) const { return this->GetRegionSize(static_cast<ams::svc::MemoryState>(state & KMemoryState_Mask)); }
ALWAYS_INLINE bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return this->CanContain(addr, size, static_cast<ams::svc::MemoryState>(state & KMemoryState_Mask)); }
KProcessAddress GetRegionAddress(KMemoryState state) const;
size_t GetRegionSize(KMemoryState state) const;
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const;
protected:
/* NOTE: These three functions (Operate, Operate, FinalizeUpdate) are virtual functions */
/* in Nintendo's kernel. We devirtualize them, since KPageTable is the only derived */
@ -312,7 +308,6 @@ namespace ams::kern {
}
Result CheckMemoryState(const KMemoryInfo &info, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const;
Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const;
Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const;
Result CheckMemoryState(size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const {
R_RETURN(this->CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
@ -326,7 +321,7 @@ namespace ams::kern {
Result QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const;
Result QueryMappingImpl(KProcessAddress *out, KPhysicalAddress address, size_t size, ams::svc::MemoryState state) const;
Result QueryMappingImpl(KProcessAddress *out, KPhysicalAddress address, size_t size, KMemoryState state) const;
Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, KMemoryPermission perm);
Result MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll);
@ -340,9 +335,9 @@ namespace ams::kern {
NOINLINE Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
Result MapIoImpl(KProcessAddress *out, PageLinkedList *page_list, KPhysicalAddress phys_addr, size_t size, KMemoryState state, KMemoryPermission perm);
Result ReadIoMemoryImpl(void *buffer, KPhysicalAddress phys_addr, size_t size, KMemoryState state);
Result WriteIoMemoryImpl(KPhysicalAddress phys_addr, const void *buffer, size_t size, KMemoryState state);
Result MapIoImpl(KProcessAddress *out, PageLinkedList *page_list, KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
Result ReadIoMemoryImpl(void *buffer, KPhysicalAddress phys_addr, size_t size);
Result WriteIoMemoryImpl(KPhysicalAddress phys_addr, const void *buffer, size_t size);
Result SetupForIpcClient(PageLinkedList *page_list, size_t *out_blocks_needed, KProcessAddress address, size_t size, KMemoryPermission test_perm, KMemoryState dst_state);
Result SetupForIpcServer(KProcessAddress *out_addr, size_t size, KProcessAddress src_addr, KMemoryPermission test_perm, KMemoryState dst_state, KPageTableBase &src_page_table, bool send);
@ -376,8 +371,8 @@ namespace ams::kern {
Result SetMaxHeapSize(size_t size);
Result QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const;
Result QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const;
Result QueryStaticMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { R_RETURN(this->QueryMappingImpl(out, address, size, ams::svc::MemoryState_Static)); }
Result QueryIoMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { R_RETURN(this->QueryMappingImpl(out, address, size, ams::svc::MemoryState_Io)); }
Result QueryStaticMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { R_RETURN(this->QueryMappingImpl(out, address, size, KMemoryState_Static)); }
Result QueryIoMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { R_RETURN(this->QueryMappingImpl(out, address, size, KMemoryState_Io)); }
Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
@ -412,13 +407,12 @@ namespace ams::kern {
Result MakeAndOpenPageGroup(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr);
Result InvalidateProcessDataCache(KProcessAddress address, size_t size);
Result InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size);
Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size);
Result ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size, KMemoryState state);
Result ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size);
Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size);
Result WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size, KMemoryState state);
Result WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size);
Result LockForMapDeviceAddressSpace(bool *out_is_io, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned, bool check_heap);
Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);

View File

@ -76,7 +76,6 @@ namespace ams::kern {
bool m_is_signaled;
bool m_is_initialized;
bool m_is_application;
bool m_is_default_application_system_resource;
char m_name[13];
util::Atomic<u16> m_num_running_threads;
u32 m_flags;
@ -179,8 +178,6 @@ namespace ams::kern {
constexpr bool IsApplication() const { return m_is_application; }
constexpr bool IsDefaultApplicationSystemResource() const { return m_is_default_application_system_resource; }
constexpr bool IsSuspended() const { return m_is_suspended; }
constexpr void SetSuspended(bool suspended) { m_is_suspended = suspended; }
@ -283,20 +280,12 @@ namespace ams::kern {
void IncrementRunningThreadCount();
void DecrementRunningThreadCount();
size_t GetRequiredSecureMemorySizeNonDefault() const {
return (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) ? static_cast<KSecureSystemResource *>(m_system_resource)->CalculateRequiredSecureMemorySize() : 0;
}
size_t GetRequiredSecureMemorySize() const {
return m_system_resource->IsSecureResource() ? static_cast<KSecureSystemResource *>(m_system_resource)->CalculateRequiredSecureMemorySize() : 0;
}
size_t GetTotalSystemResourceSize() const {
return (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) ? static_cast<KSecureSystemResource *>(m_system_resource)->GetSize() : 0;
return m_system_resource->IsSecureResource() ? static_cast<KSecureSystemResource *>(m_system_resource)->GetSize() : 0;
}
size_t GetUsedSystemResourceSize() const {
return (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) ? static_cast<KSecureSystemResource *>(m_system_resource)->GetUsedSize() : 0;
return m_system_resource->IsSecureResource() ? static_cast<KSecureSystemResource *>(m_system_resource)->GetUsedSize() : 0;
}
void SetRunningThread(s32 core, KThread *thread, u64 idle_count, u64 switch_count) {

View File

@ -25,8 +25,7 @@ namespace ams::kern {
static constexpr s32 ExitWorkerPriority = 11;
enum WorkerType {
WorkerType_ExitThread,
WorkerType_ExitProcess,
WorkerType_Exit,
WorkerType_Count,
};

View File

@ -59,9 +59,7 @@ namespace ams::kern::arch::arm64 {
EsrEc_BrkInstruction = 0b111100,
};
u32 GetInstructionDataSupervisorMode(const KExceptionContext *context, u64 esr) {
constexpr u32 GetInstructionData(const KExceptionContext *context, u64 esr) {
/* Check for THUMB usermode */
if ((context->psr & 0x3F) == 0x30) {
u32 insn = *reinterpret_cast<u16 *>(context->pc & ~0x1);
@ -76,37 +74,6 @@ namespace ams::kern::arch::arm64 {
}
}
u32 GetInstructionDataUserMode(const KExceptionContext *context) {
/* Check for THUMB usermode */
u32 insn = 0;
if ((context->psr & 0x3F) == 0x30) {
u16 insn_high = 0;
if (UserspaceAccess::CopyMemoryFromUser(std::addressof(insn_high), reinterpret_cast<u16 *>(context->pc & ~0x1), sizeof(insn_high))) {
insn = insn_high;
/* Check if the instruction was a THUMB mode branch prefix. */
if (((insn >> 11) & 0b11110) == 0b11110) {
u16 insn_low = 0;
if (UserspaceAccess::CopyMemoryFromUser(std::addressof(insn_low), reinterpret_cast<u16 *>((context->pc & ~0x1) + sizeof(u16)), sizeof(insn_low))) {
insn = (static_cast<u32>(insn_high) << 16) | (static_cast<u32>(insn_low) << 0);
} else {
insn = 0;
}
}
} else {
insn = 0;
}
} else {
u32 insn_value = 0;
if (UserspaceAccess::CopyMemoryFromUser(std::addressof(insn_value), reinterpret_cast<u32 *>(context->pc), sizeof(insn_value))) {
insn = insn_value;
} else {
insn = 0;
}
}
return insn;
}
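A minimal standalone sketch of the THUMB prefix test used above, ((insn >> 11) & 0b11110) == 0b11110; the helper name is hypothetical. It accepts halfwords whose top five bits are 0b11110 or 0b11111, which the handler treats as the first half of a 32-bit THUMB encoding and therefore fetches and concatenates the second halfword.

#include <cstdint>
#include <cstdio>

static bool IsThumbWidePrefix(uint16_t insn_high) {
    return ((insn_high >> 11) & 0b11110) == 0b11110;
}

int main() {
    std::printf("%d\n", IsThumbWidePrefix(0xF000)); /* top bits 0b11110 (BL prefix)   -> 1 */
    std::printf("%d\n", IsThumbWidePrefix(0xF800)); /* top bits 0b11111               -> 1 */
    std::printf("%d\n", IsThumbWidePrefix(0x4770)); /* top bits 0b01000 (16-bit BX LR) -> 0 */
    return 0;
}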
void HandleUserException(KExceptionContext *context, u64 esr, u64 far, u64 afsr0, u64 afsr1, u32 data) {
KProcess &cur_process = GetCurrentProcess();
bool should_process_user_exception = KTargetSystem::IsUserExceptionHandlersEnabled();
@ -534,7 +501,6 @@ namespace ams::kern::arch::arm64 {
MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled());
/* Retrieve information about the exception. */
const bool is_user_mode = (context->psr & 0xF) == 0;
const u64 esr = cpu::GetEsrEl1();
const u64 afsr0 = cpu::GetAfsr0El1();
const u64 afsr1 = cpu::GetAfsr1El1();
@ -548,12 +514,7 @@ namespace ams::kern::arch::arm64 {
case EsrEc_BkptInstruction:
case EsrEc_BrkInstruction:
far = context->pc;
/* NOTE: Nintendo always calls GetInstructionDataUserMode. */
if (is_user_mode) {
data = GetInstructionDataUserMode(context);
} else {
data = GetInstructionDataSupervisorMode(context, esr);
}
data = GetInstructionData(context, esr);
break;
case EsrEc_Svc32:
if (context->psr & 0x20) {
@ -582,6 +543,7 @@ namespace ams::kern::arch::arm64 {
/* Verify that spsr's M is allowable (EL0t). */
{
const bool is_user_mode = (context->psr & 0xF) == 0;
if (is_user_mode) {
/* If the user disable count is set, we may need to pin the current thread. */
if (GetCurrentThread().GetUserDisableCount() != 0 && GetCurrentProcess().GetPinnedThread(GetCurrentCoreId()) == nullptr) {

View File

@ -207,7 +207,10 @@ namespace ams::kern::arch::arm64 {
R_SUCCEED();
}
Result KPageTable::InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
Result KPageTable::InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
/* The input ID isn't actually used. */
MESOSPHERE_UNUSED(id);
/* Get an ASID */
m_asid = g_asid_manager.Reserve();
ON_RESULT_FAILURE { g_asid_manager.Release(m_asid); };
@ -236,15 +239,9 @@ namespace ams::kern::arch::arm64 {
/* Only process tables should be finalized. */
MESOSPHERE_ASSERT(!this->IsKernel());
/* NOTE: Here Nintendo calls an unknown OnFinalize function. */
/* this->OnFinalize(); */
/* Note that we've updated (to ensure we're synchronized). */
this->NoteUpdated();
/* NOTE: Here Nintendo calls a second unknown OnFinalize function. */
/* this->OnFinalize2(); */
/* Free all pages in the table. */
{
/* Get implementation objects. */
@ -351,7 +348,7 @@ namespace ams::kern::arch::arm64 {
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
MESOSPHERE_ASSERT(this->ContainsPages(virt_addr, num_pages));
if (operation == OperationType_Map) {
if (operation == OperationType_Map || operation == OperationType_MapFirst) {
MESOSPHERE_ABORT_UNLESS(is_pa_valid);
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
} else {
@ -378,7 +375,8 @@ namespace ams::kern::arch::arm64 {
switch (operation) {
case OperationType_Map:
R_RETURN(this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll));
case OperationType_MapFirst:
R_RETURN(this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, operation != OperationType_MapFirst, page_list, reuse_ll));
case OperationType_ChangePermissions:
R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, false, false, page_list, reuse_ll));
case OperationType_ChangePermissionsAndRefresh:
@ -401,8 +399,7 @@ namespace ams::kern::arch::arm64 {
auto entry_template = this->GetEntryTemplate(properties);
switch (operation) {
case OperationType_MapGroup:
case OperationType_MapFirstGroup:
R_RETURN(this->MapGroup(virt_addr, page_group, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, operation != OperationType_MapFirstGroup, page_list, reuse_ll));
R_RETURN(this->MapGroup(virt_addr, page_group, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll));
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
@ -759,7 +756,7 @@ namespace ams::kern::arch::arm64 {
R_SUCCEED();
}
Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Cache initial addresses for use on cleanup. */
@ -830,17 +827,21 @@ namespace ams::kern::arch::arm64 {
/* Open references to the pages, if we should. */
if (IsHeapPhysicalAddress(orig_phys_addr)) {
if (not_first) {
Kernel::GetMemoryManager().Open(orig_phys_addr, num_pages);
} else {
Kernel::GetMemoryManager().OpenFirst(orig_phys_addr, num_pages);
}
}
R_SUCCEED();
}
Result KPageTable::MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll) {
Result KPageTable::MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* We want to maintain a new reference to every page in the group. */
KScopedPageGroup spg(pg, not_first);
KScopedPageGroup spg(pg);
/* Cache initial address for use on cleanup. */
const KProcessAddress orig_virt_addr = virt_addr;

View File

@ -18,8 +18,12 @@
namespace ams::kern::arch::arm64 {
void KSupervisorPageTable::Initialize(s32 core_id) {
/* Verify that sctlr_el1 has the wxn bit set. */
MESOSPHERE_ABORT_UNLESS(cpu::SystemControlRegisterAccessor().GetWxn());
/* Get the identity mapping ttbr0. */
m_ttbr0_identity[core_id] = cpu::GetTtbr0El1();
/* Set sctlr_el1 */
cpu::SystemControlRegisterAccessor().SetWxn(true).Store();
cpu::EnsureInstructionConsistency();
/* Invalidate the entire TLB. */
cpu::InvalidateEntireTlb();

View File

@ -130,4 +130,4 @@ _ZN3ams4kern3svc14RestoreContextEm:
/* Return. */
add sp, sp, #(EXCEPTION_CONTEXT_SIZE)
ERET_WITH_SPECULATION_BARRIER
eret

View File

@ -194,7 +194,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev:
/* Return. */
add sp, sp, #(EXCEPTION_CONTEXT_SIZE)
ERET_WITH_SPECULATION_BARRIER
eret
5: /* Return from SVC. */
@ -297,7 +297,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler64Ev:
/* Return. */
add sp, sp, #(EXCEPTION_CONTEXT_SIZE)
ERET_WITH_SPECULATION_BARRIER
eret
/* ams::kern::arch::arm64::SvcHandler32() */
.section .text._ZN3ams4kern4arch5arm6412SvcHandler32Ev, "ax", %progbits
@ -467,7 +467,7 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev:
/* Return. */
add sp, sp, #(EXCEPTION_CONTEXT_SIZE)
ERET_WITH_SPECULATION_BARRIER
eret
5: /* Return from SVC. */
@ -547,4 +547,4 @@ _ZN3ams4kern4arch5arm6412SvcHandler32Ev:
/* Return. */
add sp, sp, #(EXCEPTION_CONTEXT_SIZE)
ERET_WITH_SPECULATION_BARRIER
eret

View File

@ -32,6 +32,7 @@ namespace ams::kern::board::nintendo::nx {
class SavedSystemRegisters {
private:
u64 ttbr0_el1;
u64 tcr_el1;
u64 elr_el1;
u64 sp_el0;
u64 spsr_el1;
@ -91,6 +92,7 @@ namespace ams::kern::board::nintendo::nx {
void SavedSystemRegisters::Save() {
/* Save system registers. */
this->ttbr0_el1 = cpu::GetTtbr0El1();
this->tcr_el1 = cpu::GetTcrEl1();
this->tpidr_el0 = cpu::GetTpidrEl0();
this->elr_el1 = cpu::GetElrEl1();
this->sp_el0 = cpu::GetSpEl0();
@ -406,6 +408,7 @@ namespace ams::kern::board::nintendo::nx {
/* Restore system registers. */
cpu::SetTtbr0El1 (this->ttbr0_el1);
cpu::SetTcrEl1 (this->tcr_el1);
cpu::SetTpidrEl0 (this->tpidr_el0);
cpu::SetElrEl1 (this->elr_el1);
cpu::SetSpEl0 (this->sp_el0);
@ -512,6 +515,24 @@ namespace ams::kern::board::nintendo::nx {
/* Save the system registers for the current core. */
g_sleep_system_registers[core_id].Save();
/* Change the translation tables to use the kernel table. */
{
/* Get the current value of the translation control register. */
const u64 tcr = cpu::GetTcrEl1();
/* Disable translation table walks on tlb miss. */
cpu::TranslationControlRegisterAccessor(tcr).SetEpd0(true).Store();
cpu::EnsureInstructionConsistency();
/* Change the translation table base (ttbr0) to use the kernel table. */
cpu::SetTtbr0El1(Kernel::GetKernelPageTable().GetIdentityMapTtbr0(core_id));
cpu::EnsureInstructionConsistency();
/* Enable translation table walks on tlb miss. */
cpu::TranslationControlRegisterAccessor(tcr).SetEpd0(false).Store();
cpu::EnsureInstructionConsistency();
}
/* Invalidate the entire tlb. */
cpu::InvalidateEntireTlb();
@ -531,14 +552,13 @@ namespace ams::kern::board::nintendo::nx {
/* Setup the initial arguments. */
{
/* Determine whether we're running on a cortex-a53 or a-57. */
cpu::MainIdRegisterAccessor midr_el1;
const auto implementer = midr_el1.GetImplementer();
const auto primary_part = midr_el1.GetPrimaryPartNumber();
const bool needs_cpu_ctlr = (implementer == cpu::MainIdRegisterAccessor::Implementer::ArmLimited) && (primary_part == cpu::MainIdRegisterAccessor::PrimaryPartNumber::CortexA57 || primary_part == cpu::MainIdRegisterAccessor::PrimaryPartNumber::CortexA53);
init_args->cpuactlr = needs_cpu_ctlr ? cpu::GetCpuActlrEl1() : 0;
init_args->cpuectlr = needs_cpu_ctlr ? cpu::GetCpuEctlrEl1() : 0;
init_args->ttbr0 = cpu::GetTtbr0El1();
init_args->ttbr1 = cpu::GetTtbr1El1();
init_args->tcr = cpu::GetTcrEl1();
init_args->mair = cpu::GetMairEl1();
init_args->cpuactlr = cpu::GetCpuActlrEl1();
init_args->cpuectlr = cpu::GetCpuEctlrEl1();
init_args->sctlr = cpu::GetSctlrEl1();
init_args->sp = 0;
init_args->entrypoint = reinterpret_cast<uintptr_t>(::ams::kern::board::nintendo::nx::KSleepManager::ResumeEntry);
init_args->argument = sleep_buffer;

View File

@ -21,13 +21,10 @@ namespace ams::kern::init::Elf {
void ApplyRelocations(uintptr_t base_address, const Dyn *dynamic) {
uintptr_t dyn_rel = 0;
uintptr_t dyn_rela = 0;
uintptr_t dyn_relr = 0;
uintptr_t rel_count = 0;
uintptr_t rela_count = 0;
uintptr_t relr_sz = 0;
uintptr_t rel_ent = 0;
uintptr_t rela_ent = 0;
uintptr_t relr_ent = 0;
/* Iterate over all tags, identifying important extents. */
for (const Dyn *cur_entry = dynamic; cur_entry->GetTag() != DT_NULL; cur_entry++) {
@ -38,38 +35,24 @@ namespace ams::kern::init::Elf {
case DT_RELA:
dyn_rela = base_address + cur_entry->GetPtr();
break;
case DT_RELR:
dyn_relr = base_address + cur_entry->GetPtr();
break;
case DT_RELENT:
rel_ent = cur_entry->GetValue();
break;
case DT_RELAENT:
rela_ent = cur_entry->GetValue();
break;
case DT_RELRENT:
relr_ent = cur_entry->GetValue();
break;
case DT_RELCOUNT:
rel_count = cur_entry->GetValue();
break;
case DT_RELACOUNT:
rela_count = cur_entry->GetValue();
break;
case DT_RELRSZ:
relr_sz = cur_entry->GetValue();
break;
}
}
/* Apply all Rel relocations */
if (rel_count > 0) {
/* Check that the rel relocations are applicable. */
MESOSPHERE_INIT_ABORT_UNLESS(dyn_rel != 0);
MESOSPHERE_INIT_ABORT_UNLESS(rel_ent == sizeof(Elf::Rel));
for (size_t i = 0; i < rel_count; ++i) {
const auto &rel = reinterpret_cast<const Elf::Rel *>(dyn_rel)[i];
for (size_t i = 0; i < rel_count; i++) {
const auto &rel = *reinterpret_cast<const Elf::Rel *>(dyn_rel + rel_ent * i);
/* Only allow architecture-specific relocations. */
while (rel.GetType() != R_ARCHITECTURE_RELATIVE) { /* ... */ }
@ -78,16 +61,10 @@ namespace ams::kern::init::Elf {
Elf::Addr *target_address = reinterpret_cast<Elf::Addr *>(base_address + rel.GetOffset());
*target_address += base_address;
}
}
/* Apply all Rela relocations. */
if (rela_count > 0) {
/* Check that the rela relocations are applicable. */
MESOSPHERE_INIT_ABORT_UNLESS(dyn_rela != 0);
MESOSPHERE_INIT_ABORT_UNLESS(rela_ent == sizeof(Elf::Rela));
for (size_t i = 0; i < rela_count; ++i) {
const auto &rela = reinterpret_cast<const Elf::Rela *>(dyn_rela)[i];
for (size_t i = 0; i < rela_count; i++) {
const auto &rela = *reinterpret_cast<const Elf::Rela *>(dyn_rela + rela_ent * i);
/* Only allow architecture-specific relocations. */
while (rela.GetType() != R_ARCHITECTURE_RELATIVE) { /* ... */ }
@ -98,42 +75,6 @@ namespace ams::kern::init::Elf {
}
}
/* Apply all Relr relocations. */
if (relr_sz >= sizeof(Elf::Relr)) {
/* Check that the relr relocations are applicable. */
MESOSPHERE_INIT_ABORT_UNLESS(dyn_relr != 0);
MESOSPHERE_INIT_ABORT_UNLESS(relr_ent == sizeof(Elf::Relr));
const size_t relr_count = relr_sz / sizeof(Elf::Relr);
Elf::Addr *where = nullptr;
for (size_t i = 0; i < relr_count; ++i) {
const auto &relr = reinterpret_cast<const Elf::Relr *>(dyn_relr)[i];
if (relr.IsLocation()) {
/* Update location. */
where = reinterpret_cast<Elf::Addr *>(base_address + relr.GetLocation());
/* Apply the relocation. */
*(where++) += base_address;
} else {
/* Get the bitmap. */
u64 bitmap = relr.GetBitmap();
/* Apply all relocations. */
while (bitmap != 0) {
const u64 next = util::CountTrailingZeros(bitmap);
bitmap &= ~(static_cast<u64>(1) << next);
where[next] += base_address;
}
/* Advance. */
where += BITSIZEOF(bitmap) - 1;
}
}
}
}
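The RELR handling shown above appears to implement the packed SHT_RELR encoding: an even entry names the next word to relocate, while an odd entry is a bitmap whose bits 1..63 mark which of the following 63 words also get the load base added (GetBitmap() is assumed to strip the low marker bit). A minimal userspace sketch with illustrative values:

#include <cstdint>
#include <cstdio>

int main() {
    /* Eight consecutive words; pretend each initially holds an unrelocated pointer value. */
    uint64_t image[8] = {10, 20, 30, 40, 50, 60, 70, 80};
    const uint64_t base = 1000;

    /* Two RELR entries: relocate image[1], then a bitmap covering the words after it. */
    const uint64_t relr[] = {
        1 * sizeof(uint64_t),        /* even: byte offset of the first word to relocate          */
        (1u << 1) | (1u << 3) | 1u,  /* odd: marker bit 0, bits 1 and 3 mark image[2] and image[4] */
    };

    uint64_t *where = nullptr;
    for (uint64_t entry : relr) {
        if ((entry & 1) == 0) {
            where = reinterpret_cast<uint64_t *>(reinterpret_cast<uint8_t *>(image) + entry);
            *(where++) += base;                        /* relocate the named word, then advance  */
        } else {
            for (uint64_t bitmap = entry >> 1, i = 0; bitmap != 0; bitmap >>= 1, ++i) {
                if (bitmap & 1) { where[i] += base; }  /* relocate each marked word              */
            }
            where += 63;                               /* each bitmap entry covers 63 words      */
        }
    }

    for (uint64_t v : image) { std::printf("%llu ", static_cast<unsigned long long>(v)); }
    std::printf("\n"); /* prints: 10 1020 1030 40 1050 60 70 80 */
    return 0;
}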
void CallInitArrayFuncs(uintptr_t init_array_start, uintptr_t init_array_end) {
for (uintptr_t cur_entry = init_array_start; cur_entry < init_array_end; cur_entry += sizeof(void *)) {
(*(void (**)())(cur_entry))();

View File

@ -171,9 +171,6 @@ namespace ams::kern::init {
const KMemoryRegion &slab_region = KMemoryLayout::GetSlabRegion();
KVirtualAddress address = slab_region.GetAddress();
/* Clear the slab region. */
std::memset(GetVoidPointer(address), 0, slab_region.GetSize());
/* Initialize slab type array to be in sorted order. */
KSlabType slab_types[KSlabType_Count];
for (size_t i = 0; i < util::size(slab_types); i++) { slab_types[i] = static_cast<KSlabType>(i); }

View File

@ -27,7 +27,6 @@ namespace ams::kern {
constinit KPhysicalAddress g_initial_process_binary_phys_addr = Null<KPhysicalAddress>;
constinit KVirtualAddress g_initial_process_binary_address = Null<KVirtualAddress>;
constinit size_t g_initial_process_binary_size = 0;
constinit InitialProcessBinaryHeader g_initial_process_binary_header = {};
constinit size_t g_initial_process_secure_memory_size = 0;
constinit u64 g_initial_process_id_min = std::numeric_limits<u64>::max();
@ -156,15 +155,41 @@ namespace ams::kern {
KPageGroup *process_pg = std::addressof(pg);
ON_SCOPE_EXIT { process_pg->Close(); };
/* Load the process. */
reader.Load(pg, data);
/* Get the temporary region. */
const auto &temp_region = KMemoryLayout::GetTempRegion();
MESOSPHERE_ABORT_UNLESS(temp_region.GetEndAddress() != 0);
/* If necessary, close/release the aligned part of the data we just loaded. */
if (const size_t aligned_bin_size = util::AlignDown(binary_size, PageSize); aligned_bin_size != 0 && src_pool != dst_pool) {
Kernel::GetMemoryManager().Close(KMemoryLayout::GetLinearPhysicalAddress(data), aligned_bin_size / PageSize);
Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, aligned_bin_size);
/* Map the process's memory into the temporary region. */
KProcessAddress temp_address = Null<KProcessAddress>;
MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().MapPageGroup(std::addressof(temp_address), pg, temp_region.GetAddress(), temp_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite));
/* Setup the new page group's memory, so that we can load the process. */
{
/* Copy the unaligned ending of the compressed binary. */
if (const size_t unaligned_size = binary_size - util::AlignDown(binary_size, PageSize); unaligned_size != 0) {
std::memcpy(GetVoidPointer(temp_address + process_size - unaligned_size), GetVoidPointer(data + binary_size - unaligned_size), unaligned_size);
}
/* Copy the aligned part of the compressed binary. */
if (const size_t aligned_size = util::AlignDown(binary_size, PageSize); aligned_size != 0 && src_pool == dst_pool) {
std::memmove(GetVoidPointer(temp_address + process_size - binary_size), GetVoidPointer(temp_address), aligned_size);
} else {
if (src_pool != dst_pool) {
std::memcpy(GetVoidPointer(temp_address + process_size - binary_size), GetVoidPointer(data), aligned_size);
Kernel::GetMemoryManager().Close(KMemoryLayout::GetLinearPhysicalAddress(data), aligned_size / PageSize);
}
}
/* Clear the first part of the memory. */
std::memset(GetVoidPointer(temp_address), 0, process_size - binary_size);
}
/* Load the process. */
MESOSPHERE_R_ABORT_UNLESS(reader.Load(temp_address, params, temp_address + process_size - binary_size));
/* Unmap the temporary mapping. */
MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().UnmapPageGroup(temp_address, pg, KMemoryState_Kernel));
/* Create a KProcess object. */
new_process = KProcess::Create();
MESOSPHERE_ABORT_UNLESS(new_process != nullptr);
@ -216,6 +241,11 @@ namespace ams::kern {
MESOSPHERE_R_ABORT_UNLESS(new_process->Initialize(params, *process_pg, reader.GetCapabilities(), reader.GetNumCapabilities(), std::addressof(Kernel::GetSystemResourceLimit()), dst_pool, reader.IsImmortal()));
}
/* Release the memory that was previously reserved. */
if (const size_t aligned_bin_size = util::AlignDown(binary_size, PageSize); aligned_bin_size != 0 && src_pool != dst_pool) {
Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, aligned_bin_size);
}
/* Set the process's memory permissions. */
MESOSPHERE_R_ABORT_UNLESS(reader.SetMemoryPermissions(new_process->GetPageTable(), params));
@ -245,11 +275,10 @@ namespace ams::kern {
}
void SetInitialProcessBinaryPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
void SetInitialProcessBinaryPhysicalAddress(KPhysicalAddress phys_addr) {
MESOSPHERE_INIT_ABORT_UNLESS(g_initial_process_binary_phys_addr == Null<KPhysicalAddress>);
g_initial_process_binary_phys_addr = phys_addr;
g_initial_process_binary_size = size;
}
KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() {
@ -258,12 +287,6 @@ namespace ams::kern {
return g_initial_process_binary_phys_addr;
}
size_t GetInitialProcessBinarySize() {
MESOSPHERE_INIT_ABORT_UNLESS(g_initial_process_binary_phys_addr != Null<KPhysicalAddress>);
return g_initial_process_binary_size;
}
u64 GetInitialProcessIdMin() {
return g_initial_process_id_min;
}
@ -282,18 +305,15 @@ namespace ams::kern {
LoadInitialProcessBinaryHeader();
if (g_initial_process_binary_header.num_processes > 0) {
/* Ensure that we have a non-zero size. */
const size_t expected_size = g_initial_process_binary_size;
MESOSPHERE_INIT_ABORT_UNLESS(expected_size != 0);
/* Ensure that the size we need to reserve is as we expect it to be. */
const size_t total_size = util::AlignUp(g_initial_process_binary_header.size, PageSize);
MESOSPHERE_ABORT_UNLESS(total_size == expected_size);
MESOSPHERE_ABORT_UNLESS(total_size <= InitialProcessBinarySizeMax);
/* Reserve pages for the initial process binary from the system resource limit. */
const size_t total_size = util::AlignUp(g_initial_process_binary_header.size, PageSize);
MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, total_size));
/* The initial process binary is potentially over-allocated, so free any extra pages. */
if (total_size < InitialProcessBinarySizeMax) {
Kernel::GetMemoryManager().Close(KMemoryLayout::GetLinearPhysicalAddress(g_initial_process_binary_address + total_size), (InitialProcessBinarySizeMax - total_size) / PageSize);
}
return total_size;
} else {
return 0;

View File

@ -118,12 +118,12 @@ namespace ams::kern {
const size_t cur_size = std::min(remaining, info.GetEndAddress() - GetInteger(cur_address));
/* Read the memory. */
if (info.GetSvcState() != ams::svc::MemoryState_Io) {
if (info.GetState() != KMemoryState_Io) {
/* The memory is normal memory. */
R_TRY(target_pt.ReadDebugMemory(GetVoidPointer(buffer), cur_address, cur_size));
} else {
/* The memory is IO memory. */
R_TRY(target_pt.ReadDebugIoMemory(GetVoidPointer(buffer), cur_address, cur_size, info.GetState()));
R_TRY(target_pt.ReadDebugIoMemory(GetVoidPointer(buffer), cur_address, cur_size));
}
/* Advance. */
@ -181,12 +181,12 @@ namespace ams::kern {
const size_t cur_size = std::min(remaining, info.GetEndAddress() - GetInteger(cur_address));
/* Read the memory. */
if (info.GetSvcState() != ams::svc::MemoryState_Io) {
if (info.GetState() != KMemoryState_Io) {
/* The memory is normal memory. */
R_TRY(target_pt.WriteDebugMemory(cur_address, GetVoidPointer(buffer), cur_size));
} else {
/* The memory is IO memory. */
R_TRY(target_pt.WriteDebugIoMemory(cur_address, GetVoidPointer(buffer), cur_size, info.GetState()));
R_TRY(target_pt.WriteDebugIoMemory(cur_address, GetVoidPointer(buffer), cur_size));
}
/* Advance. */

View File

@ -73,120 +73,6 @@ namespace ams::kern {
}
}
NOINLINE void LoadInitialProcessSegment(const KPageGroup &pg, size_t seg_offset, size_t seg_size, size_t binary_size, KVirtualAddress data, bool compressed) {
/* Save the original binary extents, for later use. */
const KPhysicalAddress binary_phys = KMemoryLayout::GetLinearPhysicalAddress(data);
/* Create a page group representing the segment. */
KPageGroup segment_pg(Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer());
if (size_t remaining_size = util::AlignUp(seg_size, PageSize); remaining_size != 0) {
/* Find the pages whose data corresponds to the segment. */
size_t cur_offset = 0;
for (auto it = pg.begin(); it != pg.end() && remaining_size > 0; ++it) {
/* Get the current size. */
const size_t cur_size = it->GetSize();
/* Determine if the offset is in range. */
const size_t rel_diff = seg_offset - cur_offset;
const bool is_before = cur_offset <= seg_offset;
cur_offset += cur_size;
if (is_before && seg_offset < cur_offset) {
/* It is, so add the block. */
const size_t block_size = std::min<size_t>(cur_size - rel_diff, remaining_size);
MESOSPHERE_R_ABORT_UNLESS(segment_pg.AddBlock(it->GetAddress() + rel_diff, block_size / PageSize));
/* Advance. */
cur_offset = seg_offset + block_size;
remaining_size -= block_size;
seg_offset += block_size;
}
}
}
/* Setup the new page group's memory so that we can load the segment. */
{
KVirtualAddress last_block = Null<KVirtualAddress>;
KVirtualAddress last_data = Null<KVirtualAddress>;
size_t last_copy_size = 0;
size_t last_clear_size = 0;
size_t remaining_copy_size = binary_size;
for (const auto &block : segment_pg) {
/* Get the current block extents. */
const auto block_addr = block.GetAddress();
const size_t block_size = block.GetSize();
if (remaining_copy_size > 0) {
/* Determine if we need to copy anything. */
const size_t cur_size = std::min<size_t>(block_size, remaining_copy_size);
/* NOTE: The first block may potentially overlap the binary we want to copy to. */
/* Consider e.g. the case where the overall compressed image has size 0x40000, seg_offset is 0x30000, and binary_size is > 0x20000. */
/* Suppose too that data points, say, 0x18000 into the compressed image. */
/* Suppose finally that we simply naively copy in order. */
/* The first iteration of this loop will perform an 0x10000 copy from image+0x18000 to image + 0x30000 (as there is no overlap). */
/* The second iteration will perform a copy from image+0x28000 to <allocated pages>. */
/* However, the first copy will have trashed the data in the second copy. */
/* Thus, we must copy the first block after-the-fact to avoid potentially trashing data in the overlap case. */
/* It is guaranteed by pre-condition that only the very first block can overlap with the physical binary, so we can simply memmove it at the end. */
if (last_block != Null<KVirtualAddress>) {
/* This is guaranteed by pre-condition, but for ease of debugging, check for no overlap. */
MESOSPHERE_ASSERT(!util::HasOverlap(GetInteger(binary_phys), binary_size, GetInteger(block_addr), cur_size));
MESOSPHERE_UNUSED(binary_phys);
/* We need to copy. */
std::memcpy(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block_addr)), GetVoidPointer(data), cur_size);
/* If we need to, clear past where we're copying. */
if (cur_size != block_size) {
std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block_addr + cur_size)), 0, block_size - cur_size);
}
/* Advance. */
remaining_copy_size -= cur_size;
data += cur_size;
} else {
/* Save the first block, which may potentially overlap, so that we can copy it later. */
last_block = KMemoryLayout::GetLinearVirtualAddress(block_addr);
last_data = data;
last_copy_size = cur_size;
last_clear_size = block_size - cur_size;
/* Advance. */
remaining_copy_size -= cur_size;
data += cur_size;
}
} else {
/* We don't have data to copy, so we should just clear the pages. */
std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block_addr)), 0, block_size);
}
}
/* Handle a last block. */
if (last_copy_size != 0) {
if (last_block != last_data) {
std::memmove(GetVoidPointer(last_block), GetVoidPointer(last_data), last_copy_size);
}
if (last_clear_size != 0) {
std::memset(GetVoidPointer(last_block + last_copy_size), 0, last_clear_size);
}
}
}
/* If compressed, uncompress the data. */
if (compressed) {
/* Get the temporary region. */
const auto &temp_region = KMemoryLayout::GetTempRegion();
MESOSPHERE_ABORT_UNLESS(temp_region.GetEndAddress() != 0);
/* Map the process's memory into the temporary region. */
KProcessAddress temp_address = Null<KProcessAddress>;
MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().MapPageGroup(std::addressof(temp_address), segment_pg, temp_region.GetAddress(), temp_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite));
ON_SCOPE_EXIT { MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().UnmapPageGroup(temp_address, segment_pg, KMemoryState_Kernel)); };
/* Uncompress the data. */
BlzUncompress(GetVoidPointer(temp_address + binary_size));
}
}
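The overlap NOTE above reduces to the usual memcpy-versus-memmove rule; a trivial standalone illustration (illustrative buffer, not kernel code) of why the potentially overlapping first block is deferred and fixed up with memmove:

#include <cstring>
#include <cstdio>

int main() {
    char buf[16] = "0123456789";
    /* Shift the ten bytes four positions to the right inside the same buffer. The ranges  */
    /* overlap, so memcpy would be undefined behavior; memmove handles the overlap safely. */
    std::memmove(buf + 4, buf, 10);
    buf[14] = '\0';
    std::printf("%s\n", buf + 4); /* prints 0123456789 */
    return 0;
}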
}
Result KInitialProcessReader::MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const {
@ -232,8 +118,6 @@ namespace ams::kern {
out->program_id = m_kip_header.GetProgramId();
out->version = m_kip_header.GetVersion();
out->flags = 0;
out->reslimit = ams::svc::InvalidHandle;
out->system_resource_num_pages = 0;
MESOSPHERE_ABORT_UNLESS((out->code_address / PageSize) + out->code_num_pages <= (map_end / PageSize));
/* Copy name field. */
@ -262,55 +146,42 @@ namespace ams::kern {
R_SUCCEED();
}
void KInitialProcessReader::Load(const KPageGroup &pg, KVirtualAddress data) const {
Result KInitialProcessReader::Load(KProcessAddress address, const ams::svc::CreateProcessParameter &params, KProcessAddress src) const {
/* Prepare to layout the data. */
const KVirtualAddress rx_data = data;
const KVirtualAddress ro_data = rx_data + m_kip_header.GetRxCompressedSize();
const KVirtualAddress rw_data = ro_data + m_kip_header.GetRoCompressedSize();
const size_t rx_size = m_kip_header.GetRxSize();
const size_t ro_size = m_kip_header.GetRoSize();
const size_t rw_size = m_kip_header.GetRwSize();
const KProcessAddress rx_address = address + m_kip_header.GetRxAddress();
const KProcessAddress ro_address = address + m_kip_header.GetRoAddress();
const KProcessAddress rw_address = address + m_kip_header.GetRwAddress();
const u8 *rx_binary = GetPointer<const u8>(src);
const u8 *ro_binary = rx_binary + m_kip_header.GetRxCompressedSize();
const u8 *rw_binary = ro_binary + m_kip_header.GetRoCompressedSize();
/* If necessary, setup bss. */
if (const size_t bss_size = m_kip_header.GetBssSize(); bss_size > 0) {
/* Determine how many additional pages are needed for bss. */
const u64 rw_end = util::AlignUp<u64>(m_kip_header.GetRwAddress() + m_kip_header.GetRwSize(), PageSize);
const u64 bss_end = util::AlignUp<u64>(m_kip_header.GetBssAddress() + m_kip_header.GetBssSize(), PageSize);
if (rw_end != bss_end) {
/* Find the pages corresponding to bss. */
size_t cur_offset = 0;
size_t remaining_size = bss_end - rw_end;
size_t bss_offset = rw_end - m_kip_header.GetRxAddress();
for (auto it = pg.begin(); it != pg.end() && remaining_size > 0; ++it) {
/* Get the current size. */
const size_t cur_size = it->GetSize();
/* Determine if the offset is in range. */
const size_t rel_diff = bss_offset - cur_offset;
const bool is_before = cur_offset <= bss_offset;
cur_offset += cur_size;
if (is_before && bss_offset < cur_offset) {
/* It is, so clear the bss range. */
const size_t block_size = std::min<size_t>(cur_size - rel_diff, remaining_size);
std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(it->GetAddress() + rel_diff)), 0, block_size);
/* Advance. */
cur_offset = bss_offset + block_size;
remaining_size -= block_size;
bss_offset += block_size;
}
}
/* Copy text. */
if (util::AlignUp(m_kip_header.GetRxSize(), PageSize)) {
std::memmove(GetVoidPointer(rx_address), rx_binary, m_kip_header.GetRxCompressedSize());
if (m_kip_header.IsRxCompressed()) {
BlzUncompress(GetVoidPointer(rx_address + m_kip_header.GetRxCompressedSize()));
}
}
/* Load .rwdata. */
LoadInitialProcessSegment(pg, m_kip_header.GetRwAddress() - m_kip_header.GetRxAddress(), rw_size, m_kip_header.GetRwCompressedSize(), rw_data, m_kip_header.IsRwCompressed());
/* Copy rodata. */
if (util::AlignUp(m_kip_header.GetRoSize(), PageSize)) {
std::memmove(GetVoidPointer(ro_address), ro_binary, m_kip_header.GetRoCompressedSize());
if (m_kip_header.IsRoCompressed()) {
BlzUncompress(GetVoidPointer(ro_address + m_kip_header.GetRoCompressedSize()));
}
}
/* Load .rodata. */
LoadInitialProcessSegment(pg, m_kip_header.GetRoAddress() - m_kip_header.GetRxAddress(), ro_size, m_kip_header.GetRoCompressedSize(), ro_data, m_kip_header.IsRoCompressed());
/* Copy rwdata. */
if (util::AlignUp(m_kip_header.GetRwSize(), PageSize)) {
std::memmove(GetVoidPointer(rw_address), rw_binary, m_kip_header.GetRwCompressedSize());
if (m_kip_header.IsRwCompressed()) {
BlzUncompress(GetVoidPointer(rw_address + m_kip_header.GetRwCompressedSize()));
}
}
/* Load .text. */
LoadInitialProcessSegment(pg, m_kip_header.GetRxAddress() - m_kip_header.GetRxAddress(), rx_size, m_kip_header.GetRxCompressedSize(), rx_data, m_kip_header.IsRxCompressed());
MESOSPHERE_UNUSED(params);
R_SUCCEED();
}
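A standalone restatement of the segment layout arithmetic above, with hypothetical header values: destinations are process-relative segment addresses, while sources sit back to back in the staged binary at cumulative compressed offsets.

#include <cstdint>
#include <cstdio>

int main() {
    /* Hypothetical KIP header values. */
    const uint64_t rx_addr = 0x0000, rx_comp = 0x3000;
    const uint64_t ro_addr = 0x4000, ro_comp = 0x1000;
    const uint64_t rw_addr = 0x6000;
    const uint64_t process_base = 0x80000000, src = 0x90000000;

    std::printf("rx: dst=%#llx src=%#llx\n",
                (unsigned long long)(process_base + rx_addr), (unsigned long long)(src));
    std::printf("ro: dst=%#llx src=%#llx\n",
                (unsigned long long)(process_base + ro_addr), (unsigned long long)(src + rx_comp));
    std::printf("rw: dst=%#llx src=%#llx\n",
                (unsigned long long)(process_base + rw_addr), (unsigned long long)(src + rx_comp + ro_comp));
    return 0;
}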
Result KInitialProcessReader::SetMemoryPermissions(KProcessPageTable &page_table, const ams::svc::CreateProcessParameter &params) const {

View File

@ -21,8 +21,7 @@ namespace ams::kern {
constexpr const std::pair<KMemoryState, const char *> MemoryStateNames[] = {
{KMemoryState_Free , "----- Free -----"},
{KMemoryState_IoMemory , "IoMemory "},
{KMemoryState_IoRegister , "IoRegister "},
{KMemoryState_Io , "Io "},
{KMemoryState_Static , "Static "},
{KMemoryState_Code , "Code "},
{KMemoryState_CodeData , "CodeData "},
@ -223,7 +222,7 @@ namespace ams::kern {
}
/* Update block state. */
it->Update(state, perm, attr, it->GetAddress() == address, set_disable_attr, clear_disable_attr);
it->Update(state, perm, attr, cur_address == address, set_disable_attr, clear_disable_attr);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
}
@ -233,7 +232,7 @@ namespace ams::kern {
this->CoalesceForUpdate(allocator, address, num_pages);
}
void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr, KMemoryBlockDisableMergeAttribute clear_disable_attr) {
void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr) {
/* Ensure for auditing that we never end up with an invalid tree. */
KScopedMemoryBlockManagerAuditor auditor(this);
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
@ -270,7 +269,7 @@ namespace ams::kern {
}
/* Update block state. */
it->Update(state, perm, attr, it->GetAddress() == address, set_disable_attr, clear_disable_attr);
it->Update(state, perm, attr, false, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
} else {
@ -336,62 +335,6 @@ namespace ams::kern {
this->CoalesceForUpdate(allocator, address, num_pages);
}
void KMemoryBlockManager::UpdateAttribute(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, u32 mask, u32 attr) {
/* Ensure for auditing that we never end up with an invalid tree. */
KScopedMemoryBlockManagerAuditor auditor(this);
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
KMemoryInfo cur_info = it->GetMemoryInfo();
if ((it->GetAttribute() & mask) != attr) {
/* If we need to, create a new block before and insert it. */
if (cur_info.GetAddress() != GetInteger(cur_address)) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
cur_info = it->GetMemoryInfo();
cur_address = cur_info.GetAddress();
}
/* If we need to, create a new block after and insert it. */
if (cur_info.GetSize() > remaining_size) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
cur_info = it->GetMemoryInfo();
}
/* Update block state. */
it->UpdateAttribute(mask, attr);
cur_address += cur_info.GetSize();
remaining_pages -= cur_info.GetNumPages();
} else {
/* If we already have the right attributes, just advance. */
if (cur_address + remaining_size < cur_info.GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages = (cur_address + remaining_size - cur_info.GetEndAddress()) / PageSize;
cur_address = cur_info.GetEndAddress();
}
}
it++;
}
this->CoalesceForUpdate(allocator, address, num_pages);
}
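For context on the split-then-update shape used by UpdateAttribute (and the other Update functions in this file), here is a minimal standalone analogue with std::map standing in for the kernel's intrusive tree; names and values are illustrative only.

#include <cstdint>
#include <cstdio>
#include <iterator>
#include <map>

int main() {
    struct Block { uint64_t end; int attr; };
    /* key = block start address */
    std::map<uint64_t, Block> blocks{ { 0x0000, { 0x10000, 0 } } };

    const uint64_t addr = 0x4000, end = 0xC000;
    const int new_attr = 1;

    /* Split at the start edge if it falls strictly inside a block. */
    auto it = std::prev(blocks.upper_bound(addr));
    if (it->first < addr) {
        const Block right{ it->second.end, it->second.attr };
        it->second.end = addr;
        it = blocks.emplace(addr, right).first;
    }
    /* Split at the end edge if it falls strictly inside a block. */
    if (it->second.end > end) {
        const Block right{ it->second.end, it->second.attr };
        it->second.end = end;
        blocks.emplace(end, right);
    }
    /* Update the now fully covered block(s); the real code coalesces afterwards. */
    for (auto cur = blocks.find(addr); cur != blocks.end() && cur->first < end; ++cur) {
        cur->second.attr = new_attr;
    }

    for (const auto &[start, b] : blocks) {
        std::printf("[%#llx, %#llx) attr=%d\n",
                    (unsigned long long)start, (unsigned long long)b.end, b.attr);
    }
    return 0; /* prints three blocks: [0,0x4000) attr=0, [0x4000,0xC000) attr=1, [0xC000,0x10000) attr=0 */
}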
/* Debug. */
bool KMemoryBlockManager::CheckState() const {
/* If we fail, we should dump blocks. */

View File

@ -108,8 +108,7 @@ namespace ams::kern {
/* Free each region to its corresponding heap. */
size_t reserved_sizes[MaxManagerCount] = {};
const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress();
const size_t ini_size = GetInitialProcessBinarySize();
const KPhysicalAddress ini_end = ini_start + ini_size;
const KPhysicalAddress ini_end = ini_start + InitialProcessBinarySizeMax;
const KPhysicalAddress ini_last = ini_end - 1;
for (const auto &it : KMemoryLayout::GetPhysicalMemoryRegionTree()) {
if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
@ -127,13 +126,13 @@ namespace ams::kern {
}
/* Open/reserve the ini memory. */
manager.OpenFirst(ini_start, ini_size / PageSize);
reserved_sizes[it.GetAttributes()] += ini_size;
manager.OpenFirst(ini_start, InitialProcessBinarySizeMax / PageSize);
reserved_sizes[it.GetAttributes()] += InitialProcessBinarySizeMax;
/* Free memory after the ini to the heap. */
if (ini_last != cur_last) {
MESOSPHERE_ABORT_UNLESS(cur_end != Null<KPhysicalAddress>);
manager.Free(ini_end, (cur_end - ini_end) / PageSize);
manager.Free(ini_end, cur_end - ini_end);
}
} else {
/* Ensure there's no partial overlap with the ini image. */

View File

@ -368,77 +368,77 @@ namespace ams::kern {
cpu::InvalidateEntireInstructionCache();
}
KProcessAddress KPageTableBase::GetRegionAddress(ams::svc::MemoryState state) const {
KProcessAddress KPageTableBase::GetRegionAddress(KMemoryState state) const {
switch (state) {
case ams::svc::MemoryState_Free:
case ams::svc::MemoryState_Kernel:
case KMemoryState_Free:
case KMemoryState_Kernel:
return m_address_space_start;
case ams::svc::MemoryState_Normal:
case KMemoryState_Normal:
return m_heap_region_start;
case ams::svc::MemoryState_Ipc:
case ams::svc::MemoryState_NonSecureIpc:
case ams::svc::MemoryState_NonDeviceIpc:
case KMemoryState_Ipc:
case KMemoryState_NonSecureIpc:
case KMemoryState_NonDeviceIpc:
return m_alias_region_start;
case ams::svc::MemoryState_Stack:
case KMemoryState_Stack:
return m_stack_region_start;
case ams::svc::MemoryState_Static:
case ams::svc::MemoryState_ThreadLocal:
case KMemoryState_Static:
case KMemoryState_ThreadLocal:
return m_kernel_map_region_start;
case ams::svc::MemoryState_Io:
case ams::svc::MemoryState_Shared:
case ams::svc::MemoryState_AliasCode:
case ams::svc::MemoryState_AliasCodeData:
case ams::svc::MemoryState_Transfered:
case ams::svc::MemoryState_SharedTransfered:
case ams::svc::MemoryState_SharedCode:
case ams::svc::MemoryState_GeneratedCode:
case ams::svc::MemoryState_CodeOut:
case ams::svc::MemoryState_Coverage:
case ams::svc::MemoryState_Insecure:
case KMemoryState_Io:
case KMemoryState_Shared:
case KMemoryState_AliasCode:
case KMemoryState_AliasCodeData:
case KMemoryState_Transfered:
case KMemoryState_SharedTransfered:
case KMemoryState_SharedCode:
case KMemoryState_GeneratedCode:
case KMemoryState_CodeOut:
case KMemoryState_Coverage:
case KMemoryState_Insecure:
return m_alias_code_region_start;
case ams::svc::MemoryState_Code:
case ams::svc::MemoryState_CodeData:
case KMemoryState_Code:
case KMemoryState_CodeData:
return m_code_region_start;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
size_t KPageTableBase::GetRegionSize(ams::svc::MemoryState state) const {
size_t KPageTableBase::GetRegionSize(KMemoryState state) const {
switch (state) {
case ams::svc::MemoryState_Free:
case ams::svc::MemoryState_Kernel:
case KMemoryState_Free:
case KMemoryState_Kernel:
return m_address_space_end - m_address_space_start;
case ams::svc::MemoryState_Normal:
case KMemoryState_Normal:
return m_heap_region_end - m_heap_region_start;
case ams::svc::MemoryState_Ipc:
case ams::svc::MemoryState_NonSecureIpc:
case ams::svc::MemoryState_NonDeviceIpc:
case KMemoryState_Ipc:
case KMemoryState_NonSecureIpc:
case KMemoryState_NonDeviceIpc:
return m_alias_region_end - m_alias_region_start;
case ams::svc::MemoryState_Stack:
case KMemoryState_Stack:
return m_stack_region_end - m_stack_region_start;
case ams::svc::MemoryState_Static:
case ams::svc::MemoryState_ThreadLocal:
case KMemoryState_Static:
case KMemoryState_ThreadLocal:
return m_kernel_map_region_end - m_kernel_map_region_start;
case ams::svc::MemoryState_Io:
case ams::svc::MemoryState_Shared:
case ams::svc::MemoryState_AliasCode:
case ams::svc::MemoryState_AliasCodeData:
case ams::svc::MemoryState_Transfered:
case ams::svc::MemoryState_SharedTransfered:
case ams::svc::MemoryState_SharedCode:
case ams::svc::MemoryState_GeneratedCode:
case ams::svc::MemoryState_CodeOut:
case ams::svc::MemoryState_Coverage:
case ams::svc::MemoryState_Insecure:
case KMemoryState_Io:
case KMemoryState_Shared:
case KMemoryState_AliasCode:
case KMemoryState_AliasCodeData:
case KMemoryState_Transfered:
case KMemoryState_SharedTransfered:
case KMemoryState_SharedCode:
case KMemoryState_GeneratedCode:
case KMemoryState_CodeOut:
case KMemoryState_Coverage:
case KMemoryState_Insecure:
return m_alias_code_region_end - m_alias_code_region_start;
case ams::svc::MemoryState_Code:
case ams::svc::MemoryState_CodeData:
case KMemoryState_Code:
case KMemoryState_CodeData:
return m_code_region_end - m_code_region_start;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
bool KPageTableBase::CanContain(KProcessAddress addr, size_t size, ams::svc::MemoryState state) const {
bool KPageTableBase::CanContain(KProcessAddress addr, size_t size, KMemoryState state) const {
const KProcessAddress end = addr + size;
const KProcessAddress last = end - 1;
@ -449,32 +449,32 @@ namespace ams::kern {
const bool is_in_heap = !(end <= m_heap_region_start || m_heap_region_end <= addr || m_heap_region_start == m_heap_region_end);
const bool is_in_alias = !(end <= m_alias_region_start || m_alias_region_end <= addr || m_alias_region_start == m_alias_region_end);
switch (state) {
case ams::svc::MemoryState_Free:
case ams::svc::MemoryState_Kernel:
case KMemoryState_Free:
case KMemoryState_Kernel:
return is_in_region;
case ams::svc::MemoryState_Io:
case ams::svc::MemoryState_Static:
case ams::svc::MemoryState_Code:
case ams::svc::MemoryState_CodeData:
case ams::svc::MemoryState_Shared:
case ams::svc::MemoryState_AliasCode:
case ams::svc::MemoryState_AliasCodeData:
case ams::svc::MemoryState_Stack:
case ams::svc::MemoryState_ThreadLocal:
case ams::svc::MemoryState_Transfered:
case ams::svc::MemoryState_SharedTransfered:
case ams::svc::MemoryState_SharedCode:
case ams::svc::MemoryState_GeneratedCode:
case ams::svc::MemoryState_CodeOut:
case ams::svc::MemoryState_Coverage:
case ams::svc::MemoryState_Insecure:
case KMemoryState_Io:
case KMemoryState_Static:
case KMemoryState_Code:
case KMemoryState_CodeData:
case KMemoryState_Shared:
case KMemoryState_AliasCode:
case KMemoryState_AliasCodeData:
case KMemoryState_Stack:
case KMemoryState_ThreadLocal:
case KMemoryState_Transfered:
case KMemoryState_SharedTransfered:
case KMemoryState_SharedCode:
case KMemoryState_GeneratedCode:
case KMemoryState_CodeOut:
case KMemoryState_Coverage:
case KMemoryState_Insecure:
return is_in_region && !is_in_heap && !is_in_alias;
case ams::svc::MemoryState_Normal:
case KMemoryState_Normal:
MESOSPHERE_ASSERT(is_in_heap);
return is_in_region && !is_in_alias;
case ams::svc::MemoryState_Ipc:
case ams::svc::MemoryState_NonSecureIpc:
case ams::svc::MemoryState_NonDeviceIpc:
case KMemoryState_Ipc:
case KMemoryState_NonSecureIpc:
case KMemoryState_NonDeviceIpc:
MESOSPHERE_ASSERT(is_in_alias);
return is_in_region && !is_in_heap;
default:
@ -527,12 +527,17 @@ namespace ams::kern {
R_SUCCEED();
}
Result KPageTableBase::CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr) const {
Result KPageTableBase::CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr) const {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Get information about the first block. */
const KProcessAddress last_addr = addr + size - 1;
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
KMemoryInfo info = it->GetMemoryInfo();
/* If the start address isn't aligned, we need a block. */
const size_t blocks_for_start_align = (util::AlignDown(GetInteger(addr), PageSize) != info.GetAddress()) ? 1 : 0;
/* Validate all blocks in the range have correct state. */
const KMemoryState first_state = info.m_state;
const KMemoryPermission first_perm = info.m_permission;
@ -557,6 +562,9 @@ namespace ams::kern {
info = it->GetMemoryInfo();
}
/* If the end address isn't aligned, we need a block. */
const size_t blocks_for_end_align = (util::AlignUp(GetInteger(addr) + size, PageSize) != info.GetEndAddress()) ? 1 : 0;
/* Write output state. */
if (out_state != nullptr) {
*out_state = first_state;
@ -567,29 +575,9 @@ namespace ams::kern {
if (out_attr != nullptr) {
*out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
}
/* If the end address isn't aligned, we need a block. */
if (out_blocks_needed != nullptr) {
const size_t blocks_for_end_align = (util::AlignDown(GetInteger(last_addr), PageSize) + PageSize != info.GetEndAddress()) ? 1 : 0;
*out_blocks_needed = blocks_for_end_align;
*out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
}
R_SUCCEED();
}
Result KPageTableBase::CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr) const {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Check memory state. */
const KProcessAddress last_addr = addr + size - 1;
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr))
/* If the start address isn't aligned, we need a block. */
if (out_blocks_needed != nullptr && util::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
++(*out_blocks_needed);
}
R_SUCCEED();
}
@ -717,7 +705,7 @@ namespace ams::kern {
R_SUCCEED();
}
Result KPageTableBase::QueryMappingImpl(KProcessAddress *out, KPhysicalAddress address, size_t size, ams::svc::MemoryState state) const {
Result KPageTableBase::QueryMappingImpl(KProcessAddress *out, KPhysicalAddress address, size_t size, KMemoryState state) const {
MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());
MESOSPHERE_ASSERT(out != nullptr);
@ -751,7 +739,7 @@ namespace ams::kern {
if (cur_valid && cur_entry.phys_addr <= address && address + size <= cur_entry.phys_addr + cur_entry.block_size) {
/* Check if this region is valid. */
const KProcessAddress mapped_address = (region_start + tot_size) + (address - cur_entry.phys_addr);
if (R_SUCCEEDED(this->CheckMemoryState(mapped_address, size, KMemoryState_Mask, static_cast<KMemoryState>(util::ToUnderlying(state)), KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None))) {
if (R_SUCCEEDED(this->CheckMemoryState(mapped_address, size, KMemoryState_All, state, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None))) {
/* It is! */
*out = mapped_address;
R_SUCCEED();
@ -987,7 +975,7 @@ namespace ams::kern {
/* Verify that the destination memory is aliasable code. */
size_t num_dst_allocator_blocks;
R_TRY(this->CheckMemoryStateContiguous(std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState_FlagCanCodeAlias, KMemoryState_FlagCanCodeAlias, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All & ~KMemoryAttribute_PermissionLocked, KMemoryAttribute_None));
R_TRY(this->CheckMemoryStateContiguous(std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState_FlagCanCodeAlias, KMemoryState_FlagCanCodeAlias, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));
/* Determine whether any pages being unmapped are code. */
bool any_code_pages = false;
@ -1649,10 +1637,9 @@ namespace ams::kern {
KMemoryAttribute old_attr;
size_t num_allocator_blocks;
constexpr u32 AttributeTestMask = ~(KMemoryAttribute_SetMask | KMemoryAttribute_DeviceShared);
const u32 state_test_mask = ((mask & KMemoryAttribute_Uncached) ? static_cast<u32>(KMemoryState_FlagCanChangeAttribute) : 0) | ((mask & KMemoryAttribute_PermissionLocked) ? static_cast<u32>(KMemoryState_FlagCanPermissionLock) : 0);
R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr), std::addressof(num_allocator_blocks),
addr, size,
state_test_mask, state_test_mask,
KMemoryState_FlagCanChangeAttribute, KMemoryState_FlagCanChangeAttribute,
KMemoryPermission_None, KMemoryPermission_None,
AttributeTestMask, KMemoryAttribute_None, ~AttributeTestMask));
@ -1664,18 +1651,15 @@ namespace ams::kern {
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* If we need to, perform a change attribute operation. */
if ((mask & KMemoryAttribute_Uncached) != 0) {
/* Determine the new attribute. */
const KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(((old_attr & ~mask) | (attr & mask)));
/* Perform operation. */
const KPageProperties properties = { old_perm, false, (new_attr & KMemoryAttribute_Uncached) != 0, DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissionsAndRefreshAndFlush, false));
}
/* Update the blocks. */
m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages, mask, attr);
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm, new_attr, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);
R_SUCCEED();
}
@ -1915,7 +1899,7 @@ namespace ams::kern {
R_SUCCEED();
}
Result KPageTableBase::MapIoImpl(KProcessAddress *out, PageLinkedList *page_list, KPhysicalAddress phys_addr, size_t size, KMemoryState state, KMemoryPermission perm) {
Result KPageTableBase::MapIoImpl(KProcessAddress *out, PageLinkedList *page_list, KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
/* Check pre-conditions. */
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
@ -1931,7 +1915,7 @@ namespace ams::kern {
const size_t region_size = m_kernel_map_region_end - m_kernel_map_region_start;
const size_t region_num_pages = region_size / PageSize;
MESOSPHERE_ASSERT(this->CanContain(region_start, region_size, state));
MESOSPHERE_ASSERT(this->CanContain(region_start, region_size, KMemoryState_Io));
/* Locate the memory region. */
const KMemoryRegion *region = KMemoryLayout::Find(phys_addr);
@ -1961,16 +1945,10 @@ namespace ams::kern {
/* Select an address to map at. */
KProcessAddress addr = Null<KProcessAddress>;
const size_t phys_alignment = std::min(std::min(util::GetAlignment(GetInteger(phys_addr)), util::GetAlignment(size)), MaxPhysicalMapAlignment);
for (s32 block_type = KPageTable::GetMaxBlockType(); block_type >= 0; block_type--) {
const size_t alignment = KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(block_type));
const KPhysicalAddress aligned_phys = util::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1;
if (aligned_phys <= phys_addr) {
continue;
}
const KPhysicalAddress last_aligned_paddr = util::AlignDown(GetInteger(last) + 1, alignment) - 1;
if (!(last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr)) {
if (alignment > phys_alignment) {
continue;
}
@ -1982,11 +1960,11 @@ namespace ams::kern {
R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory());
/* Check that we can map IO here. */
MESOSPHERE_ASSERT(this->CanContain(addr, size, state));
MESOSPHERE_ASSERT(this->CanContain(addr, size, KMemoryState_Io));
MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
/* Perform mapping operation. */
const KPageProperties properties = { perm, state == KMemoryState_IoRegister, false, DisableMergeAttribute_DisableHead };
const KPageProperties properties = { perm, true, false, DisableMergeAttribute_DisableHead };
R_TRY(this->Operate(page_list, addr, num_pages, phys_addr, true, properties, OperationType_Map, false));
/* Set the output address. */
@ -2009,10 +1987,10 @@ namespace ams::kern {
/* Map the io memory. */
KProcessAddress addr;
R_TRY(this->MapIoImpl(std::addressof(addr), updater.GetPageList(), phys_addr, size, KMemoryState_IoRegister, perm));
R_TRY(this->MapIoImpl(std::addressof(addr), updater.GetPageList(), phys_addr, size, perm));
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), addr, size / PageSize, KMemoryState_IoRegister, perm, KMemoryAttribute_Locked, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
m_memory_block_manager.Update(std::addressof(allocator), addr, size / PageSize, KMemoryState_Io, perm, KMemoryAttribute_Locked, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
/* We successfully mapped the pages. */
R_SUCCEED();
@ -2042,8 +2020,7 @@ namespace ams::kern {
R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, phys_addr, true, properties, OperationType_Map, false));
/* Update the blocks. */
const auto state = mapping == ams::svc::MemoryMapping_Memory ? KMemoryState_IoMemory : KMemoryState_IoRegister;
m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, state, perm, KMemoryAttribute_Locked, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, KMemoryState_Io, perm, KMemoryAttribute_Locked, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
/* We successfully mapped the pages. */
R_SUCCEED();
@ -2062,7 +2039,7 @@ namespace ams::kern {
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr), std::addressof(num_allocator_blocks),
dst_address, size,
KMemoryState_All, mapping == ams::svc::MemoryMapping_Memory ? KMemoryState_IoMemory : KMemoryState_IoRegister,
KMemoryState_All, KMemoryState_Io,
KMemoryPermission_None, KMemoryPermission_None,
KMemoryAttribute_All, KMemoryAttribute_Locked));
@ -2152,16 +2129,10 @@ namespace ams::kern {
/* Select an address to map at. */
KProcessAddress addr = Null<KProcessAddress>;
const size_t phys_alignment = std::min(std::min(util::GetAlignment(GetInteger(phys_addr)), util::GetAlignment(size)), MaxPhysicalMapAlignment);
for (s32 block_type = KPageTable::GetMaxBlockType(); block_type >= 0; block_type--) {
const size_t alignment = KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(block_type));
const KPhysicalAddress aligned_phys = util::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1;
if (aligned_phys <= phys_addr) {
continue;
}
const KPhysicalAddress last_aligned_paddr = util::AlignDown(GetInteger(last) + 1, alignment) - 1;
if (!(last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr)) {
if (alignment > phys_alignment) {
continue;
}
@ -2337,7 +2308,7 @@ namespace ams::kern {
KScopedPageTableUpdater updater(this);
/* Perform mapping operation. */
const KPageProperties properties = { perm, false, false, DisableMergeAttribute_DisableHead };
const KPageProperties properties = { perm, state == KMemoryState_Io, false, DisableMergeAttribute_DisableHead };
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
/* Update the blocks. */
@ -2372,7 +2343,7 @@ namespace ams::kern {
KScopedPageTableUpdater updater(this);
/* Perform mapping operation. */
const KPageProperties properties = { perm, false, false, DisableMergeAttribute_DisableHead };
const KPageProperties properties = { perm, state == KMemoryState_Io, false, DisableMergeAttribute_DisableHead };
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
/* Update the blocks. */
@ -2508,23 +2479,6 @@ namespace ams::kern {
R_SUCCEED();
}
Result KPageTableBase::InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size) {
/* Check pre-condition: this is being called on the current process. */
MESOSPHERE_ASSERT(this == std::addressof(GetCurrentProcess().GetPageTable().GetBasePageTable()));
/* Check that the region is in range. */
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check the memory state. */
R_TRY(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_Uncached, KMemoryAttribute_None));
/* Invalidate the data cache. */
R_RETURN(cpu::InvalidateDataCache(GetVoidPointer(address), size));
}
Result KPageTableBase::ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) {
/* Lightly validate the region is in range. */
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
@ -2699,7 +2653,7 @@ namespace ams::kern {
R_SUCCEED();
}
Result KPageTableBase::ReadIoMemoryImpl(void *buffer, KPhysicalAddress phys_addr, size_t size, KMemoryState state) {
Result KPageTableBase::ReadIoMemoryImpl(void *buffer, KPhysicalAddress phys_addr, size_t size) {
/* Check pre-conditions. */
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
@ -2713,7 +2667,7 @@ namespace ams::kern {
/* Temporarily map the io memory. */
KProcessAddress io_addr;
R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size, state, KMemoryPermission_UserRead));
R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size, KMemoryPermission_UserRead));
/* Ensure we unmap the io memory when we're done with it. */
ON_SCOPE_EXIT {
@ -2744,7 +2698,7 @@ namespace ams::kern {
R_SUCCEED();
}
Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, const void *buffer, size_t size, KMemoryState state) {
Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, const void *buffer, size_t size) {
/* Check pre-conditions. */
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
@ -2758,7 +2712,7 @@ namespace ams::kern {
/* Temporarily map the io memory. */
KProcessAddress io_addr;
R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size, state, KMemoryPermission_UserReadWrite));
R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size, KMemoryPermission_UserReadWrite));
/* Ensure we unmap the io memory when we're done with it. */
ON_SCOPE_EXIT {
@ -2789,7 +2743,7 @@ namespace ams::kern {
R_SUCCEED();
}
Result KPageTableBase::ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size, KMemoryState state) {
Result KPageTableBase::ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size) {
/* Lightly validate the range before doing anything else. */
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
@ -2801,7 +2755,7 @@ namespace ams::kern {
KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
/* Check that the desired range is readable io memory. */
R_TRY(this->CheckMemoryStateContiguous(address, size, KMemoryState_All, state, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None));
R_TRY(this->CheckMemoryStateContiguous(address, size, KMemoryState_All, KMemoryState_Io, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None));
/* Read the memory. */
u8 *dst = static_cast<u8 *>(buffer);
@ -2815,7 +2769,7 @@ namespace ams::kern {
const size_t cur_size = std::min<size_t>(last_address - address + 1, util::AlignDown(GetInteger(address) + PageSize, PageSize) - GetInteger(address));
/* Read. */
R_TRY(dst_page_table.ReadIoMemoryImpl(dst, phys_addr, cur_size, state));
R_TRY(dst_page_table.ReadIoMemoryImpl(dst, phys_addr, cur_size));
/* Advance. */
address += cur_size;
@ -2825,7 +2779,7 @@ namespace ams::kern {
R_SUCCEED();
}
Result KPageTableBase::WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size, KMemoryState state) {
Result KPageTableBase::WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size) {
/* Lightly validate the range before doing anything else. */
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
@ -2837,7 +2791,7 @@ namespace ams::kern {
KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
/* Check that the desired range is writable io memory. */
R_TRY(this->CheckMemoryStateContiguous(address, size, KMemoryState_All, state, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None));
R_TRY(this->CheckMemoryStateContiguous(address, size, KMemoryState_All, KMemoryState_Io, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None));
/* Read the memory. */
const u8 *src = static_cast<const u8 *>(buffer);
@ -2851,7 +2805,7 @@ namespace ams::kern {
const size_t cur_size = std::min<size_t>(last_address - address + 1, util::AlignDown(GetInteger(address) + PageSize, PageSize) - GetInteger(address));
/* Read. */
R_TRY(dst_page_table.WriteIoMemoryImpl(phys_addr, src, cur_size, state));
R_TRY(dst_page_table.WriteIoMemoryImpl(phys_addr, src, cur_size));
/* Advance. */
address += cur_size;
@ -2884,7 +2838,7 @@ namespace ams::kern {
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::ShareToDevice, KMemoryPermission_None);
/* Set whether the locked memory was io. */
*out_is_io = static_cast<ams::svc::MemoryState>(old_state & KMemoryState_Mask) == ams::svc::MemoryState_Io;
*out_is_io = old_state == KMemoryState_Io;
R_SUCCEED();
}
@ -4452,21 +4406,11 @@ namespace ams::kern {
/* If it's unmapped, we need to map it. */
if (info.GetState() == KMemoryState_Free) {
/* Determine the range to map. */
const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, cur_address == this->GetAliasRegionStart() ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_None };
size_t map_pages = std::min(KProcessAddress(info.GetEndAddress()) - cur_address, last_address + 1 - cur_address) / PageSize;
/* While we have pages to map, map them. */
{
/* Create a page group for the current mapping range. */
KPageGroup cur_pg(m_block_info_manager);
{
ON_RESULT_FAILURE {
cur_pg.OpenFirst();
cur_pg.Close();
};
size_t remain_pages = map_pages;
while (remain_pages > 0) {
while (map_pages > 0) {
/* Check if we're at the end of the physical block. */
if (pg_pages == 0) {
/* Ensure there are more pages to map. */
@ -4478,18 +4422,16 @@ namespace ams::kern {
pg_pages = pg_it->GetNumPages();
}
/* Add whatever we can to the current block. */
const size_t cur_pages = std::min(pg_pages, remain_pages);
R_TRY(cur_pg.AddBlock(pg_phys_addr + ((pg_pages - cur_pages) * PageSize), cur_pages));
/* Map whatever we can. */
const size_t cur_pages = std::min(pg_pages, map_pages);
R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_pages, pg_phys_addr, true, map_properties, OperationType_MapFirst, false));
/* Advance. */
remain_pages -= cur_pages;
pg_pages -= cur_pages;
}
}
cur_address += cur_pages * PageSize;
map_pages -= cur_pages;
/* Map the pages. */
R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages, cur_pg, map_properties, OperationType_MapFirstGroup, false));
pg_phys_addr += cur_pages * PageSize;
pg_pages -= cur_pages;
}
}
@ -4512,9 +4454,7 @@ namespace ams::kern {
/* Update the relevant memory blocks. */
m_memory_block_manager.UpdateIfMatch(std::addressof(allocator), address, size / PageSize,
KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None,
KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None,
address == this->GetAliasRegionStart() ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None,
KMemoryBlockDisableMergeAttribute_None);
KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None);
R_SUCCEED();
}
@ -4609,9 +4549,6 @@ namespace ams::kern {
/* Iterate over the memory, unmapping as we go. */
auto it = m_memory_block_manager.FindIterator(cur_address);
const auto clear_merge_attr = (it->GetState() == KMemoryState_Normal && it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address) ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None;
while (true) {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());
@ -4644,7 +4581,7 @@ namespace ams::kern {
m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, mapped_size);
/* Update memory blocks. */
m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, clear_merge_attr);
m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);
/* We succeeded. */
R_SUCCEED();


@ -263,47 +263,28 @@ namespace ams::kern {
MESOSPHERE_ASSERT(res_limit != nullptr);
MESOSPHERE_ABORT_UNLESS((params.code_num_pages * PageSize) / PageSize == static_cast<size_t>(params.code_num_pages));
/* Determine is application. */
const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication) != 0;
/* Set members. */
m_memory_pool = pool;
m_resource_limit = res_limit;
m_is_default_application_system_resource = false;
m_system_resource = std::addressof(is_app ? Kernel::GetApplicationSystemResource() : Kernel::GetSystemSystemResource());
m_is_immortal = immortal;
/* Setup our system resource. */
if (const size_t system_resource_num_pages = params.system_resource_num_pages; system_resource_num_pages != 0) {
/* Create a secure system resource. */
KSecureSystemResource *secure_resource = KSecureSystemResource::Create();
R_UNLESS(secure_resource != nullptr, svc::ResultOutOfResource());
ON_RESULT_FAILURE { secure_resource->Close(); };
/* Initialize the secure resource. */
R_TRY(secure_resource->Initialize(system_resource_num_pages * PageSize, m_resource_limit, m_memory_pool));
/* Set our system resource. */
m_system_resource = secure_resource;
} else {
/* Use the system-wide system resource. */
const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
m_system_resource = std::addressof(is_app ? Kernel::GetApplicationSystemResource() : Kernel::GetSystemSystemResource());
m_is_default_application_system_resource = is_app;
/* Open reference to the system resource. */
/* Open reference to our system resource. */
m_system_resource->Open();
}
/* Ensure we clean up our secure resource, if we fail. */
ON_RESULT_FAILURE { m_system_resource->Close(); };
/* Setup page table. */
/* NOTE: Nintendo passes process ID despite not having set it yet. */
/* This goes completely unused, but even so... */
{
const auto as_type = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
const bool enable_aslr = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit));
R_TRY(m_page_table.Initialize(m_process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit));
}
ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };
ON_RESULT_FAILURE { m_page_table.Finalize(); };
/* Ensure we can insert the code region. */
R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize, KMemoryState_Code), svc::ResultInvalidMemoryRegion());
@ -336,7 +317,6 @@ namespace ams::kern {
/* Set pool and resource limit. */
m_memory_pool = pool;
m_resource_limit = res_limit;
m_is_default_application_system_resource = false;
m_is_immortal = false;
/* Get the memory sizes. */
@ -368,8 +348,6 @@ namespace ams::kern {
const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
m_system_resource = std::addressof(is_app ? Kernel::GetApplicationSystemResource() : Kernel::GetSystemSystemResource());
m_is_default_application_system_resource = is_app;
/* Open reference to the system resource. */
m_system_resource->Open();
}
@ -378,11 +356,13 @@ namespace ams::kern {
ON_RESULT_FAILURE { m_system_resource->Close(); };
/* Setup page table. */
/* NOTE: Nintendo passes process ID despite not having set it yet. */
/* This goes completely unused, but even so... */
{
const auto as_type = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
const bool enable_aslr = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
R_TRY(m_page_table.Initialize(as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, code_size, m_system_resource, res_limit));
R_TRY(m_page_table.Initialize(m_process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, code_size, m_system_resource, res_limit));
}
ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };
@ -491,7 +471,7 @@ namespace ams::kern {
MESOSPHERE_LOG("KProcess::Exit() pid=%ld name=%-12s\n", m_process_id, m_name);
/* Register the process as a work task. */
KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_ExitProcess, this);
KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_Exit, this);
}
/* Exit the current thread. */
@ -536,7 +516,7 @@ namespace ams::kern {
MESOSPHERE_LOG("KProcess::Terminate() FAIL pid=%ld name=%-12s\n", m_process_id, m_name);
/* Register the process as a work task. */
KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_ExitProcess, this);
KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_Exit, this);
}
}
@ -868,7 +848,7 @@ namespace ams::kern {
size_t KProcess::GetUsedUserPhysicalMemorySize() const {
const size_t norm_size = m_page_table.GetNormalMemorySize();
const size_t other_size = m_code_size + m_main_thread_stack_size;
const size_t sec_size = this->GetRequiredSecureMemorySizeNonDefault();
const size_t sec_size = m_system_resource->IsSecureResource() ? static_cast<KSecureSystemResource *>(m_system_resource)->CalculateRequiredSecureMemorySize() : 0;
return norm_size + other_size + sec_size;
}
@ -876,20 +856,13 @@ namespace ams::kern {
size_t KProcess::GetTotalUserPhysicalMemorySize() const {
/* Get the amount of free and used size. */
const size_t free_size = m_resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax);
const size_t used_size = this->GetUsedUserPhysicalMemorySize();
const size_t max_size = m_max_process_memory;
/* Determine used size. */
/* NOTE: This does *not* check this->IsDefaultApplicationSystemResource(), unlike GetUsedUserPhysicalMemorySize(). */
const size_t norm_size = m_page_table.GetNormalMemorySize();
const size_t other_size = m_code_size + m_main_thread_stack_size;
const size_t sec_size = this->GetRequiredSecureMemorySize();
const size_t used_size = norm_size + other_size + sec_size;
/* NOTE: These function calls will recalculate, introducing a race...it is unclear why Nintendo does it this way. */
if (used_size + free_size > max_size) {
return max_size;
} else {
return free_size + this->GetUsedUserPhysicalMemorySize();
return free_size + used_size;
}
}
@ -903,20 +876,14 @@ namespace ams::kern {
size_t KProcess::GetTotalNonSystemUserPhysicalMemorySize() const {
/* Get the amount of free and used size. */
const size_t free_size = m_resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax);
const size_t used_size = this->GetUsedUserPhysicalMemorySize();
const size_t sec_size = m_system_resource->IsSecureResource() ? static_cast<KSecureSystemResource *>(m_system_resource)->CalculateRequiredSecureMemorySize() : 0;
const size_t max_size = m_max_process_memory;
/* Determine used size. */
/* NOTE: This does *not* check this->IsDefaultApplicationSystemResource(), unlike GetUsedUserPhysicalMemorySize(). */
const size_t norm_size = m_page_table.GetNormalMemorySize();
const size_t other_size = m_code_size + m_main_thread_stack_size;
const size_t sec_size = this->GetRequiredSecureMemorySize();
const size_t used_size = norm_size + other_size + sec_size;
/* NOTE: These function calls will recalculate, introducing a race...it is unclear why Nintendo does it this way. */
if (used_size + free_size > max_size) {
return max_size - this->GetRequiredSecureMemorySizeNonDefault();
return max_size - sec_size;
} else {
return free_size + this->GetUsedNonSystemUserPhysicalMemorySize();
return free_size + used_size - sec_size;
}
}


@ -476,6 +476,10 @@ namespace ams::kern {
m_parent->ClearRunningThread(this);
}
/* Signal. */
m_signaled = true;
KSynchronizationObject::NotifyAvailable();
/* Call the on thread termination handler. */
KThreadContext::OnThreadTerminating(this);
@ -503,13 +507,6 @@ namespace ams::kern {
cpu::SynchronizeCores(m_parent->GetPhysicalCoreMask());
}
/* Acquire the scheduler lock. */
KScopedSchedulerLock sl;
/* Signal. */
m_signaled = true;
KSynchronizationObject::NotifyAvailable();
/* Close the thread. */
this->Close();
}
@ -1331,7 +1328,7 @@ namespace ams::kern {
this->StartTermination();
/* Register the thread as a work task. */
KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_ExitThread, this);
KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_Exit, this);
}
MESOSPHERE_PANIC("KThread::Exit() would return");


@ -115,9 +115,8 @@ namespace ams::kern {
/* Perform more core-0 specific initialization. */
if (core_id == 0) {
/* Initialize the exit worker managers, so that threads and processes may exit cleanly. */
Kernel::GetWorkerTaskManager(KWorkerTaskManager::WorkerType_ExitThread).Initialize(KWorkerTaskManager::ExitWorkerPriority);
Kernel::GetWorkerTaskManager(KWorkerTaskManager::WorkerType_ExitProcess).Initialize(KWorkerTaskManager::ExitWorkerPriority);
/* Initialize the exit worker manager, so that threads and processes may exit cleanly. */
Kernel::GetWorkerTaskManager(KWorkerTaskManager::WorkerType_Exit).Initialize(KWorkerTaskManager::ExitWorkerPriority);
/* Setup so that we may sleep later, and reserve memory for secure applets. */
KSystemControl::InitializePhase2();


@ -102,11 +102,7 @@ namespace ams::kern::svc {
R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle());
/* Invalidate the cache. */
if (process.GetPointerUnsafe() == GetCurrentProcessPointer()) {
R_TRY(process->GetPageTable().InvalidateCurrentProcessDataCache(address, size));
} else {
R_TRY(process->GetPageTable().InvalidateProcessDataCache(address, size));
}
R_SUCCEED();
}


@ -127,7 +127,7 @@ namespace ams::kern::svc {
R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());
/* Verify that the mapping is in range. */
R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, ams::svc::MemoryState_Io), svc::ResultInvalidMemoryRegion());
R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, KMemoryState_Io), svc::ResultInvalidMemoryRegion());
/* Validate the map permission. */
R_UNLESS(IsValidIoRegionPermission(map_perm), svc::ResultInvalidNewMemoryPermission());
@ -156,7 +156,7 @@ namespace ams::kern::svc {
R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());
/* Verify that the mapping is in range. */
R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, ams::svc::MemoryState_Io), svc::ResultInvalidMemoryRegion());
R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, KMemoryState_Io), svc::ResultInvalidMemoryRegion());
/* Get the io region. */
KScopedAutoObject io_region = GetCurrentProcess().GetHandleTable().GetObject<KIoRegion>(io_region_handle);


@ -58,13 +58,10 @@ namespace ams::kern::svc {
R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());
/* Validate the attribute and mask. */
constexpr u32 SupportedMask = ams::svc::MemoryAttribute_Uncached | ams::svc::MemoryAttribute_PermissionLocked;
constexpr u32 SupportedMask = ams::svc::MemoryAttribute_Uncached;
R_UNLESS((mask | attr) == mask, svc::ResultInvalidCombination());
R_UNLESS((mask | attr | SupportedMask) == SupportedMask, svc::ResultInvalidCombination());
/* Check that permission locked is either being set or not masked. */
R_UNLESS((mask & ams::svc::MemoryAttribute_PermissionLocked) == (attr & ams::svc::MemoryAttribute_PermissionLocked), svc::ResultInvalidCombination());
/* Validate that the region is in range for the current process. */
auto &page_table = GetCurrentProcess().GetPageTable();
R_UNLESS(page_table.Contains(address, size), svc::ResultInvalidCurrentMemory());


@ -25,7 +25,6 @@ namespace ams::exosphere {
void ForceRebootToRcm();
void ForceRebootToIramPayload();
void ForceRebootToFatalError();
void ForceRebootByPmic();
void ForceShutdown();
bool IsRcmBugPatched();


@ -178,8 +178,6 @@
HANDLER(BuiltInWirelessOUIInfo, 137) \
HANDLER(WirelessAPOUIInfo, 138) \
HANDLER(EthernetAdapterOUIInfo, 139) \
HANDLER(NANDTypeInfo, 140) \
HANDLER(MicroSDTypeInfo, 141) \
#define AMS_ERPT_FOREACH_FIELD(HANDLER) \
HANDLER(TestU64, 0, Test, FieldType_NumericU64, FieldFlag_None ) \
@ -852,15 +850,4 @@
HANDLER(BuiltInWirelessOUI, 667, BuiltInWirelessOUIInfo, FieldType_String, FieldFlag_None ) \
HANDLER(WirelessAPOUI, 668, WirelessAPOUIInfo, FieldType_String, FieldFlag_None ) \
HANDLER(EthernetAdapterOUI, 669, EthernetAdapterOUIInfo, FieldType_String, FieldFlag_None ) \
HANDLER(FatFsBisSystemFatSafeControlResult, 670, FsProxyErrorInfo2, FieldType_NumericU8, FieldFlag_None ) \
HANDLER(FatFsBisSystemFatErrorNumber, 671, FsProxyErrorInfo2, FieldType_NumericI32, FieldFlag_None ) \
HANDLER(FatFsBisSystemFatSafeErrorNumber, 672, FsProxyErrorInfo2, FieldType_NumericI32, FieldFlag_None ) \
HANDLER(FatFsBisUserFatSafeControlResult, 673, FsProxyErrorInfo2, FieldType_NumericU8, FieldFlag_None ) \
HANDLER(FatFsBisUserFatErrorNumber, 674, FsProxyErrorInfo2, FieldType_NumericI32, FieldFlag_None ) \
HANDLER(FatFsBisUserFatSafeErrorNumber, 675, FsProxyErrorInfo2, FieldType_NumericI32, FieldFlag_None ) \
HANDLER(GpuCrashDump2, 676, GpuCrashInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(NANDType, 677, NANDTypeInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(MicroSDType, 678, MicroSDTypeInfo, FieldType_U8Array, FieldFlag_None ) \
HANDLER(GameCardLastDeactivateReasonResult, 679, GameCardErrorInfo, FieldType_NumericU32, FieldFlag_None ) \
HANDLER(GameCardLastDeactivateReason, 680, GameCardErrorInfo, FieldType_NumericU8, FieldFlag_None ) \


@ -111,14 +111,6 @@ namespace ams::erpt {
static_assert(util::is_pod<ReportFlagSet>::value);
static_assert(sizeof(ReportFlagSet) == sizeof(u32));
struct CreateReportOptionFlag {
using SubmitFsInfo = util::BitFlagSet<BITSIZEOF(u32), CreateReportOptionFlag>::Flag<0>;
};
using CreateReportOptionFlagSet = util::BitFlagSet<BITSIZEOF(u32), CreateReportOptionFlag>;
static_assert(util::is_pod<CreateReportOptionFlagSet>::value);
static_assert(sizeof(CreateReportOptionFlagSet) == sizeof(u32));
struct ReportInfo {
ReportType type;
ReportId id;


@ -32,10 +32,8 @@
AMS_SF_METHOD_INFO(C, H, 8, Result, ClearApplicationLaunchTime, (), (), hos::Version_6_0_0) \
AMS_SF_METHOD_INFO(C, H, 9, Result, SubmitAttachment, (ams::sf::Out<erpt::AttachmentId> out, const ams::sf::InBuffer &attachment_name, const ams::sf::InBuffer &attachment_data), (out, attachment_name, attachment_data), hos::Version_8_0_0) \
AMS_SF_METHOD_INFO(C, H, 10, Result, CreateReportWithAttachmentsDeprecated, (erpt::ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &str_buffer, const ams::sf::InBuffer &attachment_ids_buffer), (report_type, ctx_buffer, str_buffer, attachment_ids_buffer), hos::Version_8_0_0, hos::Version_10_2_0) \
AMS_SF_METHOD_INFO(C, H, 10, Result, CreateReportWithAttachmentsDeprecated2, (erpt::ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &str_buffer, const ams::sf::InBuffer &attachment_ids_buffer, Result result), (report_type, ctx_buffer, str_buffer, attachment_ids_buffer, result), hos::Version_11_0_0, hos::Version_16_1_0) \
AMS_SF_METHOD_INFO(C, H, 10, Result, CreateReportWithAttachments, (erpt::ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &str_buffer, const ams::sf::InBuffer &attachment_ids_buffer, Result result, erpt::CreateReportOptionFlagSet flags), (report_type, ctx_buffer, str_buffer, attachment_ids_buffer, result, flags), hos::Version_17_0_0) \
AMS_SF_METHOD_INFO(C, H, 11, Result, CreateReportV1, (erpt::ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &str_buffer, const ams::sf::InBuffer &meta_buffer, Result result), (report_type, ctx_buffer, str_buffer, meta_buffer, result), hos::Version_11_0_0) \
AMS_SF_METHOD_INFO(C, H, 12, Result, CreateReport, (erpt::ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &str_buffer, const ams::sf::InBuffer &meta_buffer, Result result, erpt::CreateReportOptionFlagSet flags), (report_type, ctx_buffer, str_buffer, meta_buffer, result, flags), hos::Version_17_0_0) \
AMS_SF_METHOD_INFO(C, H, 10, Result, CreateReportWithAttachments, (erpt::ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &str_buffer, const ams::sf::InBuffer &attachment_ids_buffer, Result result), (report_type, ctx_buffer, str_buffer, attachment_ids_buffer, result), hos::Version_11_0_0) \
AMS_SF_METHOD_INFO(C, H, 11, Result, CreateReport, (erpt::ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &str_buffer, const ams::sf::InBuffer &meta_buffer, Result result), (report_type, ctx_buffer, str_buffer, meta_buffer, result), hos::Version_11_0_0) \
AMS_SF_METHOD_INFO(C, H, 20, Result, RegisterRunningApplet, (ncm::ProgramId program_id), (program_id), hos::Version_12_0_0) \
AMS_SF_METHOD_INFO(C, H, 21, Result, UnregisterRunningApplet, (ncm::ProgramId program_id), (program_id), hos::Version_12_0_0) \
AMS_SF_METHOD_INFO(C, H, 22, Result, UpdateAppletSuspendedDuration, (ncm::ProgramId program_id, TimeSpanType duration), (program_id, duration), hos::Version_12_0_0) \


@ -76,26 +76,10 @@ namespace ams::erpt::srv {
};
#undef GET_FIELD_FLAG
inline CategoryId ConvertFieldToCategory(FieldId id) {
return FieldToCategoryMap[id];
}
inline FieldType ConvertFieldToType(FieldId id) {
return FieldToTypeMap[id];
}
inline FieldFlag ConvertFieldToFlag(FieldId id) {
return FieldToFlagMap[id];
}
constexpr inline ReportFlagSet MakeNoReportFlags() {
return util::MakeBitFlagSet<32, ReportFlag>();
}
constexpr inline CreateReportOptionFlagSet MakeNoCreateReportOptionFlags() {
return util::MakeBitFlagSet<32, CreateReportOptionFlag>();
}
constexpr inline AttachmentFlagSet MakeNoAttachmentFlags() {
return util::MakeBitFlagSet<32, AttachmentFlag>();
}


@ -63,7 +63,6 @@
#include <stratosphere/fs/fs_signed_system_partition.hpp>
#include <stratosphere/fs/fs_system_data.hpp>
#include <stratosphere/fs/fs_program_index_map_info.hpp>
#include <stratosphere/fs/fs_program_id.hpp>
#include <stratosphere/fs/impl/fs_access_log_impl.hpp>
#include <stratosphere/fs/impl/fs_hash_generator_factory_selector.hpp>
#include <stratosphere/fs/impl/fs_storage_service_object_adapter.hpp>


@ -1,26 +0,0 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
#include <stratosphere/fs/fs_content_attributes.hpp>
#include <stratosphere/ncm/ncm_ids.hpp>
namespace ams::fs {
/* ACCURATE_TO_VERSION: 17.5.0.0 */
Result GetProgramId(ncm::ProgramId *out, const char *path, fs::ContentAttributes attr);
}


@ -104,7 +104,6 @@ namespace ams::fssrv {
Result GetRightsId(ams::sf::Out<fs::RightsId> out, ncm::ProgramId program_id, ncm::StorageId storage_id);
Result RegisterExternalKey(const fs::RightsId &rights_id, const spl::AccessKey &access_key);
Result UnregisterAllExternalKey();
Result GetProgramId(ams::sf::Out<ncm::ProgramId> out, const fssrv::sf::FspPath &path, fs::ContentAttributes attr);
Result GetRightsIdByPath(ams::sf::Out<fs::RightsId> out, const fssrv::sf::FspPath &path);
Result GetRightsIdAndKeyGenerationByPathObsolete(ams::sf::Out<fs::RightsId> out, ams::sf::Out<u8> out_key_generation, const fssrv::sf::FspPath &path);
Result GetRightsIdAndKeyGenerationByPath(ams::sf::Out<fs::RightsId> out, ams::sf::Out<u8> out_key_generation, const fssrv::sf::FspPath &path, fs::ContentAttributes attr);
@ -147,8 +146,7 @@ namespace ams::fssrv {
/* fsp-ldr */
Result OpenCodeFileSystemDeprecated(ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, const fssrv::sf::Path &path, ncm::ProgramId program_id);
Result OpenCodeFileSystemDeprecated2(ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, ams::sf::Out<fs::CodeVerificationData> out_verif, const fssrv::sf::Path &path, ncm::ProgramId program_id);
Result OpenCodeFileSystemDeprecated3(ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, ams::sf::Out<fs::CodeVerificationData> out_verif, const fssrv::sf::Path &path, fs::ContentAttributes attr, ncm::ProgramId program_id);
Result OpenCodeFileSystem(ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, const ams::sf::OutBuffer &out_verif, const fssrv::sf::Path &path, fs::ContentAttributes attr, ncm::ProgramId program_id);
Result OpenCodeFileSystem(ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, ams::sf::Out<fs::CodeVerificationData> out_verif, const fssrv::sf::Path &path, fs::ContentAttributes attr, ncm::ProgramId program_id);
Result IsArchivedProgram(ams::sf::Out<bool> out, u64 process_id);
};
static_assert(sf::IsIFileSystemProxy<FileSystemProxyImpl>);
@ -167,12 +165,7 @@ namespace ams::fssrv {
R_THROW(fs::ResultPortAcceptableCountLimited());
}
Result OpenCodeFileSystemDeprecated3(ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, ams::sf::Out<fs::CodeVerificationData> out_verif, const fssrv::sf::Path &path, fs::ContentAttributes attr, ncm::ProgramId program_id) {
AMS_UNUSED(out_fs, out_verif, path, attr, program_id);
R_THROW(fs::ResultPortAcceptableCountLimited());
}
Result OpenCodeFileSystem(ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, const ams::sf::OutBuffer &out_verif, const fssrv::sf::Path &path, fs::ContentAttributes attr, ncm::ProgramId program_id) {
Result OpenCodeFileSystem(ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, ams::sf::Out<fs::CodeVerificationData> out_verif, const fssrv::sf::Path &path, fs::ContentAttributes attr, ncm::ProgramId program_id) {
AMS_UNUSED(out_fs, out_verif, path, attr, program_id);
R_THROW(fs::ResultPortAcceptableCountLimited());
}


@ -115,7 +115,6 @@
AMS_SF_METHOD_INFO(C, H, 615, Result, QuerySaveDataInternalStorageTotalSize, (), (), hos::Version_5_0_0) \
AMS_SF_METHOD_INFO(C, H, 616, Result, GetSaveDataCommitId, (), (), hos::Version_6_0_0) \
AMS_SF_METHOD_INFO(C, H, 617, Result, UnregisterExternalKey, (const fs::RightsId &rights_id), (rights_id), hos::Version_7_0_0) \
AMS_SF_METHOD_INFO(C, H, 618, Result, GetProgramId, (ams::sf::Out<ncm::ProgramId> out, const fssrv::sf::FspPath &path, fs::ContentAttributes attr), (out, path, attr), hos::Version_17_0_0) \
AMS_SF_METHOD_INFO(C, H, 620, Result, SetSdCardEncryptionSeed, (const fs::EncryptionSeed &seed), (seed), hos::Version_2_0_0) \
AMS_SF_METHOD_INFO(C, H, 630, Result, SetSdCardAccessibility, (bool accessible), (accessible), hos::Version_4_0_0) \
AMS_SF_METHOD_INFO(C, H, 631, Result, IsSdCardAccessible, (ams::sf::Out<bool> out), (out), hos::Version_4_0_0) \


@ -20,12 +20,11 @@
#include <stratosphere/fs/fs_code_verification_data.hpp>
#include <stratosphere/fs/fs_content_attributes.hpp>
/* ACCURATE_TO_VERSION: 17.5.0.0 */
/* ACCURATE_TO_VERSION: 13.4.0.0 */
#define AMS_FSSRV_I_FILE_SYSTEM_PROXY_FOR_LOADER_INTERFACE_INFO(C, H) \
AMS_SF_METHOD_INFO(C, H, 0, Result, OpenCodeFileSystemDeprecated, (ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, const fssrv::sf::Path &path, ncm::ProgramId program_id), (out_fs, path, program_id), hos::Version_Min, hos::Version_9_2_0) \
AMS_SF_METHOD_INFO(C, H, 0, Result, OpenCodeFileSystemDeprecated2, (ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, ams::sf::Out<fs::CodeVerificationData> out_verif, const fssrv::sf::Path &path, ncm::ProgramId program_id), (out_fs, out_verif, path, program_id), hos::Version_10_0_0, hos::Version_15_0_1) \
AMS_SF_METHOD_INFO(C, H, 0, Result, OpenCodeFileSystemDeprecated3, (ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, ams::sf::Out<fs::CodeVerificationData> out_verif, const fssrv::sf::Path &path, fs::ContentAttributes attr, ncm::ProgramId program_id), (out_fs, out_verif, path, attr, program_id), hos::Version_16_0_0, hos::Version_16_1_0) \
AMS_SF_METHOD_INFO(C, H, 0, Result, OpenCodeFileSystem, (ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, const ams::sf::OutBuffer &out_verif, const fssrv::sf::Path &path, fs::ContentAttributes attr, ncm::ProgramId program_id), (out_fs, out_verif, path, attr, program_id), hos::Version_17_0_0) \
AMS_SF_METHOD_INFO(C, H, 0, Result, OpenCodeFileSystem, (ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, ams::sf::Out<fs::CodeVerificationData> out_verif, const fssrv::sf::Path &path, fs::ContentAttributes attr, ncm::ProgramId program_id), (out_fs, out_verif, path, attr, program_id), hos::Version_16_0_0) \
AMS_SF_METHOD_INFO(C, H, 1, Result, IsArchivedProgram, (ams::sf::Out<bool> out, u64 process_id), (out, process_id)) \
AMS_SF_METHOD_INFO(C, H, 2, Result, SetCurrentProcess, (const ams::sf::ClientProcessId &client_pid), (client_pid), hos::Version_4_0_0)


@ -80,7 +80,6 @@ namespace ams::hos {
Version_16_0_2 = ::ams::TargetFirmware_16_0_2,
Version_16_0_3 = ::ams::TargetFirmware_16_0_3,
Version_16_1_0 = ::ams::TargetFirmware_16_1_0,
Version_17_0_0 = ::ams::TargetFirmware_17_0_0,
Version_Current = ::ams::TargetFirmware_Current,


@ -16,7 +16,6 @@
#pragma once
#include <stratosphere/ncm/ncm_content_meta_id.hpp>
#include <stratosphere/ncm/ncm_content_meta_key.hpp>
#include <stratosphere/ncm/ncm_content_meta_platform.hpp>
#include <stratosphere/ncm/ncm_content_info.hpp>
#include <stratosphere/ncm/ncm_content_info_data.hpp>
#include <stratosphere/ncm/ncm_firmware_variation.hpp>
@ -59,7 +58,7 @@ namespace ams::ncm {
u16 content_count;
u16 content_meta_count;
u8 attributes;
ContentMetaPlatform platform;
StorageId storage_id;
};
static_assert(sizeof(ContentMetaHeader) == 0x8);
@ -68,7 +67,7 @@ namespace ams::ncm {
u64 id;
u32 version;
ContentMetaType type;
ContentMetaPlatform platform;
u8 reserved_0D;
u16 extended_header_size;
u16 content_count;
u16 content_meta_count;
@ -80,6 +79,7 @@ namespace ams::ncm {
u8 reserved_1C[4];
};
static_assert(sizeof(PackagedContentMetaHeader) == 0x20);
static_assert(AMS_OFFSETOF(PackagedContentMetaHeader, reserved_0D) == 0x0D);
static_assert(AMS_OFFSETOF(PackagedContentMetaHeader, reserved_1C) == 0x1C);
using InstallContentMetaHeader = PackagedContentMetaHeader;


@ -221,11 +221,6 @@ namespace ams::ncm {
AMS_ASSERT(m_interface != nullptr);
R_RETURN(m_interface->GetContentInfoByTypeAndIdOffset(out_content_info, key, type, id_offset));
}
Result GetPlatform(ContentMetaPlatform *out, const ContentMetaKey &key) {
AMS_ASSERT(m_interface != nullptr);
R_RETURN(m_interface->GetPlatform(out, key));
}
};
}


@ -1,25 +0,0 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
namespace ams::ncm {
enum class ContentMetaPlatform : u8 {
Nx = 0x0,
};
}


@ -207,11 +207,6 @@ namespace ams::ncm {
R_RETURN(m_interface->GetRightsIdFromPlaceHolderIdWithCacheDeprecated(out_rights_id, cache_content_id, placeholder_id));
}
}
Result GetProgramId(ncm::ProgramId *out, ContentId content_id, fs::ContentAttributes attr) {
AMS_ASSERT(m_interface != nullptr);
R_RETURN(m_interface->GetProgramId(out, content_id, attr));
}
};
}


@ -43,8 +43,7 @@
AMS_SF_METHOD_INFO(C, H, 22, Result, GetOwnerApplicationId, (sf::Out<ncm::ApplicationId> out_id, const ncm::ContentMetaKey &key), (out_id, key), hos::Version_10_0_0) \
AMS_SF_METHOD_INFO(C, H, 23, Result, GetContentAccessibilities, (sf::Out<u8> out_accessibilities, const ncm::ContentMetaKey &key), (out_accessibilities, key), hos::Version_15_0_0) \
AMS_SF_METHOD_INFO(C, H, 24, Result, GetContentInfoByType, (sf::Out<ncm::ContentInfo> out_content_info, const ncm::ContentMetaKey &key, ncm::ContentType type), (out_content_info, key, type), hos::Version_15_0_0) \
AMS_SF_METHOD_INFO(C, H, 25, Result, GetContentInfoByTypeAndIdOffset, (sf::Out<ncm::ContentInfo> out_content_info, const ncm::ContentMetaKey &key, ncm::ContentType type, u8 id_offset), (out_content_info, key, type, id_offset), hos::Version_15_0_0) \
AMS_SF_METHOD_INFO(C, H, 26, Result, GetPlatform, (sf::Out<ncm::ContentMetaPlatform> out, const ncm::ContentMetaKey &key), (out, key), hos::Version_17_0_0)
AMS_SF_METHOD_INFO(C, H, 25, Result, GetContentInfoByTypeAndIdOffset, (sf::Out<ncm::ContentInfo> out_content_info, const ncm::ContentMetaKey &key, ncm::ContentType type, u8 id_offset), (out_content_info, key, type, id_offset), hos::Version_15_0_0)
AMS_SF_DEFINE_INTERFACE(ams::ncm, IContentMetaDatabase, AMS_NCM_I_CONTENT_META_DATABASE_INTERFACE_INFO, 0x58021FEC)


@ -58,7 +58,6 @@
AMS_SF_METHOD_INFO(C, H, 27, Result, GetRightsIdFromPlaceHolderIdWithCacheDeprecated, (sf::Out<ncm::RightsId> out_rights_id, ncm::ContentId cache_content_id, ncm::PlaceHolderId placeholder_id), (out_rights_id, cache_content_id, placeholder_id), hos::Version_8_0_0, hos::Version_15_0_1) \
AMS_SF_METHOD_INFO(C, H, 27, Result, GetRightsIdFromPlaceHolderIdWithCache, (sf::Out<ncm::RightsId> out_rights_id, ncm::PlaceHolderId placeholder_id, ncm::ContentId cache_content_id, fs::ContentAttributes attr), (out_rights_id, placeholder_id, cache_content_id, attr), hos::Version_16_0_0) \
AMS_SF_METHOD_INFO(C, H, 28, Result, RegisterPath, (const ncm::ContentId &content_id, const ncm::Path &path), (content_id, path), hos::Version_13_0_0) \
AMS_SF_METHOD_INFO(C, H, 29, Result, ClearRegisteredPath, (), (), hos::Version_13_0_0) \
AMS_SF_METHOD_INFO(C, H, 30, Result, GetProgramId, (sf::Out<ncm::ProgramId> out, ncm::ContentId content_id, fs::ContentAttributes attr), (out, content_id, attr), hos::Version_17_0_0)
AMS_SF_METHOD_INFO(C, H, 29, Result, ClearRegisteredPath, (), (), hos::Version_13_0_0)
AMS_SF_DEFINE_INTERFACE(ams::ncm, IContentStorage, AMS_NCM_I_CONTENT_STORAGE_INTERFACE_INFO, 0xFEAE3DD1)


@ -71,7 +71,6 @@ namespace ams::ncm {
Result GetContentAccessibilities(sf::Out<u8> out_accessibilities, const ContentMetaKey &key);
Result GetContentInfoByType(sf::Out<ContentInfo> out_content_info, const ContentMetaKey &key, ContentType type);
Result GetContentInfoByTypeAndIdOffset(sf::Out<ContentInfo> out_content_info, const ContentMetaKey &key, ContentType type, u8 id_offset);
Result GetPlatform(sf::Out<ncm::ContentMetaPlatform> out, const ContentMetaKey &key);
};
static_assert(ncm::IsIContentMetaDatabase<IntegratedContentMetaDatabaseImpl>);


@ -79,7 +79,6 @@ namespace ams::ncm {
Result GetRightsIdFromPlaceHolderIdWithCache(sf::Out<ncm::RightsId> out_rights_id, PlaceHolderId placeholder_id, ContentId cache_content_id, fs::ContentAttributes attr);
Result RegisterPath(const ContentId &content_id, const Path &path);
Result ClearRegisteredPath();
Result GetProgramId(sf::Out<ncm::ProgramId> out, ContentId content_id, fs::ContentAttributes attr);
/* 16.0.0 Alignment change hacks. */
Result CreatePlaceHolder_AtmosphereAlignmentFix(ContentId content_id, PlaceHolderId placeholder_id, s64 size) { R_RETURN(this->CreatePlaceHolder(placeholder_id, content_id, size)); }


@ -98,7 +98,6 @@ namespace ams::spl {
enum class EsDeviceUniqueKeyType {
TitleKey = 0,
ArchiveKey = 1,
Unknown2 = 2,
};
struct AsyncOperationKey {


@ -39,10 +39,6 @@ namespace ams::exosphere {
R_ABORT_UNLESS(spl::impl::SetConfig(spl::ConfigItem::ExosphereNeedsReboot, 3));
}
void ForceRebootByPmic() {
R_ABORT_UNLESS(spl::impl::SetConfig(spl::ConfigItem::ExosphereNeedsReboot, 4));
}
void ForceShutdown() {
R_ABORT_UNLESS(spl::impl::SetConfig(spl::ConfigItem::ExosphereNeedsShutdown, 1));
}


@ -48,7 +48,7 @@ namespace ams::erpt::srv {
ON_SCOPE_EXIT { Deallocate(hdr); };
hdr->magic = HeaderMagic;
hdr->field_type = static_cast<u32>(ConvertFieldToType(field_id));
hdr->field_type = static_cast<u32>(FieldToTypeMap[field_id]);
hdr->element_count = arr_size;
hdr->reserved = 0;
@ -97,7 +97,7 @@ namespace ams::erpt::srv {
}
static Result AddField(Report *report, FieldId field_id, char *str, u32 len) {
if (ConvertFieldToFlag(field_id) == FieldFlag_Encrypt) {
if (FieldToFlagMap[field_id] == FieldFlag_Encrypt) {
R_RETURN(EncryptArray<char>(report, field_id, str, len));
} else {
R_RETURN(Formatter::AddField(report, field_id, str, len));
@ -105,7 +105,7 @@ namespace ams::erpt::srv {
}
static Result AddField(Report *report, FieldId field_id, u8 *bin, u32 len) {
if (ConvertFieldToFlag(field_id) == FieldFlag_Encrypt) {
if (FieldToFlagMap[field_id] == FieldFlag_Encrypt) {
R_RETURN(EncryptArray<u8>(report, field_id, bin, len));
} else {
R_RETURN(Formatter::AddField(report, field_id, bin, len));
@ -114,7 +114,7 @@ namespace ams::erpt::srv {
template<typename T>
static Result AddField(Report *report, FieldId field_id, T *arr, u32 len) {
if (ConvertFieldToFlag(field_id) == FieldFlag_Encrypt) {
if (FieldToFlagMap[field_id] == FieldFlag_Encrypt) {
R_RETURN(EncryptArray<T>(report, field_id, arr, len));
} else {
R_RETURN(Formatter::AddField<T>(report, field_id, arr, len));


@ -38,7 +38,7 @@ namespace ams::erpt::srv {
R_RETURN(Context::SubmitContext(ctx, data, data_size));
}
Result ContextImpl::CreateReport(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &meta_buffer, Result result, erpt::CreateReportOptionFlagSet flags) {
Result ContextImpl::CreateReport(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &meta_buffer, Result result) {
const ContextEntry *ctx = reinterpret_cast<const ContextEntry *>( ctx_buffer.GetPointer());
const u8 *data = reinterpret_cast<const u8 *>(data_buffer.GetPointer());
const ReportMetaData *meta = reinterpret_cast<const ReportMetaData *>(meta_buffer.GetPointer());
@@ -50,19 +50,15 @@ namespace ams::erpt::srv {
R_UNLESS(ctx_size == sizeof(ContextEntry), erpt::ResultInvalidArgument());
R_UNLESS(meta_size == 0 || meta_size == sizeof(ReportMetaData), erpt::ResultInvalidArgument());
R_TRY(Reporter::CreateReport(report_type, result, ctx, data, data_size, meta_size != 0 ? meta : nullptr, nullptr, 0, flags));
R_TRY(Reporter::CreateReport(report_type, result, ctx, data, data_size, meta_size != 0 ? meta : nullptr, nullptr, 0));
ManagerImpl::NotifyAll();
R_SUCCEED();
}
Result ContextImpl::CreateReportV1(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &meta_buffer, Result result) {
R_RETURN(this->CreateReport(report_type, ctx_buffer, data_buffer, meta_buffer, result, erpt::srv::MakeNoCreateReportOptionFlags()));
}
Result ContextImpl::CreateReportV0(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &meta_buffer) {
R_RETURN(this->CreateReportV1(report_type, ctx_buffer, data_buffer, meta_buffer, ResultSuccess()));
R_RETURN(this->CreateReport(report_type, ctx_buffer, data_buffer, meta_buffer, ResultSuccess()));
}
Result ContextImpl::SetInitialLaunchSettingsCompletionTime(const time::SteadyClockTimePoint &time_point) {
@@ -142,7 +138,7 @@ namespace ams::erpt::srv {
R_RETURN(JournalForAttachments::SubmitAttachment(out.GetPointer(), name_safe, data, data_size));
}
Result ContextImpl::CreateReportWithAttachments(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &attachment_ids_buffer, Result result, erpt::CreateReportOptionFlagSet flags) {
Result ContextImpl::CreateReportWithAttachments(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &attachment_ids_buffer, Result result) {
const ContextEntry *ctx = reinterpret_cast<const ContextEntry *>( ctx_buffer.GetPointer());
const u8 *data = reinterpret_cast<const u8 *>(data_buffer.GetPointer());
const u32 ctx_size = static_cast<u32>(ctx_buffer.GetSize());
@@ -154,19 +150,15 @@ namespace ams::erpt::srv {
R_UNLESS(ctx_size == sizeof(ContextEntry), erpt::ResultInvalidArgument());
R_UNLESS(num_attachments <= AttachmentsPerReportMax, erpt::ResultInvalidArgument());
R_TRY(Reporter::CreateReport(report_type, result, ctx, data, data_size, nullptr, attachments, num_attachments, flags));
R_TRY(Reporter::CreateReport(report_type, result, ctx, data, data_size, nullptr, attachments, num_attachments));
ManagerImpl::NotifyAll();
R_SUCCEED();
}
Result ContextImpl::CreateReportWithAttachmentsDeprecated2(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &attachment_ids_buffer, Result result) {
R_RETURN(this->CreateReportWithAttachments(report_type, ctx_buffer, data_buffer, attachment_ids_buffer, result, erpt::srv::MakeNoCreateReportOptionFlags()));
}
Result ContextImpl::CreateReportWithAttachmentsDeprecated(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &attachment_ids_buffer) {
R_RETURN(this->CreateReportWithAttachmentsDeprecated2(report_type, ctx_buffer, data_buffer, attachment_ids_buffer, ResultSuccess()));
R_RETURN(this->CreateReportWithAttachments(report_type, ctx_buffer, data_buffer, attachment_ids_buffer, ResultSuccess()));
}
Result ContextImpl::RegisterRunningApplet(ncm::ProgramId program_id) {

View File

@@ -31,10 +31,8 @@ namespace ams::erpt::srv {
Result ClearApplicationLaunchTime();
Result SubmitAttachment(ams::sf::Out<AttachmentId> out, const ams::sf::InBuffer &attachment_name, const ams::sf::InBuffer &attachment_data);
Result CreateReportWithAttachmentsDeprecated(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &attachment_ids_buffer);
Result CreateReportWithAttachmentsDeprecated2(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &attachment_ids_buffer, Result result);
Result CreateReportWithAttachments(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &attachment_ids_buffer, Result result, erpt::CreateReportOptionFlagSet flags);
Result CreateReportV1(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &meta_buffer, Result result);
Result CreateReport(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &meta_buffer, Result result, erpt::CreateReportOptionFlagSet flags);
Result CreateReportWithAttachments(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &attachment_ids_buffer, Result result);
Result CreateReport(ReportType report_type, const ams::sf::InBuffer &ctx_buffer, const ams::sf::InBuffer &data_buffer, const ams::sf::InBuffer &meta_buffer, Result result);
Result RegisterRunningApplet(ncm::ProgramId program_id);
Result UnregisterRunningApplet(ncm::ProgramId program_id);
Result UpdateAppletSuspendedDuration(ncm::ProgramId program_id, TimeSpanType duration);

View File

@@ -78,8 +78,8 @@ namespace ams::erpt::srv {
R_UNLESS(0 <= m_ctx.fields[i].id && m_ctx.fields[i].id < FieldId_Count, erpt::ResultInvalidArgument());
R_UNLESS(0 <= m_ctx.fields[i].type && m_ctx.fields[i].type < FieldType_Count, erpt::ResultInvalidArgument());
R_UNLESS(m_ctx.fields[i].type == ConvertFieldToType(m_ctx.fields[i].id), erpt::ResultFieldTypeMismatch());
R_UNLESS(m_ctx.category == ConvertFieldToCategory(m_ctx.fields[i].id), erpt::ResultFieldCategoryMismatch());
R_UNLESS(m_ctx.fields[i].type == FieldToTypeMap[m_ctx.fields[i].id], erpt::ResultFieldTypeMismatch());
R_UNLESS(m_ctx.category == FieldToCategoryMap[m_ctx.fields[i].id], erpt::ResultFieldCategoryMismatch());
if (IsArrayFieldType(m_ctx.fields[i].type)) {
const u32 start_idx = m_ctx.fields[i].value_array.start_idx;

View File

@@ -86,7 +86,7 @@ namespace ams::erpt::srv {
R_TRY(record->Add(FieldId_ErrorCode, error_code_str, std::strlen(error_code_str)));
/* Create report. */
R_TRY(Reporter::CreateReport(ReportType_Invisible, ResultSuccess(), std::move(record), nullptr, nullptr, 0, erpt::srv::MakeNoCreateReportOptionFlags()));
R_TRY(Reporter::CreateReport(ReportType_Invisible, ResultSuccess(), std::move(record), nullptr, nullptr, 0));
R_SUCCEED();
}

View File

@@ -413,7 +413,7 @@ namespace ams::erpt::srv {
R_SUCCEED();
}
Result Reporter::CreateReport(ReportType type, Result ctx_result, const ContextEntry *ctx, const u8 *data, u32 data_size, const ReportMetaData *meta, const AttachmentId *attachments, u32 num_attachments, erpt::CreateReportOptionFlagSet flags) {
Result Reporter::CreateReport(ReportType type, Result ctx_result, const ContextEntry *ctx, const u8 *data, u32 data_size, const ReportMetaData *meta, const AttachmentId *attachments, u32 num_attachments) {
/* Create a context record for the report. */
auto record = std::make_unique<ContextRecord>();
R_UNLESS(record != nullptr, erpt::ResultOutOfMemory());
@@ -422,10 +422,10 @@ namespace ams::erpt::srv {
R_TRY(record->Initialize(ctx, data, data_size));
/* Create the report. */
R_RETURN(CreateReport(type, ctx_result, std::move(record), meta, attachments, num_attachments, flags));
R_RETURN(CreateReport(type, ctx_result, std::move(record), meta, attachments, num_attachments));
}
Result Reporter::CreateReport(ReportType type, Result ctx_result, std::unique_ptr<ContextRecord> record, const ReportMetaData *meta, const AttachmentId *attachments, u32 num_attachments, erpt::CreateReportOptionFlagSet flags) {
Result Reporter::CreateReport(ReportType type, Result ctx_result, std::unique_ptr<ContextRecord> record, const ReportMetaData *meta, const AttachmentId *attachments, u32 num_attachments) {
/* Clear the automatic categories, when we're done with our report. */
ON_SCOPE_EXIT {
Context::ClearContext(CategoryId_ErrorInfo);
@@ -457,7 +457,7 @@ namespace ams::erpt::srv {
SaveSyslogReportIfRequired(ctx, report_id);
/* Submit report contexts. */
R_TRY(SubmitReportContexts(report_id, type, ctx_result, std::move(record), timestamp_user, timestamp_network, flags));
R_TRY(SubmitReportContexts(report_id, type, ctx_result, std::move(record), timestamp_user, timestamp_network));
/* Link attachments to the report. */
R_TRY(LinkAttachments(report_id, attachments, num_attachments));
@@ -468,7 +468,7 @@ namespace ams::erpt::srv {
R_SUCCEED();
}
Result Reporter::SubmitReportContexts(const ReportId &report_id, ReportType type, Result ctx_result, std::unique_ptr<ContextRecord> record, const time::PosixTime &timestamp_user, const time::PosixTime &timestamp_network, erpt::CreateReportOptionFlagSet flags) {
Result Reporter::SubmitReportContexts(const ReportId &report_id, ReportType type, Result ctx_result, std::unique_ptr<ContextRecord> record, const time::PosixTime &timestamp_user, const time::PosixTime &timestamp_network) {
/* Create automatic record. */
auto auto_record = std::make_unique<ContextRecord>(CategoryId_ErrorInfoAuto, 0x300);
R_UNLESS(auto_record != nullptr, erpt::ResultOutOfMemory());
@@ -530,10 +530,6 @@ namespace ams::erpt::srv {
SubmitResourceLimitContexts();
#endif
if (flags.Test<CreateReportOptionFlag::SubmitFsInfo>()) {
/* TODO: 17.0.0 SubmitFsInfo() */
}
R_SUCCEED();
}

View File

@@ -56,10 +56,10 @@ namespace ams::erpt::srv {
static void SetRedirectNewReportsToSdCard(bool en) { s_redirect_new_reports = en; }
private:
static Result SubmitReportContexts(const ReportId &report_id, ReportType type, Result ctx_result, std::unique_ptr<ContextRecord> record, const time::PosixTime &user_timestamp, const time::PosixTime &network_timestamp, erpt::CreateReportOptionFlagSet flags);
static Result SubmitReportContexts(const ReportId &report_id, ReportType type, Result ctx_result, std::unique_ptr<ContextRecord> record, const time::PosixTime &user_timestamp, const time::PosixTime &network_timestamp);
public:
static Result CreateReport(ReportType type, Result ctx_result, const ContextEntry *ctx, const u8 *data, u32 data_size, const ReportMetaData *meta, const AttachmentId *attachments, u32 num_attachments, erpt::CreateReportOptionFlagSet flags);
static Result CreateReport(ReportType type, Result ctx_result, std::unique_ptr<ContextRecord> record, const ReportMetaData *meta, const AttachmentId *attachments, u32 num_attachments, erpt::CreateReportOptionFlagSet flags);
static Result CreateReport(ReportType type, Result ctx_result, const ContextEntry *ctx, const u8 *data, u32 data_size, const ReportMetaData *meta, const AttachmentId *attachments, u32 num_attachments);
static Result CreateReport(ReportType type, Result ctx_result, std::unique_ptr<ContextRecord> record, const ReportMetaData *meta, const AttachmentId *attachments, u32 num_attachments);
};
}
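Aside: the signatures above differ only by a trailing erpt::CreateReportOptionFlagSet parameter, and the ContextImpl hunks earlier show the older entry points forwarding into the flag-taking overload with MakeNoCreateReportOptionFlags(). A small stand-alone sketch of that forwarding pattern follows; the flag representation and function bodies are assumptions made for the example, not Atmosphère's real definitions.
#include <cstdint>
#include <cstdio>
/* Assumed stand-in for the real flag-set type (likely a bit-flag set in Atmosphère). */
using CreateReportOptionFlagSet = std::uint32_t;
constexpr CreateReportOptionFlagSet MakeNoCreateReportOptionFlags() { return 0; }
/* New entry point: callers pass option flags explicitly. */
int CreateReport(int report_type, CreateReportOptionFlagSet flags) {
    std::printf("type=%d flags=%u\n", report_type, static_cast<unsigned>(flags)); /* placeholder for the real work */
    return 0;
}
/* Older entry point kept for compatibility: forwards with an empty flag set. */
int CreateReportV1(int report_type) {
    return CreateReport(report_type, MakeNoCreateReportOptionFlags());
}
int main() {
    return CreateReportV1(0);
}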

View File

@@ -79,7 +79,7 @@ namespace ams::fs {
R_TRY(fsp->SetCurrentProcess({}));
sf::SharedPointer<fssrv::sf::IFileSystem> fs;
R_TRY(fsp->OpenCodeFileSystem(std::addressof(fs), ams::sf::OutBuffer(out_verification_data, sizeof(*out_verification_data)), sf_path, attr, program_id));
R_TRY(fsp->OpenCodeFileSystem(std::addressof(fs), out_verification_data, sf_path, attr, program_id));
/* Allocate a new filesystem wrapper. */
auto fsa = std::make_unique<impl::FileSystemServiceObjectAdapter>(std::move(fs));

View File

@@ -1,36 +0,0 @@
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <stratosphere.hpp>
#include <stratosphere/fs/fs_rights_id.hpp>
#include "impl/fs_file_system_proxy_service_object.hpp"
namespace ams::fs {
Result GetProgramId(ncm::ProgramId *out, const char *path, fs::ContentAttributes attr) {
AMS_FS_R_UNLESS(out != nullptr, fs::ResultNullptrArgument());
AMS_FS_R_UNLESS(path != nullptr, fs::ResultNullptrArgument());
/* Convert the path for fsp. */
fssrv::sf::FspPath sf_path;
R_TRY(fs::ConvertToFspPath(std::addressof(sf_path), path));
auto fsp = impl::GetFileSystemProxyServiceObject();
AMS_FS_R_TRY(fsp->GetProgramId(out, sf_path, attr));
R_SUCCEED();
}
}
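Aside: the file above is a thin client wrapper; it validates its arguments, converts the path for fsp, and forwards to the proxy object's GetProgramId. A hedged usage sketch follows, assuming the Atmosphère SDK headers are available; the helper name, path string, and fs::ContentAttributes_None value are placeholders chosen for the example, not values from the diff.
#include <stratosphere.hpp>
namespace {
    /* Hypothetical helper, only to illustrate calling the wrapper shown above. */
    ams::Result ResolveExampleProgramId(ams::ncm::ProgramId *out) {
        /* Path and attributes are placeholders. */
        R_RETURN(ams::fs::GetProgramId(out, "/example/content.nca", ams::fs::ContentAttributes_None));
    }
}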

View File

@@ -320,10 +320,6 @@ namespace ams::fs {
AMS_ABORT("TODO");
}
Result GetProgramId(ams::sf::Out<ncm::ProgramId> out, const fssrv::sf::FspPath &path, fs::ContentAttributes attr) {
static_assert(sizeof(ncm::ProgramId) == sizeof(u64));
R_RETURN(fsGetProgramId(reinterpret_cast<u64 *>(out.GetPointer()), path.str, static_cast<::FsContentAttributes>(static_cast<u8>(attr))));
}
Result GetRightsIdByPath(ams::sf::Out<fs::RightsId> out, const fssrv::sf::FspPath &path) {
static_assert(sizeof(RightsId) == sizeof(::FsRightsId));

View File

@@ -47,15 +47,7 @@ namespace ams::fs {
R_SUCCEED();
}
Result OpenCodeFileSystemDeprecated3(ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, ams::sf::Out<fs::CodeVerificationData> out_verif, const fssrv::sf::Path &path, fs::ContentAttributes attr, ncm::ProgramId program_id) {
::FsFileSystem fs;
R_TRY(fsldrOpenCodeFileSystem(reinterpret_cast<::FsCodeInfo *>(out_verif.GetPointer()), program_id.value, path.str, static_cast<::FsContentAttributes>(static_cast<u8>(attr)), std::addressof(fs)));
out_fs.SetValue(ObjectFactory::CreateSharedEmplaced<fssrv::sf::IFileSystem, fssrv::impl::RemoteFileSystem>(fs));
R_SUCCEED();
}
Result OpenCodeFileSystem(ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, const ams::sf::OutBuffer &out_verif, const fssrv::sf::Path &path, fs::ContentAttributes attr, ncm::ProgramId program_id) {
Result OpenCodeFileSystem(ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, ams::sf::Out<fs::CodeVerificationData> out_verif, const fssrv::sf::Path &path, fs::ContentAttributes attr, ncm::ProgramId program_id) {
::FsFileSystem fs;
R_TRY(fsldrOpenCodeFileSystem(reinterpret_cast<::FsCodeInfo *>(out_verif.GetPointer()), program_id.value, path.str, static_cast<::FsContentAttributes>(static_cast<u8>(attr)), std::addressof(fs)));

View File

@@ -21,7 +21,7 @@ namespace ams::fs::impl {
#define ADD_ENUM_CASE(v) case v: return #v
template<> const char *IdString::ToString<pkg1::KeyGeneration>(pkg1::KeyGeneration id) {
static_assert(pkg1::KeyGeneration_Current == pkg1::KeyGeneration_17_0_0);
static_assert(pkg1::KeyGeneration_Current == pkg1::KeyGeneration_16_0_0);
switch (id) {
using enum pkg1::KeyGeneration;
case KeyGeneration_1_0_0: return "1.0.0-2.3.0";
@@ -39,8 +39,7 @@ namespace ams::fs::impl {
case KeyGeneration_13_0_0: return "13.0.0-13.2.1";
case KeyGeneration_14_0_0: return "14.0.0-14.1.2";
case KeyGeneration_15_0_0: return "15.0.0-15.0.1";
case KeyGeneration_16_0_0: return "16.0.0-16.0.3";
case KeyGeneration_17_0_0: return "17.0.0-";
case KeyGeneration_16_0_0: return "16.0.0-";
default: return "Unknown";
}
}

View File

@@ -465,12 +465,7 @@ namespace ams::fssrv {
AMS_UNUSED(out_fs, out_verif, path, program_id);
}
Result FileSystemProxyImpl::OpenCodeFileSystemDeprecated3(ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, ams::sf::Out<fs::CodeVerificationData> out_verif, const fssrv::sf::Path &path, fs::ContentAttributes attr, ncm::ProgramId program_id) {
AMS_ABORT("TODO");
AMS_UNUSED(out_fs, out_verif, path, attr, program_id);
}
Result FileSystemProxyImpl::OpenCodeFileSystem(ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, const ams::sf::OutBuffer &out_verif, const fssrv::sf::Path &path, fs::ContentAttributes attr, ncm::ProgramId program_id) {
Result FileSystemProxyImpl::OpenCodeFileSystem(ams::sf::Out<ams::sf::SharedPointer<fssrv::sf::IFileSystem>> out_fs, ams::sf::Out<fs::CodeVerificationData> out_verif, const fssrv::sf::Path &path, fs::ContentAttributes attr, ncm::ProgramId program_id) {
AMS_ABORT("TODO");
AMS_UNUSED(out_fs, out_verif, path, attr, program_id);
}

View File

@@ -784,8 +784,7 @@ namespace ams::ncm {
}
Result ContentManagerImpl::BuildContentMetaDatabase(StorageId storage_id) {
/* NOTE: we build on 17.0.0+, to work around a change in Nintendo save handling behavior. */
if (hos::GetVersion() < hos::Version_5_0_0 || hos::GetVersion() >= hos::Version_17_0_0) {
if (hos::GetVersion() < hos::Version_5_0_0) {
/* Temporarily activate the database. */
R_TRY(this->ActivateContentMetaDatabase(storage_id));
ON_SCOPE_EXIT { this->InactivateContentMetaDatabase(storage_id); };
@@ -947,8 +946,7 @@ namespace ams::ncm {
R_TRY(this->CreateContentMetaDatabase(StorageId::BuiltInSystem));
/* Try to build or import a database, depending on our configuration. */
/* NOTE: To work around a change in save management behavior in 17.0.0+, we build the database if needed. */
if (manager_config.ShouldBuildDatabase() || hos::GetVersion() >= hos::Version_17_0_0) {
if (manager_config.ShouldBuildDatabase()) {
/* If we should build the database, do so. */
R_TRY(this->BuildContentMetaDatabase(StorageId::BuiltInSystem));
R_TRY(this->VerifyContentMetaDatabase(StorageId::BuiltInSystem));

View File

@@ -26,7 +26,6 @@ namespace ams::ncm {
.content_count = src.content_count,
.content_meta_count = src.content_meta_count,
.attributes = src.attributes,
.platform = src.platform,
};
}
@@ -43,7 +42,6 @@ namespace ams::ncm {
.content_count = src.content_count,
.content_meta_count = src.content_meta_count,
.attributes = src.attributes,
.platform = src.platform,
};
}

View File

@@ -518,20 +518,4 @@ namespace ams::ncm {
R_RETURN(this->GetContentInfoImpl(out_content_info.GetPointer(), key, type, util::make_optional(id_offset)));
}
Result ContentMetaDatabaseImpl::GetPlatform(sf::Out<ncm::ContentMetaPlatform> out, const ContentMetaKey &key) {
R_TRY(this->EnsureEnabled());
/* Obtain the content meta for the key. */
const void *meta;
size_t meta_size;
R_TRY(this->GetContentMetaPointer(&meta, &meta_size, key));
/* Create a reader. */
ContentMetaReader reader(meta, meta_size);
/* Set the output value. */
out.SetValue(reader.GetHeader()->platform);
R_SUCCEED();
}
}

Some files were not shown because too many files have changed in this diff.